From 5b065316113b97aadb43e63cc31bb8639f6a6376 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 14 Dec 2018 03:24:26 +0000
Subject: Update to pylint 2.2.2.

The tip-pylint tox target correctly reported the invalid use of string
formatting. The change here is to: a.) Fix the error that was caught.
b.) Move to pylint 2.2.2 for the default 'pylint' target.
---
 cloudinit/sources/DataSourceAzure.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index e076d5dc..46efca4a 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -980,8 +980,8 @@ def read_azure_ovf(contents):
         raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
     if len(lpcs_nodes) > 1:
         raise BrokenAzureDataSource("found '%d' %ss" %
-                                    ("LinuxProvisioningConfigurationSet",
-                                     len(lpcs_nodes)))
+                                    (len(lpcs_nodes),
+                                     "LinuxProvisioningConfigurationSet"))
     lpcs = lpcs_nodes[0]

     if not lpcs.hasChildNodes():
--
cgit v1.2.3

From f55bb17ddb2fd64e039057bf7ee50951a0dc93e8 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 20 Dec 2018 17:22:45 +0000
Subject: Vmware: Add support for the com.vmware.guestInfo OVF transport.

This adds support for reading OVF information over the
'com.vmware.guestInfo' transport. The current implementation requires
that vmware-rpctool be installed on the system.

LP: #1807466
---
 cloudinit/sources/DataSourceOVF.py          | 34 ++++++++------
 tests/unittests/test_datasource/test_ovf.py | 72 ++++++++++++++++++++++++++---
 tests/unittests/test_ds_identify.py         | 15 ++++++
 tools/ds-identify                           | 21 +++++++++
 4 files changed, 121 insertions(+), 21 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 045291e7..891d6547 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -232,10 +232,10 @@ class DataSourceOVF(sources.DataSource):
                     GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
         else:
-            np = {'iso': transport_iso9660,
-                  'vmware-guestd': transport_vmware_guestd, }
+            np = [('com.vmware.guestInfo', transport_vmware_guestinfo),
+                  ('iso', transport_iso9660)]
             name = None
-            for (name, transfunc) in np.items():
+            for name, transfunc in np:
                 (contents, _dev, _fname) = transfunc()
                 if contents:
                     break
@@ -503,18 +503,22 @@ def transport_iso9660(require_iso=True):
     return (False, None, None)


-def transport_vmware_guestd():
-    # http://blogs.vmware.com/vapp/2009/07/ \
-    #    selfconfiguration-and-the-ovf-environment.html
-    # try:
-    #     cmd = ['vmware-guestd', '--cmd', 'info-get guestinfo.ovfEnv']
-    #     (out, err) = subp(cmd)
-    #     return(out, 'guestinfo.ovfEnv', 'vmware-guestd')
-    # except:
-    #     # would need to error check here and see why this failed
-    #     # to know if log/error should be raised
-    #     return(False, None, None)
-    return (False, None, None)
+def transport_vmware_guestinfo():
+    rpctool = "vmware-rpctool"
+    not_found = (False, None, None)
+    if not util.which(rpctool):
+        return not_found
+    cmd = [rpctool, "info-get guestinfo.ovfEnv"]
+    try:
+        out, _err = util.subp(cmd)
+        if out:
+            return (out, rpctool, "guestinfo.ovfEnv")
+        LOG.debug("cmd %s exited 0 with empty stdout: %s", cmd, out)
+    except util.ProcessExecutionError as e:
+        if e.exit_code != 1:
+            LOG.warning("%s exited with code %d", rpctool, e.exit_code)
+        LOG.debug(e)
+    return not_found


 def find_child(node, filter_func):
diff --git a/tests/unittests/test_datasource/test_ovf.py
b/tests/unittests/test_datasource/test_ovf.py index a226c032..e4af0fa3 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -17,6 +17,8 @@ from cloudinit.sources import DataSourceOVF as dsovf from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( CustomScriptNotFound) +MPATH = 'cloudinit.sources.DataSourceOVF.' + OVF_ENV_CONTENT = """ \n/dev/null 2>&1 || return 1 + local out="" ret="" + out=$(vmware-rpctool "info-get guestinfo.ovfEnv" 2>&1) + ret=$? + if [ $ret -ne 0 ]; then + debug 1 "Running on vmware but rpctool query returned $ret: $out" + return 1 + fi + case "$1" in + "=label,=label2 # like /dev/sr0=OVF-TRANSPORT,/dev/other=with spaces if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then -- cgit v1.2.3 From d4d11c78e5e78999356fd0c3d124b5a298735b65 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 20 Dec 2018 20:52:05 +0000 Subject: OVF: simplify expected return values of transport functions. Transport functions (transport_iso9660 and transport_vmware_guestinfo) would return a tuple of 3 values, but only the first was ever used outside of test. The other values (device and filename) were just ignored. This just simplifies the transport functions to now return content (in string format) or None indicating that the transport was not found. --- cloudinit/sources/DataSourceOVF.py | 20 ++++----- tests/unittests/test_datasource/test_ovf.py | 70 ++++++++++------------------- 2 files changed, 33 insertions(+), 57 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 891d6547..3a3fcdf6 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -236,7 +236,7 @@ class DataSourceOVF(sources.DataSource): ('iso', transport_iso9660)] name = None for name, transfunc in np: - (contents, _dev, _fname) = transfunc() + contents = transfunc() if contents: break if contents: @@ -464,8 +464,8 @@ def maybe_cdrom_device(devname): return cdmatch.match(devname) is not None -# Transport functions take no input and return -# a 3 tuple of content, path, filename +# Transport functions are called with no arguments and return +# either None (indicating not present) or string content of an ovf-env.xml def transport_iso9660(require_iso=True): # Go through mounts to see if it was already mounted @@ -477,9 +477,9 @@ def transport_iso9660(require_iso=True): if not maybe_cdrom_device(dev): continue mp = info['mountpoint'] - (fname, contents) = get_ovf_env(mp) + (_fname, contents) = get_ovf_env(mp) if contents is not False: - return (contents, dev, fname) + return contents if require_iso: mtype = "iso9660" @@ -492,27 +492,27 @@ def transport_iso9660(require_iso=True): if maybe_cdrom_device(dev)] for dev in devs: try: - (fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype) + (_fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype) except util.MountFailedError: LOG.debug("%s not mountable as iso9660", dev) continue if contents is not False: - return (contents, dev, fname) + return contents - return (False, None, None) + return None def transport_vmware_guestinfo(): rpctool = "vmware-rpctool" - not_found = (False, None, None) + not_found = None if not util.which(rpctool): return not_found cmd = [rpctool, "info-get guestinfo.ovfEnv"] try: out, _err = util.subp(cmd) if out: - return (out, rpctool, "guestinfo.ovfEnv") + return out LOG.debug("cmd %s exited 0 with empty stdout: %s", cmd, out) except 
util.ProcessExecutionError as e:
         if e.exit_code != 1:
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
index e4af0fa3..349d54cc 100644
--- a/tests/unittests/test_datasource/test_ovf.py
+++ b/tests/unittests/test_datasource/test_ovf.py
@@ -19,6 +19,8 @@ from cloudinit.sources.helpers.vmware.imc.config_custom_script import (

 MPATH = 'cloudinit.sources.DataSourceOVF.'

+NOT_FOUND = None
+
 OVF_ENV_CONTENT = """
Date: Thu, 20 Dec 2018 21:49:09 +0000
Subject: Scaleway: Support ssh keys provided inside an instance tag.

The change here utilizes ssh keys found inside an instance's tags. Each
tag value must start with 'AUTHORIZED_KEY='.
---
 cloudinit/sources/DataSourceScaleway.py          | 11 +++-
 tests/unittests/test_datasource/test_scaleway.py | 76 ++++++++++++++++++++++--
 2 files changed, 82 insertions(+), 5 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 9dc4ab23..b573b382 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -253,7 +253,16 @@ class DataSourceScaleway(sources.DataSource):
         return self.metadata['id']

     def get_public_ssh_keys(self):
-        return [key['key'] for key in self.metadata['ssh_public_keys']]
+        ssh_keys = [key['key'] for key in self.metadata['ssh_public_keys']]
+
+        akeypre = "AUTHORIZED_KEY="
+        plen = len(akeypre)
+        for tag in self.metadata.get('tags', []):
+            if not tag.startswith(akeypre):
+                continue
+            ssh_keys.append(tag[plen:].replace("_", " "))
+
+        return ssh_keys

     def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
         return self.metadata['hostname']
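A quick worked example of the tag handling above, using the key material
from the tests (underscores in the tag value are translated back to
spaces):

    tag = "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD"
    prefix = "AUTHORIZED_KEY="
    key = tag[len(prefix):].replace("_", " ")
    print(key)  # -> ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD

Note that slicing with tag[plen:] (everything after the prefix) is what
makes this work; slicing with tag[:plen] would return the prefix itself.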
diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
index c2bc7a00..f96bf0a2 100644
--- a/tests/unittests/test_datasource/test_scaleway.py
+++ b/tests/unittests/test_datasource/test_scaleway.py
@@ -49,6 +49,9 @@ class MetadataResponses(object):
     FAKE_METADATA = {
         'id': '00000000-0000-0000-0000-000000000000',
         'hostname': 'scaleway.host',
+        'tags': [
+            "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+        ],
         'ssh_public_keys': [{
             'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
             'fingerprint': '2048 06:ae:... login (RSA)'
@@ -204,10 +207,11 @@ class TestDataSourceScaleway(HttprettyTestCase):

         self.assertEqual(self.datasource.get_instance_id(),
                          MetadataResponses.FAKE_METADATA['id'])
-        self.assertEqual(self.datasource.get_public_ssh_keys(), [
-            elem['key'] for elem in
-            MetadataResponses.FAKE_METADATA['ssh_public_keys']
-        ])
+        self.assertEqual(sorted(self.datasource.get_public_ssh_keys()),
+                         sorted([
+                             u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
+                             u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+                         ]))
         self.assertEqual(self.datasource.get_hostname(),
                          MetadataResponses.FAKE_METADATA['hostname'])
         self.assertEqual(self.datasource.get_userdata_raw(),
@@ -218,6 +222,70 @@ class TestDataSourceScaleway(HttprettyTestCase):
         self.assertIsNone(self.datasource.region)
         self.assertEqual(sleep.call_count, 0)

+    def test_ssh_keys_empty(self):
+        """
+        get_public_ssh_keys() should return an empty list if no ssh keys
+        are available
+        """
+        self.datasource.metadata['tags'] = []
+        self.datasource.metadata['ssh_public_keys'] = []
+        self.assertEqual(self.datasource.get_public_ssh_keys(), [])
+
+    def test_ssh_keys_only_tags(self):
+        """
+        get_public_ssh_keys() should return the list of keys available in
+        tags
+        """
+        self.datasource.metadata['tags'] = [
+            "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+            "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+        ]
+        self.datasource.metadata['ssh_public_keys'] = []
+        self.assertEqual(sorted(self.datasource.get_public_ssh_keys()),
+                         sorted([
+                             u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
+                             u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+                         ]))
+
+    def test_ssh_keys_only_conf(self):
+        """
+        get_public_ssh_keys() should return the list of keys available in
+        the ssh_public_keys field
+        """
+        self.datasource.metadata['tags'] = []
+        self.datasource.metadata['ssh_public_keys'] = [{
+            'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+            'fingerprint': '2048 06:ae:... login (RSA)'
+        }, {
+            'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+            'fingerprint': '2048 06:ff:... login2 (RSA)'
+        }]
+        self.assertEqual(sorted(self.datasource.get_public_ssh_keys()),
+                         sorted([
+                             u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+                             u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+                         ]))
+
+    def test_ssh_keys_both(self):
+        """
+        get_public_ssh_keys() should return a merge of the keys available
+        in ssh_public_keys and tags
+        """
+        self.datasource.metadata['tags'] = [
+            "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+        ]
+
+        self.datasource.metadata['ssh_public_keys'] = [{
+            'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+            'fingerprint': '2048 06:ae:... login (RSA)'
+        }, {
+            'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+            'fingerprint': '2048 06:ff:... login2 (RSA)'
+        }]
+        self.assertEqual(sorted(self.datasource.get_public_ssh_keys()),
+                         sorted([
+                             u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+                             u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+                             u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
+                         ]))
+
 @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
 @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
             get_source_address_adapter)
--
cgit v1.2.3

From f19dc8fa62d4fd8de33311c3c75c5b6da440bebe Mon Sep 17 00:00:00 2001
From: Jason Zions
Date: Tue, 15 Jan 2019 17:05:47 +0000
Subject: [Azure] Increase retries when talking to Wireserver during metadata walk

Testing startup of large numbers of VMs (of varying distros) in Azure
shows that 3 retries results in a small percentage of failed VMs.
Increasing that by a few dramatically decreases the occurrence of
provisioning timeout errors. The initial choice of "3 retries" was
uninformed by heavy testing. Also, the alternate provisioning mechanism
for Azure (waagent) retries the Wireserver crawl without limit. 10
retries seems a more reasonable choice.
---
 cloudinit/sources/DataSourceAzure.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 46efca4a..a4f998b3 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -416,7 +416,7 @@ class DataSourceAzure(sources.DataSource):
             raise sources.InvalidMetaDataException(msg)
         ret = self._reprovision()
         imds_md = get_metadata_from_imds(
-            self.fallback_interface, retries=3)
+            self.fallback_interface, retries=10)
         (md, userdata_raw, cfg, files) = ret
         self.seed = cdev
         crawled_data.update({
--
cgit v1.2.3

From d1a2fe7307e9cf2251d1f9a666c12d71d3f522d6 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Sat, 26 Jan 2019 15:06:42 +0000
Subject: opennebula: exclude EPOCHREALTIME as known bash env variable with a delta

This branch is needed to allow cloud-init to sbuild on Ubuntu Disco.

OpenNebula:parse_shell_config does a comparison of bash environment
values, excluding expected environment variables which are known to
change. Bash on Ubuntu Disco surfaces a new EPOCHREALTIME environment
variable that wasn't present in previous bash environments; this var
needs to be ignored by parse_shell_config too.

LP: #1813383
---
 cloudinit/sources/DataSourceOpenNebula.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index e62e9729..6e1d04bd 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -337,7 +337,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
     (output, _error) = util.subp(cmd, data=bcmd)

     # exclude vars in bash that change on their own or that we used
-    excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v")
+    excluded = ("EPOCHREALTIME", "RANDOM", "LINENO", "SECONDS", "_", "__v")
     preset = {}
     ret = {}
     target = None
--
cgit v1.2.3

From 8ee294d567c071fff1f9567e968ba73602308192 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Mon, 28 Jan 2019 20:07:03 +0000
Subject: opennebula: also exclude epochseconds from changed environment vars

In addition to EPOCHREALTIME, there is also an EPOCHSECONDS environment
variable that OpenNebula needs to exclude, as it is expected to change.
This commit supplements the earlier exclusion in commit
d1a2fe7307e9cf2251d1f9a666c12d71d3f522d6. Without this fix, unittests
will intermittently fail if parse_shell_config runs across a timing
boundary where the EPOCHSECONDS value changes mid-test.

LP: #1813641
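The exclusion logic these two commits extend is easiest to see in a
standalone sketch of the before/after comparison (a simplified stand-in,
not the actual parse_shell_config implementation):

    # Volatile bash variables that change on their own between two
    # snapshots of the same shell, so a before/after diff must skip them.
    EXCLUDED = ("EPOCHREALTIME", "EPOCHSECONDS", "RANDOM", "LINENO",
                "SECONDS", "_", "__v")


    def changed_vars(before, after):
        """Return {name: value} for variables that really changed."""
        return {name: value for name, value in after.items()
                if name not in EXCLUDED and before.get(name) != value}


    # RANDOM and EPOCHSECONDS differ between snapshots, but only FOO,
    # the variable the sourced content actually set, is reported.
    before = {'FOO': '1', 'RANDOM': '4', 'EPOCHSECONDS': '100'}
    after = {'FOO': '2', 'RANDOM': '7', 'EPOCHSECONDS': '101'}
    assert changed_vars(before, after) == {'FOO': '2'}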
---
 cloudinit/sources/DataSourceOpenNebula.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 6e1d04bd..02c9a7b8 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -337,7 +337,9 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
     (output, _error) = util.subp(cmd, data=bcmd)

     # exclude vars in bash that change on their own or that we used
-    excluded = ("EPOCHREALTIME", "RANDOM", "LINENO", "SECONDS", "_", "__v")
+    excluded = (
+        "EPOCHREALTIME", "EPOCHSECONDS", "RANDOM", "LINENO", "SECONDS", "_",
+        "__v")
     preset = {}
     ret = {}
     target = None
--
cgit v1.2.3

From 94a64529dccebd8fe8c7969370b8696e46023fbd Mon Sep 17 00:00:00 2001
From: Paride Legovini
Date: Wed, 30 Jan 2019 15:38:56 +0000
Subject: Resolve flake8 comparison and pycodestyle over-indent issues

Fixes:
 - flake8: use ==/!= to compare str, bytes, and int literals
 - pycodestyle: E117 over-indented
---
 cloudinit/config/schema.py                     | 2 +-
 cloudinit/handlers/upstart_job.py              | 2 +-
 cloudinit/sources/DataSourceEc2.py             | 2 +-
 cloudinit/stages.py                            | 4 ++--
 cloudinit/url_helper.py                        | 2 +-
 tests/unittests/test_distros/test_netconfig.py | 2 +-
 6 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 080a6d06..807c3eee 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -367,7 +367,7 @@ def handle_schema_args(name, args):
             if not args.annotate:
                 error(str(e))
         except RuntimeError as e:
-                error(str(e))
+            error(str(e))
         else:
             print("Valid cloud-config file {0}".format(args.config_file))
     if args.doc:
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 83fb0724..003cad60 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -89,7 +89,7 @@ def _has_suitable_upstart():
             util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
             return True
         except util.ProcessExecutionError as e:
-            if e.exit_code is 1:
+            if e.exit_code == 1:
                 pass
             else:
                 util.logexc(LOG, "dpkg --compare-versions failed [%s]",
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 9ccf2cdc..eb6f27b2 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -442,7 +442,7 @@ def identify_aws(data):
     if (data['uuid'].startswith('ec2') and
             (data['uuid_source'] == 'hypervisor' or
              data['uuid'] == data['serial'])):
-            return CloudNames.AWS
+        return CloudNames.AWS

     return None

diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 8a064124..da7d349a 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -548,11 +548,11 @@ class Init(object):
         with events.ReportEventStack("consume-user-data",
                                      "reading and applying user-data",
                                      parent=self.reporter):
-                self._consume_userdata(frequency)
+            self._consume_userdata(frequency)
         with events.ReportEventStack("consume-vendor-data",
                                      "reading and applying vendor-data",
                                      parent=self.reporter):
-                self._consume_vendordata(frequency)
+            self._consume_vendordata(frequency)

         # Perform post-consumption adjustments so that
         # modules that run during the init stage reflect
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 396d69ae..0af0d9e3 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -521,7 +521,7 @@ class OauthUrlHelper(object):
             if
extra_exception_cb:
                 ret = extra_exception_cb(msg, exception)
         finally:
-                self.exception_cb(msg, exception)
+            self.exception_cb(msg, exception)
         return ret

     def _headers_cb(self, extra_headers_cb, url):
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index e986b593..e4530408 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -407,7 +407,7 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
             self.assertEqual(0o644, get_mode(cfgpath, tmpd))

     def netplan_path(self):
-            return '/etc/netplan/50-cloud-init.yaml'
+        return '/etc/netplan/50-cloud-init.yaml'

     def test_apply_network_config_v1_to_netplan_ub(self):
         expected_cfgs = {
--
cgit v1.2.3

From 0bb4c74e7f2d008b015d5453a1be88ae807b1f9b Mon Sep 17 00:00:00 2001
From: "Guilherme G. Piccoli"
Date: Thu, 14 Feb 2019 20:37:32 +0000
Subject: EC2: Rewrite network config on AWS Classic instances every boot

AWS EC2 instance networking comes in two basic flavors: Classic and VPC
(Virtual Private Cloud). The former has the interesting behavior that an
instance's MAC address changes whenever the instance is
stopped/restarted. This behavior is not observed in VPC instances.

In Ubuntu 18.04 (Bionic) the network "management" changed from ENI-style
(/etc/network/interfaces) to netplan, and when using netplan we observe
the following block present in /etc/netplan/50-cloud-init.yaml:

  match:
    macaddress: aa:bb:cc:dd:ee:ff

Jani Ollikainen noticed in Launchpad bug #1802073 that EC2 Classic
instances were booting without network access in Bionic after a
stop/restart, due to their MAC address change behavior. It was narrowed
down to the netplan MAC match block, which kept the old MAC address
after stopping and restarting an instance, since by default the network
configuration is written only once on EC2 instances, during the first
boot.

This patch changes the network configuration to be written on every boot
on EC2 Classic instances, by checking for the "vpc-id" metadata
information provided only on VPC instances; if this metadata value is
absent, cloud-init will rewrite the network configuration file on every
boot.

This was tested on an EC2 Classic instance and fixes the issue; unit
tests were also added for the new is_classic_instance() method.

LP: #1802073

Reported-by: Jani Ollikainen
Suggested-by: Ryan Harper
Co-developed-by: Chad Smith
Signed-off-by: Guilherme G. Piccoli
---
 cloudinit/sources/DataSourceEc2.py          | 21 +++++++++++++++++++++
 doc/rtd/topics/datasources/ec2.rst          | 11 +++++++++++
 tests/unittests/test_datasource/test_ec2.py | 24 ++++++++++++++++++++++++
 3 files changed, 56 insertions(+)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index eb6f27b2..4f2f6ccb 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -19,6 +19,7 @@ from cloudinit import sources
 from cloudinit import url_helper as uhelp
 from cloudinit import util
 from cloudinit import warnings
+from cloudinit.event import EventType

 LOG = logging.getLogger(__name__)

@@ -107,6 +108,19 @@ class DataSourceEc2(sources.DataSource):
             'dynamic', {}).get('instance-identity', {}).get('document', {})
         return True

+    def is_classic_instance(self):
+        """Report if this instance type is Ec2 Classic (non-vpc)."""
+        if not self.metadata:
+            # Can return False on inconclusive as we are also called in
+            # network_config where metadata will be present.
+ # Secondary call site is in packaging postinst script. + return False + ifaces_md = self.metadata.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + return False + return True + @property def launch_index(self): if not self.metadata: @@ -320,6 +334,13 @@ class DataSourceEc2(sources.DataSource): if isinstance(net_md, dict): result = convert_ec2_metadata_network_config( net_md, macs_to_nics=macs_to_nics, fallback_nic=iface) + # RELEASE_BLOCKER: Xenial debian/postinst needs to add + # EventType.BOOT on upgrade path for classic. + + # Non-VPC (aka Classic) Ec2 instances need to rewrite the + # network config file every boot due to MAC address change. + if self.is_classic_instance(): + self.update_events['network'].add(EventType.BOOT) else: LOG.warning("Metadata 'network' key not valid: %s.", net_md) self._network_config = result diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst index 64c325d8..76beca92 100644 --- a/doc/rtd/topics/datasources/ec2.rst +++ b/doc/rtd/topics/datasources/ec2.rst @@ -90,4 +90,15 @@ An example configuration with the default values is provided below: max_wait: 120 timeout: 50 +Notes +----- + * There are 2 types of EC2 instances network-wise: VPC ones (Virtual Private + Cloud) and Classic ones (also known as non-VPC). One major difference + between them is that Classic instances have their MAC address changed on + stop/restart operations, so cloud-init will recreate the network config + file for EC2 Classic instances every boot. On VPC instances this file is + generated only in the first boot of the instance. + The check for the instance type is performed by is_classic_instance() + method. + .. vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 1a5956d9..20d59bfd 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -401,6 +401,30 @@ class TestEc2(test_helpers.HttprettyTestCase): ds.metadata = DEFAULT_METADATA self.assertEqual('my-identity-id', ds.get_instance_id()) + def test_classic_instance_true(self): + """If no vpc-id in metadata, is_classic_instance must return true.""" + md_copy = copy.deepcopy(DEFAULT_METADATA) + ifaces_md = md_copy.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + del mac_data['vpc-id'] + + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': md_copy}) + self.assertTrue(ds.get_data()) + self.assertTrue(ds.is_classic_instance()) + + def test_classic_instance_false(self): + """If vpc-id in metadata, is_classic_instance must return false.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + self.assertTrue(ds.get_data()) + self.assertFalse(ds.is_classic_instance()) + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_valid_platform_with_strict_true(self, m_dhcp): """Valid platform data should return true with strict_id true.""" -- cgit v1.2.3 From 34f54360fcc1e0f805002a0b639d0a84eb2cb8ee Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Fri, 22 Feb 2019 13:26:31 +0000 Subject: azure: Filter list of ssh keys pulled from fabric The Azure data source is expected to expose a list of ssh keys for the user-to-be-provisioned 
in the crawled metadata. When configured to use the __builtin__ agent this list is built by the WALinuxAgentShim. The shim retrieves the full set of certificates and public keys exposed to the VM from the wireserver, extracts any ssh keys it can, and returns that list. This fix reduces that list of ssh keys to just the ones whose fingerprints appear in the "administrative user" section of the ovf-env.xml file. The Azure control plane exposes other ssh keys to the VM for other reasons, but those should not be added to the authorized_keys file for the provisioned user. --- cloudinit/sources/DataSourceAzure.py | 13 +- cloudinit/sources/helpers/azure.py | 109 ++++++++++----- tests/data/azure/parse_certificates_fingerprints | 4 + tests/data/azure/parse_certificates_pem | 152 +++++++++++++++++++++ tests/data/azure/pubkey_extract_cert | 13 ++ tests/data/azure/pubkey_extract_ssh_key | 1 + .../unittests/test_datasource/test_azure_helper.py | 71 +++++++++- 7 files changed, 322 insertions(+), 41 deletions(-) create mode 100644 tests/data/azure/parse_certificates_fingerprints create mode 100644 tests/data/azure/parse_certificates_pem create mode 100644 tests/data/azure/pubkey_extract_cert create mode 100644 tests/data/azure/pubkey_extract_ssh_key (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index a4f998b3..eccbee5a 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -627,9 +627,11 @@ class DataSourceAzure(sources.DataSource): if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: self.bounce_network_with_azure_hostname() + pubkey_info = self.cfg.get('_pubkeys', None) metadata_func = partial(get_metadata_from_fabric, fallback_lease_file=self. - dhclient_lease_file) + dhclient_lease_file, + pubkey_info=pubkey_info) else: metadata_func = self.get_metadata_from_agent @@ -642,6 +644,7 @@ class DataSourceAzure(sources.DataSource): "Error communicating with Azure fabric; You may experience." "connectivity issues.", exc_info=True) return False + util.del_file(REPORTED_READY_MARKER_FILE) util.del_file(REPROVISION_MARKER_FILE) return fabric_data @@ -909,13 +912,15 @@ def find_child(node, filter_func): def load_azure_ovf_pubkeys(sshnode): # This parses a 'SSH' node formatted like below, and returns # an array of dicts. - # [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', - # 'path': 'where/to/go'}] + # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', + # 'path': '/where/to/go'}] # # - # ABC/ABC + # ABC/x/y/z # ... # + # Under some circumstances, there may be a element along with the + # Fingerprint and Path. Pass those along if they appear. 
results = find_child(sshnode, lambda n: n.localName == "PublicKeys") if len(results) == 0: return [] diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index e5696b1f..2829dd20 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -138,9 +138,36 @@ class OpenSSLManager(object): self.certificate = certificate LOG.debug('New certificate generated.') - def parse_certificates(self, certificates_xml): - tag = ElementTree.fromstring(certificates_xml).find( - './/Data') + @staticmethod + def _run_x509_action(action, cert): + cmd = ['openssl', 'x509', '-noout', action] + result, _ = util.subp(cmd, data=cert) + return result + + def _get_ssh_key_from_cert(self, certificate): + pub_key = self._run_x509_action('-pubkey', certificate) + keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] + ssh_key, _ = util.subp(keygen_cmd, data=pub_key) + return ssh_key + + def _get_fingerprint_from_cert(self, certificate): + """openssl x509 formats fingerprints as so: + 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ + B6:A8:BF:27:D4:73\n' + + Azure control plane passes that fingerprint as so: + '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + """ + raw_fp = self._run_x509_action('-fingerprint', certificate) + eq = raw_fp.find('=') + octets = raw_fp[eq+1:-1].split(':') + return ''.join(octets) + + def _decrypt_certs_from_xml(self, certificates_xml): + """Decrypt the certificates XML document using the our private key; + return the list of certs and private keys contained in the doc. + """ + tag = ElementTree.fromstring(certificates_xml).find('.//Data') certificates_content = tag.text lines = [ b'MIME-Version: 1.0', @@ -151,32 +178,30 @@ class OpenSSLManager(object): certificates_content.encode('utf-8'), ] with cd(self.tmpdir): - with open('Certificates.p7m', 'wb') as f: - f.write(b'\n'.join(lines)) out, _ = util.subp( - 'openssl cms -decrypt -in Certificates.p7m -inkey' + 'openssl cms -decrypt -in /dev/stdin -inkey' ' {private_key} -recip {certificate} | openssl pkcs12 -nodes' ' -password pass:'.format(**self.certificate_names), - shell=True) - private_keys, certificates = [], [] + shell=True, data=b'\n'.join(lines)) + return out + + def parse_certificates(self, certificates_xml): + """Given the Certificates XML document, return a dictionary of + fingerprints and associated SSH keys derived from the certs.""" + out = self._decrypt_certs_from_xml(certificates_xml) current = [] + keys = {} for line in out.splitlines(): current.append(line) if re.match(r'[-]+END .*?KEY[-]+$', line): - private_keys.append('\n'.join(current)) + # ignore private_keys current = [] elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line): - certificates.append('\n'.join(current)) + certificate = '\n'.join(current) + ssh_key = self._get_ssh_key_from_cert(certificate) + fingerprint = self._get_fingerprint_from_cert(certificate) + keys[fingerprint] = ssh_key current = [] - keys = [] - for certificate in certificates: - with cd(self.tmpdir): - public_key, _ = util.subp( - 'openssl x509 -noout -pubkey |' - 'ssh-keygen -i -m PKCS8 -f /dev/stdin', - data=certificate, - shell=True) - keys.append(public_key) return keys @@ -206,7 +231,6 @@ class WALinuxAgentShim(object): self.dhcpoptions = dhcp_options self._endpoint = None self.openssl_manager = None - self.values = {} self.lease_file = fallback_lease_file def clean_up(self): @@ -328,8 +352,9 @@ class WALinuxAgentShim(object): LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return 
endpoint_ip_address - def register_with_azure_and_fetch_data(self): - self.openssl_manager = OpenSSLManager() + def register_with_azure_and_fetch_data(self, pubkey_info=None): + if self.openssl_manager is None: + self.openssl_manager = OpenSSLManager() http_client = AzureEndpointHttpClient(self.openssl_manager.certificate) LOG.info('Registering with Azure...') attempts = 0 @@ -347,16 +372,37 @@ class WALinuxAgentShim(object): attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) - public_keys = [] - if goal_state.certificates_xml is not None: + ssh_keys = [] + if goal_state.certificates_xml is not None and pubkey_info is not None: LOG.debug('Certificate XML found; parsing out public keys.') - public_keys = self.openssl_manager.parse_certificates( + keys_by_fingerprint = self.openssl_manager.parse_certificates( goal_state.certificates_xml) - data = { - 'public-keys': public_keys, - } + ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info) self._report_ready(goal_state, http_client) - return data + return {'public-keys': ssh_keys} + + def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info): + """cloud-init expects a straightforward array of keys to be dropped + into the user's authorized_keys file. Azure control plane exposes + multiple public keys to the VM via wireserver. Select just the + user's key(s) and return them, ignoring any other certs. + """ + keys = [] + for pubkey in pubkey_info: + if 'value' in pubkey and pubkey['value']: + keys.append(pubkey['value']) + elif 'fingerprint' in pubkey and pubkey['fingerprint']: + fingerprint = pubkey['fingerprint'] + if fingerprint in keys_by_fingerprint: + keys.append(keys_by_fingerprint[fingerprint]) + else: + LOG.warning("ovf-env.xml specified PublicKey fingerprint " + "%s not found in goalstate XML", fingerprint) + else: + LOG.warning("ovf-env.xml specified PublicKey with neither " + "value nor fingerprint: %s", pubkey) + + return keys def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') @@ -373,11 +419,12 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') -def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): +def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, + pubkey_info=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts) try: - return shim.register_with_azure_and_fetch_data() + return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) finally: shim.clean_up() diff --git a/tests/data/azure/parse_certificates_fingerprints b/tests/data/azure/parse_certificates_fingerprints new file mode 100644 index 00000000..f7293c56 --- /dev/null +++ b/tests/data/azure/parse_certificates_fingerprints @@ -0,0 +1,4 @@ +ECEDEB3B8488D31AF3BC4CCED493F64B7D27D7B1 +073E19D14D1C799224C6A0FD8DDAB6A8BF27D473 +4C16E7FAD6297D74A9B25EB8F0A12808CEBE293E +929130695289B450FE45DCD5F6EF0CDE69865867 diff --git a/tests/data/azure/parse_certificates_pem b/tests/data/azure/parse_certificates_pem new file mode 100644 index 00000000..3521ea3a --- /dev/null +++ b/tests/data/azure/parse_certificates_pem @@ -0,0 +1,152 @@ +Bag Attributes + localKeyID: 01 00 00 00 + Microsoft CSP Name: Microsoft Enhanced Cryptographic Provider v1.0 +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDlEe5fUqwdrQTP +W2oVlGK2f31q/8ULT8KmOTyUvL0RPdJQ69vvHOc5Q2CKg2eviHC2LWhF8WmpnZj6 
+61RL0GeFGizwvU8Moebw5p3oqdcgoGpHVtxf+mr4QcWF58/Fwez0dA4hcsimVNBz +eNpBBUIKNBMTBG+4d6hcQBUAGKUdGRcCGEyTqXLU0MgHjxC9JgVqWJl+X2LcAGj5 +7J+tGYGTLzKJmeCeGVNN5ZtJ0T85MYHCKQk1/FElK+Kq5akovXffQHjlnCPcx0NJ +47NBjlPaFp2gjnAChn79bT4iCjOFZ9avWpqRpeU517UCnY7djOr3fuod/MSQyh3L +Wuem1tWBAgMBAAECggEBAM4ZXQRs6Kjmo95BHGiAEnSqrlgX+dycjcBq3QPh8KZT +nifqnf48XhnackENy7tWIjr3DctoUq4mOp8AHt77ijhqfaa4XSg7fwKeK9NLBGC5 +lAXNtAey0o2894/sKrd+LMkgphoYIUnuI4LRaGV56potkj/ZDP/GwTcG/R4SDnTn +C1Nb05PNTAPQtPZrgPo7TdM6gGsTnFbVrYHQLyg2Sq/osHfF15YohB01esRLCAwb +EF8JkRC4hWIZoV7BsyQ39232zAJQGGla7+wKFs3kObwh3VnFkQpT94KZnNiZuEfG +x5pW4Pn3gXgNsftscXsaNe/M9mYZqo//Qw7NvUIvAvECgYEA9AVveyK0HOA06fhh ++3hUWdvw7Pbrl+e06jO9+bT1RjQMbHKyI60DZyVGuAySN86iChJRoJr5c6xj+iXU +cR6BVJDjGH5t1tyiK2aYf6hEpK9/j8Z54UiVQ486zPP0PGfT2TO4lBLK+8AUmoaH +gk21ul8QeVCeCJa/o+xEoRFvzcUCgYEA8FCbbvInrUtNY+9eKaUYoNodsgBVjm5X +I0YPUL9D4d+1nvupHSV2NVmQl0w1RaJwrNTafrl5LkqjhQbmuWNta6QgfZzSA3LB +lWXo1Mm0azKdcD3qMGbvn0Q3zU+yGNEgmB/Yju3/NtgYRG6tc+FCWRbPbiCnZWT8 +v3C2Y0XggI0CgYEA2/jCZBgGkTkzue5kNVJlh5OS/aog+pCvL6hxCtarfBuTT3ed +Sje+p46cz3DVpmUpATc+Si8py7KNdYQAm/BJ2be6X+woi9Xcgo87zWgcaPCjZzId +0I2jsIE/Gl6XvpRCDrxnGWRPgt3GNP4szbPLrDPiH9oie8+Y9eYYf7G+PZkCgYEA +nRSzZOPYV4f/QDF4pVQLMykfe/iH9B/fyWjEHg3He19VQmRReIHCMMEoqBziPXAe +onpHj8oAkeer1wpZyhhZr6CKtFDLXgGm09bXSC/IRMHC81klORovyzU2HHfZfCtG +WOmIDnU2+0xpIGIP8sztJ3qnf97MTJSkOSadsWo9gwkCgYEAh5AQmJQmck88Dff2 +qIfJIX8d+BDw47BFJ89OmMFjGV8TNB+JO+AV4Vkodg4hxKpLqTFZTTUFgoYfy5u1 +1/BhAjpmCDCrzubCFhx+8VEoM2+2+MmnuQoMAm9+/mD/IidwRaARgXgvEmp7sfdt +RyWd+p2lYvFkC/jORQtDMY4uW1o= +-----END PRIVATE KEY----- +Bag Attributes + localKeyID: 02 00 00 00 + Microsoft CSP Name: Microsoft Strong Cryptographic Provider +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDlQhPrZwVQYFV4 +FBc0H1iTXYaznMpwZvEITKtXWACzTdguUderEVOkXW3HTi5HvC2rMayt0nqo3zcd +x1eGiqdjpZQ/wMrkz9wNEM/nNMsXntEwxk0jCVNKB/jz6vf+BOtrSI01SritAGZW +dpKoTUyztT8C2mA3X6D8g3m4Dd07ltnzxaDqAQIU5jBHh3f/Q14tlPNZWUIiqVTC +gDxgAe7MDmfs9h3CInTBX1XM5J4UsLTL23/padgeSvP5YF5qr1+0c7Tdftxr2lwA +N3rLkisf5EiLAToVyJJlgP/exo2I8DaIKe7DZzD3Y1CrurOpkcMKYu5kM1Htlbua +tDkAa2oDAgMBAAECggEAOvdueS9DyiMlCKAeQb1IQosdQOh0l0ma+FgEABC2CWhd +0LgjQTBRM6cGO+urcq7/jhdWQ1UuUG4tVn71z7itCi/F/Enhxc2C22d2GhFVpWsn +giSXJYpZ/mIjkdVfWNo6FRuRmmHwMys1p0qTOS+8qUJWhSzW75csqJZGgeUrAI61 +LBV5F0SGR7dR2xZfy7PeDs9xpD0QivDt5DpsZWPaPvw4QlhdLgw6/YU1h9vtm6ci +xLjnPRLZ7JMpcQHO8dUDl6FiEI7yQ11BDm253VQAVMddYRPQABn7SpEF8kD/aZVh +2Clvz61Rz80SKjPUthMPLWMCRp7zB0xDMzt3/1i+tQKBgQD6Ar1/oD3eFnRnpi4u +n/hdHJtMuXWNfUA4dspNjP6WGOid9sgIeUUdif1XyVJ+afITzvgpWc7nUWIqG2bQ +WxJ/4q2rjUdvjNXTy1voVungR2jD5WLQ9DKeaTR0yCliWlx4JgdPG7qGI5MMwsr+ +R/PUoUUhGeEX+o/sCSieO3iUrQKBgQDqwBEMvIdhAv/CK2sG3fsKYX8rFT55ZNX3 +Tix9DbUGY3wQColNuI8U1nDlxE9U6VOfT9RPqKelBLCgbzB23kdEJnjSlnqlTxrx +E+Hkndyf2ckdJAR3XNxoQ6SRLJNBsgoBj/z5tlfZE9/Jc+uh0mYy3e6g6XCVPBcz +MgoIc+ofbwKBgQCGQhZ1hR30N+bHCozeaPW9OvGDIE0qcEqeh9xYDRFilXnF6pK9 +SjJ9jG7KR8jPLiHb1VebDSl5O1EV/6UU2vNyTc6pw7LLCryBgkGW4aWy1WZDXNnW +EG1meGS9GghvUss5kmJ2bxOZmV0Mi0brisQ8OWagQf+JGvtS7BAt+Q3l+QKBgAb9 +8YQPmXiqPjPqVyW9Ntz4SnFeEJ5NApJ7IZgX8GxgSjGwHqbR+HEGchZl4ncE/Bii +qBA3Vcb0fM5KgYcI19aPzsl28fA6ivLjRLcqfIfGVNcpW3iyq13vpdctHLW4N9QU +FdTaOYOds+ysJziKq8CYG6NvUIshXw+HTgUybqbBAoGBAIIOqcmmtgOClAwipA17 +dAHsI9Sjk+J0+d4JU6o+5TsmhUfUKIjXf5+xqJkJcQZMEe5GhxcCuYkgFicvh4Hz +kv2H/EU35LcJTqC6KTKZOWIbGcn1cqsvwm3GQJffYDiO8fRZSwCaif2J3F2lfH4Y +R/fA67HXFSTT+OncdRpY1NOn +-----END PRIVATE KEY----- +Bag Attributes: +subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US 
+issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIlPjJXzrRih4C +k/XsoI01oqo7IUxH3dA2F7vHGXQoIpKCp8Qe6Z6cFfdD8Uj+s+B1BX6hngwzIwjN +jE/23X3SALVzJVWzX4Y/IEjbgsuao6sOyNyB18wIU9YzZkVGj68fmMlUw3LnhPbe +eWkufZaJCaLyhQOwlRMbOcn48D6Ys8fccOyXNzpq3rH1OzeQpxS2M8zaJYP4/VZ/ +sf6KRpI7bP+QwyFvNKfhcaO9/gj4kMo9lVGjvDU20FW6g8UVNJCV9N4GO6mOcyqo +OhuhVfjCNGgW7N1qi0TIVn0/MQM4l4dcT2R7Z/bV9fhMJLjGsy5A4TLAdRrhKUHT +bzi9HyDvAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 01 00 00 00 +subject=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com +issuer=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com +-----BEGIN CERTIFICATE----- +MIID7TCCAtWgAwIBAgIJALQS3yMg3R41MA0GCSqGSIb3DQEBCwUAMIGMMQswCQYD +VQQGEwJVUzETMBEGA1UECAwKV0FTSElOR1RPTjEQMA4GA1UEBwwHU2VhdHRsZTES +MBAGA1UECgwJTWljcm9zb2Z0MQ4wDAYDVQQLDAVBenVyZTEOMAwGA1UEAwwFQW5o +Vm8xIjAgBgkqhkiG9w0BCQEWE2FuaHZvQG1pY3Jvc29mdC5jb20wHhcNMTkwMjE0 +MjMxMjQwWhcNMjExMTEwMjMxMjQwWjCBjDELMAkGA1UEBhMCVVMxEzARBgNVBAgM +CldBU0hJTkdUT04xEDAOBgNVBAcMB1NlYXR0bGUxEjAQBgNVBAoMCU1pY3Jvc29m +dDEOMAwGA1UECwwFQXp1cmUxDjAMBgNVBAMMBUFuaFZvMSIwIAYJKoZIhvcNAQkB +FhNhbmh2b0BtaWNyb3NvZnQuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA5RHuX1KsHa0Ez1tqFZRitn99av/FC0/Cpjk8lLy9ET3SUOvb7xznOUNg +ioNnr4hwti1oRfFpqZ2Y+utUS9BnhRos8L1PDKHm8Oad6KnXIKBqR1bcX/pq+EHF +hefPxcHs9HQOIXLIplTQc3jaQQVCCjQTEwRvuHeoXEAVABilHRkXAhhMk6ly1NDI +B48QvSYFaliZfl9i3ABo+eyfrRmBky8yiZngnhlTTeWbSdE/OTGBwikJNfxRJSvi +quWpKL1330B45Zwj3MdDSeOzQY5T2hadoI5wAoZ+/W0+IgozhWfWr1qakaXlOde1 +Ap2O3Yzq937qHfzEkMody1rnptbVgQIDAQABo1AwTjAdBgNVHQ4EFgQUPvdgLiv3 +pAk4r0QTPZU3PFOZJvgwHwYDVR0jBBgwFoAUPvdgLiv3pAk4r0QTPZU3PFOZJvgw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAVUHZT+h9+uCPLTEl5IDg +kqd9WpzXA7PJd/V+7DeDDTkEd06FIKTWZLfxLVVDjQJnQqubQb//e0zGu1qKbXnX +R7xqWabGU4eyPeUFWddmt1OHhxKLU3HbJNJJdL6XKiQtpGGUQt/mqNQ/DEr6hhNF +im5I79iA8H/dXA2gyZrj5Rxea4mtsaYO0mfp1NrFtJpAh2Djy4B1lBXBIv4DWG9e +mMEwzcLCOZj2cOMA6+mdLMUjYCvIRtnn5MKUHyZX5EmX79wsqMTvVpddlVLB9Kgz +Qnvft9+SBWh9+F3ip7BsL6Q4Q9v8eHRbnP0ya7ddlgh64uwf9VOfZZdKCnwqudJP +3g== +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 02 00 00 00 +subject=/CN=/subscriptions/redacted/resourcegroups/redacted/providers/Microsoft.Compute/virtualMachines/redacted +issuer=/CN=Microsoft.ManagedIdentity +-----BEGIN CERTIFICATE----- +MIIDnTCCAoWgAwIBAgIUB2lauSRccvFkoJybUfIwOUqBN7MwDQYJKoZIhvcNAQEL +BQAwJDEiMCAGA1UEAxMZTWljcm9zb2Z0Lk1hbmFnZWRJZGVudGl0eTAeFw0xOTAy +MTUxOTA5MDBaFw0xOTA4MTQxOTA5MDBaMIGUMYGRMIGOBgNVBAMTgYYvc3Vic2Ny +aXB0aW9ucy8yN2I3NTBjZC1lZDQzLTQyZmQtOTA0NC04ZDc1ZTEyNGFlNTUvcmVz +b3VyY2Vncm91cHMvYW5oZXh0cmFzc2gvcHJvdmlkZXJzL01pY3Jvc29mdC5Db21w +dXRlL3ZpcnR1YWxNYWNoaW5lcy9hbmh0ZXN0Y2VydDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAOVCE+tnBVBgVXgUFzQfWJNdhrOcynBm8QhMq1dYALNN +2C5R16sRU6RdbcdOLke8LasxrK3SeqjfNx3HV4aKp2OllD/AyuTP3A0Qz+c0yxee +0TDGTSMJU0oH+PPq9/4E62tIjTVKuK0AZlZ2kqhNTLO1PwLaYDdfoPyDebgN3TuW +2fPFoOoBAhTmMEeHd/9DXi2U81lZQiKpVMKAPGAB7swOZ+z2HcIidMFfVczknhSw +tMvbf+lp2B5K8/lgXmqvX7RztN1+3GvaXAA3esuSKx/kSIsBOhXIkmWA/97GjYjw +Nogp7sNnMPdjUKu6s6mRwwpi7mQzUe2Vu5q0OQBragMCAwEAAaNWMFQwDgYDVR0P 
+AQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwHwYD +VR0jBBgwFoAUOJvzEsriQWdJBndPrK+Me1bCPjYwDQYJKoZIhvcNAQELBQADggEB +AFGP/g8o7Hv/to11M0UqfzJuW/AyH9RZtSRcNQFLZUndwweQ6fap8lFsA4REUdqe +7Quqp5JNNY1XzKLWXMPoheIDH1A8FFXdsAroArzlNs9tO3TlIHE8A7HxEVZEmR4b +7ZiixmkQPS2RkjEoV/GM6fheBrzuFn7X5kVZyE6cC5sfcebn8xhk3ZcXI0VmpdT0 +jFBsf5IvFCIXXLLhJI4KXc8VMoKFU1jT9na/jyaoGmfwovKj4ib8s2aiXGAp7Y38 +UCmY+bJapWom6Piy5Jzi/p/kzMVdJcSa+GqpuFxBoQYEVs2XYVl7cGu/wPM+NToC +pkSoWwF1QAnHn0eokR9E1rU= +-----END CERTIFICATE----- +Bag Attributes: +subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US +issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_cert b/tests/data/azure/pubkey_extract_cert new file mode 100644 index 00000000..ce9b852d --- /dev/null +++ b/tests/data/azure/pubkey_extract_cert @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_ssh_key b/tests/data/azure/pubkey_extract_ssh_key new file mode 100644 index 00000000..54d749ed --- /dev/null +++ b/tests/data/azure/pubkey_extract_ssh_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHU9IDclbKVYVbYuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoinlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmWvwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4yWzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7t5btUyvp diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 26b2b93d..02556165 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -1,11 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import os +import unittest2 from textwrap import dedent from cloudinit.sources.helpers import azure as azure_helper from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir +from cloudinit.util import load_file from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim GOAL_STATE_TEMPLATE = """\ @@ -289,6 +291,50 @@ class TestOpenSSLManager(CiTestCase): self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) +class TestOpenSSLManagerActions(CiTestCase): + + def setUp(self): + super(TestOpenSSLManagerActions, self).setUp() + + self.allowed_subp = True + + def _data_file(self, name): + path = 'tests/data/azure' + return os.path.join(path, name) + + @unittest2.skip("todo move to cloud_test") + def test_pubkey_extract(self): + cert = load_file(self._data_file('pubkey_extract_cert')) + good_key = load_file(self._data_file('pubkey_extract_ssh_key')) + sslmgr = azure_helper.OpenSSLManager() + key = sslmgr._get_ssh_key_from_cert(cert) + self.assertEqual(good_key, key) + + good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + fingerprint = sslmgr._get_fingerprint_from_cert(cert) + self.assertEqual(good_fingerprint, fingerprint) + + @unittest2.skip("todo move to cloud_test") + @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') + def test_parse_certificates(self, mock_decrypt_certs): + """Azure control plane puts private keys as well as certificates + into the Certificates XML object. Make sure only the public keys + from certs are extracted and that fingerprints are converted to + the form specified in the ovf-env.xml file. + """ + cert_contents = load_file(self._data_file('parse_certificates_pem')) + fingerprints = load_file(self._data_file( + 'parse_certificates_fingerprints') + ).splitlines() + mock_decrypt_certs.return_value = cert_contents + sslmgr = azure_helper.OpenSSLManager() + keys_by_fp = sslmgr.parse_certificates('') + for fp in keys_by_fp.keys(): + self.assertIn(fp, fingerprints) + for fp in fingerprints: + self.assertIn(fp, keys_by_fp) + + class TestWALinuxAgentShim(CiTestCase): def setUp(self): @@ -329,18 +375,31 @@ class TestWALinuxAgentShim(CiTestCase): def test_certificates_used_to_determine_public_keys(self): shim = wa_shim() - data = shim.register_with_azure_and_fetch_data() + """if register_with_azure_and_fetch_data() isn't passed some info about + the user's public keys, there's no point in even trying to parse + the certificates + """ + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, + {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] + certs = {'fp1': 'expected-key', + 'fp2': 'should-not-be-found', + 'fp3': 'expected-no-value-key', + } + sslmgr = self.OpenSSLManager.return_value + sslmgr.parse_certificates.return_value = certs + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual( [mock.call(self.GoalState.return_value.certificates_xml)], - self.OpenSSLManager.return_value.parse_certificates.call_args_list) - self.assertEqual( - self.OpenSSLManager.return_value.parse_certificates.return_value, - data['public-keys']) + sslmgr.parse_certificates.call_args_list) + self.assertIn('expected-key', data['public-keys']) + self.assertIn('expected-no-value-key', data['public-keys']) + self.assertNotIn('should-not-be-found', data['public-keys']) def test_absent_certificates_produces_empty_public_keys(self): + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] self.GoalState.return_value.certificates_xml = None shim = wa_shim() - data = 
shim.register_with_azure_and_fetch_data() + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual([], data['public-keys']) def test_correct_url_used_for_report_ready(self): -- cgit v1.2.3 From f278a8a3dbb1e45e8d83491ad24e41812cb77ddb Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 26 Feb 2019 15:23:58 +0000 Subject: util: don't determine string_types ourselves six already provides this for us, and we're already paying the cost to determine it there; no need to do it twice. --- cloudinit/sources/DataSourceOVF.py | 4 +++- cloudinit/util.py | 7 +------ 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 3a3fcdf6..70e7a5c0 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -15,6 +15,8 @@ import os import re import time +import six + from cloudinit import log as logging from cloudinit import sources from cloudinit import util @@ -434,7 +436,7 @@ def maybe_cdrom_device(devname): """ if not devname: return False - elif not isinstance(devname, util.string_types): + elif not isinstance(devname, six.string_types): raise ValueError("Unexpected input for devname: %s" % devname) # resolve '..' and multi '/' elements diff --git a/cloudinit/util.py b/cloudinit/util.py index 2be528a0..e5403f7d 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -51,11 +51,6 @@ from cloudinit import version from cloudinit.settings import (CFG_BUILTIN) -try: - string_types = (basestring,) -except NameError: - string_types = (str,) - _DNS_REDIRECT_IP = None LOG = logging.getLogger(__name__) @@ -125,7 +120,7 @@ def target_path(target, path=None): # return 'path' inside target, accepting target as None if target in (None, ""): target = "/" - elif not isinstance(target, string_types): + elif not isinstance(target, six.string_types): raise ValueError("Unexpected input for target: %s" % target) else: target = os.path.abspath(target) -- cgit v1.2.3 From 5352dd99eb2937b4eaaaf596b40ad7ca69d87f64 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 4 Mar 2019 18:41:05 +0000 Subject: helpers/openstack: Treat unknown link types as physical Some deployments of OpenStack expose link types to the guest which cloud-init doesn't recognise. These will almost always be physical, so we can operate more robustly if we assume that they are (whilst warning the user that we're seeing something unexpected). LP: #1639263 --- cloudinit/sources/helpers/openstack.py | 12 +++++------ .../unittests/test_datasource/test_configdrive.py | 23 ++++++++++++++++++++++ 2 files changed, 29 insertions(+), 6 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 9c29ceac..8f069115 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -67,7 +67,7 @@ OS_VERSIONS = ( OS_ROCKY, ) -PHYSICAL_TYPES = ( +KNOWN_PHYSICAL_TYPES = ( None, 'bgpovs', # not present in OpenStack upstream but used on OVH cloud. 
'bridge', @@ -600,9 +600,7 @@ def convert_net_json(network_json=None, known_macs=None): subnet['ipv6'] = True subnets.append(subnet) cfg.update({'subnets': subnets}) - if link['type'] in PHYSICAL_TYPES: - cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) - elif link['type'] in ['bond']: + if link['type'] in ['bond']: params = {} if link_mac_addr: params['mac_address'] = link_mac_addr @@ -641,8 +639,10 @@ def convert_net_json(network_json=None, known_macs=None): curinfo.update({'mac': link['vlan_mac_address'], 'name': name}) else: - raise ValueError( - 'Unknown network_data link type: %s' % link['type']) + if link['type'] not in KNOWN_PHYSICAL_TYPES: + LOG.warning('Unknown network_data link type (%s); treating as' + ' physical', link['type']) + cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) config.append(cfg) link_id_info[curinfo['id']] = curinfo diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 7a6802f6..520c50fe 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -600,6 +600,9 @@ class TestNetJson(CiTestCase): class TestConvertNetworkData(CiTestCase): + + with_logs = True + def setUp(self): super(TestConvertNetworkData, self).setUp() self.tmp = self.tmp_dir() @@ -726,6 +729,26 @@ class TestConvertNetworkData(CiTestCase): 'enp0s2': 'fa:16:3e:d4:57:ad'} self.assertEqual(expected, config_name2mac) + def test_unknown_device_types_accepted(self): + # If we don't recognise a link, we should treat it as physical for a + # best-effort boot + my_netdata = deepcopy(NETWORK_DATA) + my_netdata['links'][0]['type'] = 'my-special-link-type' + + ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) + config_name2mac = {} + for n in ncfg['config']: + if n['type'] == 'physical': + config_name2mac[n['name']] = n['mac_address'] + + expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', + 'enp0s2': 'fa:16:3e:d4:57:ad'} + self.assertEqual(expected, config_name2mac) + + # We should, however, warn the user that we don't recognise the type + self.assertIn('Unknown network_data link type (my-special-link-type)', + self.logs.getvalue()) + def cfg_ds_from_dir(base_d, files=None): run = os.path.join(base_d, "run") -- cgit v1.2.3 From 1e6a72b679838d87c425edd21013260e9f17b500 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 12 Mar 2019 14:52:53 +0000 Subject: DataSourceEc2: update RELEASE_BLOCKER to be more accurate Our previous understanding of the upgrade issue was incomplete; it turns out the only change we need is the one now outlined. --- cloudinit/sources/DataSourceEc2.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 4f2f6ccb..ac28f1db 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -334,8 +334,12 @@ class DataSourceEc2(sources.DataSource): if isinstance(net_md, dict): result = convert_ec2_metadata_network_config( net_md, macs_to_nics=macs_to_nics, fallback_nic=iface) - # RELEASE_BLOCKER: Xenial debian/postinst needs to add - # EventType.BOOT on upgrade path for classic. + + # RELEASE_BLOCKER: xenial should drop the below if statement, + # because the issue being addressed doesn't exist pre-netplan. 
+ # (This datasource doesn't implement check_instance_id() so the + # datasource object is recreated every boot; this means we don't + # need to modify update_events on cloud-init upgrade.) # Non-VPC (aka Classic) Ec2 instances need to rewrite the # network config file every boot due to MAC address change. -- cgit v1.2.3 From f2fd6eac4407e60d0e98826ab03847dda4cde138 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 14 Mar 2019 23:06:47 +0000 Subject: DataSource: move update_events from a class to an instance attribute Currently, DataSourceAzure updates self.update_events in __init__. As update_events is a class attribute on DataSource, this updates it for all instances of classes derived from DataSource including those for other clouds. This means that if DataSourceAzure is even instantiated, its behaviour is applied to whichever data source ends up being used for boot. To address this, update_events is moved from a class attribute to an instance attribute (that is therefore populated at instantiation time). This retains the defaults for all DataSource sub-class instances, but avoids them being able to mutate the state in instances of other DataSource sub-classes. update_events is only ever referenced on an instance of DataSource (or a sub-class); no code relies on it being a class attribute. (In fact, it's only used within methods on DataSource or its sub-classes, so it doesn't even _need_ to remain public, though I think it's appropriate for it to be public.) DataSourceScaleway is also updated to move update_events from a class attribute to an instance attribute, as the class attribute would now be masked by the DataSource instance attribute. LP: #1819913 --- cloudinit/sources/DataSourceScaleway.py | 3 ++- cloudinit/sources/__init__.py | 6 +++--- cloudinit/sources/tests/test_init.py | 15 +++++++++++++++ tests/unittests/test_datasource/test_scaleway.py | 7 +++++++ 4 files changed, 27 insertions(+), 4 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index b573b382..54bfc1fe 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -171,10 +171,11 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): dsname = "Scaleway" - update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) + self.update_events = { + 'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}} self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}), diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index e6966b31..1604932d 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -164,9 +164,6 @@ class DataSource(object): # A datasource which supports writing network config on each system boot # would call update_events['network'].add(EventType.BOOT). - # Default: generate network config on new instance id (first boot). - update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])} - # N-tuple listing default values for any metadata-related class # attributes cached on an instance by a process_data runs. These attribute # values are reset via clear_cached_attrs during any update_metadata call. 
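The pitfall this commit guards against is ordinary Python class-attribute
sharing; a standalone sketch (class names invented, not cloud-init's):

    class Base(object):
        # One dict object, shared by every instance of every subclass.
        events = {'network': {'boot-new-instance'}}


    class Mutating(Base):
        def __init__(self):
            # Mutates the single shared set, not a per-instance copy.
            self.events['network'].add('boot')


    Mutating()
    print(Base().events['network'])  # {'boot-new-instance', 'boot'}: leaked

Moving the dict assignment into __init__, as the hunk below does, gives
every instance its own copy.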
@@ -191,6 +188,9 @@ class DataSource(object): self.vendordata = None self.vendordata_raw = None + # Default: generate network config on new instance id (first boot). + self.update_events = {'network': {EventType.BOOT_NEW_INSTANCE}} + self.ds_cfg = util.get_cfg_by_path( self.sys_cfg, ("datasource", self.dsname), {}) if not self.ds_cfg: diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 6378e98b..cb1912be 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -575,6 +575,21 @@ class TestDataSource(CiTestCase): " events: New instance first boot", self.logs.getvalue()) + def test_data_sources_cant_mutate_update_events_for_others(self): + """update_events shouldn't be changed for other DSes (LP: #1819913)""" + + class ModifyingDS(DataSource): + + def __init__(self, sys_cfg, distro, paths): + # This mirrors what DataSourceAzure does which causes LP: + # #1819913 + DataSource.__init__(self, sys_cfg, distro, paths) + self.update_events['network'].add(EventType.BOOT) + + before_update_events = copy.deepcopy(self.datasource.update_events) + ModifyingDS(self.sys_cfg, self.distro, self.paths) + self.assertEqual(before_update_events, self.datasource.update_events) + class TestRedactSensitiveData(CiTestCase): diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index f96bf0a2..3bfd7527 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -7,6 +7,7 @@ import requests from cloudinit import helpers from cloudinit import settings +from cloudinit.event import EventType from cloudinit.sources import DataSourceScaleway from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase @@ -403,3 +404,9 @@ class TestDataSourceScaleway(HttprettyTestCase): netcfg = self.datasource.network_config self.assertEqual(netcfg, '0xdeadbeef') + + def test_update_events_is_correct(self): + """ensure update_events contains correct data""" + self.assertEqual( + {'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}, + self.datasource.update_events) -- cgit v1.2.3 From 0dc3a77f41f4544e4cb5a41637af7693410d4cdf Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Tue, 26 Mar 2019 18:53:50 +0000 Subject: Azure: Ensure platform random_seed is always serializable as JSON. The Azure platform surfaces random bytes into /sys via Hyper-V. Python 2.7 json.dump() raises an exception if asked to convert a str with non-character content, and python 3.0 json.dump() won't serialize a "bytes" value. As a result, c-i instance data is often not written by Azure, making reboots slower (c-i has to repeat work). The random data is base64-encoded and then decoded into a string (str or unicode depending on the version of Python in use). The base64 string has just as many bits of entropy, so we're not throwing away useful "information", but we can be certain json.dump() will correctly serialize the bits. 
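A minimal, self-contained sketch of the fix (standard library only; the
helper name is illustrative, not the exact cloud-init function):

    import base64
    import json

    def serializable_seed(raw):
        """Return raw seed octets as a JSON-safe text string, or None."""
        if raw is None:
            return None
        # b64encode accepts arbitrary octets and yields ASCII bytes;
        # .decode() turns those into text json.dumps() can always handle.
        return base64.b64encode(raw).decode()

    seed = serializable_seed(b'\x97\x00C\xb4{V\xf4')  # non-UTF-8 octets
    print(json.dumps({'seed': seed}))  # serializes without error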
--- cloudinit/sources/DataSourceAzure.py | 24 +++++++++++++++++++----- tests/data/azure/non_unicode_random_string | 1 + tests/unittests/test_datasource/test_azure.py | 24 ++++++++++++++++++++++-- 3 files changed, 42 insertions(+), 7 deletions(-) create mode 100644 tests/data/azure/non_unicode_random_string (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index eccbee5a..b4e3f061 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -54,6 +54,7 @@ REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" AGENT_SEED_DIR = '/var/lib/waagent' IMDS_URL = "http://169.254.169.254/metadata/" +PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" # List of static scripts and network config artifacts created by # stock ubuntu suported images. @@ -195,6 +196,8 @@ if util.is_FreeBSD(): RESOURCE_DISK_PATH = "/dev/" + res_disk else: LOG.debug("resource disk is None") + # TODO Find where platform entropy data is surfaced + PLATFORM_ENTROPY_SOURCE = None BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, @@ -1100,16 +1103,27 @@ def _check_freebsd_cdrom(cdrom_dev): return False -def _get_random_seed(): +def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): """Return content random seed file if available, otherwise, return None.""" # azure / hyper-v provides random data here - # TODO. find the seed on FreeBSD platform # now update ds_cfg to reflect contents pass in config - if util.is_FreeBSD(): + if source is None: return None - return util.load_file("/sys/firmware/acpi/tables/OEM0", - quiet=True, decode=False) + seed = util.load_file(source, quiet=True, decode=False) + + # The seed generally contains non-Unicode characters. load_file puts + # them into a str (in python 2) or bytes (in python 3). In python 2, + # bad octets in a str cause util.json_dumps() to throw an exception. In + # python 3, bytes is a non-serializable type, and the handler load_file + # uses applies b64 encoding *again* to handle it. The simplest solution + # is to just b64encode the data and then decode it to a serializable + # string. Same number of bits of entropy, just with 25% more zeroes. + # There's no need to undo this base64-encoding when the random seed is + # actually used in cc_seed_random.py. 
+ seed = base64.b64encode(seed).decode() + + return seed def list_possible_azure_ds_devs(): diff --git a/tests/data/azure/non_unicode_random_string b/tests/data/azure/non_unicode_random_string new file mode 100644 index 00000000..b9ecefb9 --- /dev/null +++ b/tests/data/azure/non_unicode_random_string @@ -0,0 +1 @@ +OEM0d\x00\x00\x00\x01\x80VRTUALMICROSFT\x02\x17\x00\x06MSFT\x97\x00\x00\x00C\xb4{V\xf4X%\x061x\x90\x1c\xfen\x86\xbf~\xf5\x8c\x94&\x88\xed\x84\xf9B\xbd\xd3\xf1\xdb\xee:\xd9\x0fc\x0e\x83(\xbd\xe3'\xfc\x85,\xdf\xf4\x13\x99N\xc5\xf3Y\x1e\xe3\x0b\xa4H\x08J\xb9\xdcdb$ \ No newline at end of file diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 6b05b8f1..53c56cd0 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -7,11 +7,11 @@ from cloudinit.sources import ( UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) from cloudinit.util import (b64e, decode_binary, load_file, write_file, find_freebsd_part, get_path_dev_freebsd, - MountFailedError) + MountFailedError, json_dumps, load_json) from cloudinit.version import version_string as vs from cloudinit.tests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, - ExitStack) + ExitStack, resourceLocation) import crypt import httpretty @@ -1923,4 +1923,24 @@ class TestWBIsPlatformViable(CiTestCase): self.logs.getvalue()) +class TestRandomSeed(CiTestCase): + """Test proper handling of random_seed""" + + def test_non_ascii_seed_is_serializable(self): + """Pass if a random string from the Azure infrastructure which + contains at least one non-Unicode character can be converted to/from + JSON without alteration and without throwing an exception. + """ + path = resourceLocation("azure/non_unicode_random_string") + result = dsaz._get_random_seed(path) + + obj = {'seed': result} + try: + serialized = json_dumps(obj) + deserialized = load_json(serialized) + except UnicodeDecodeError: + self.fail("Non-serializable random seed returned") + + self.assertEqual(deserialized['seed'], result) + # vi: ts=4 expandtab -- cgit v1.2.3 From 0d8c88393b51db6454491a379dcc2e691551217a Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Wed, 3 Apr 2019 18:23:18 +0000 Subject: DatasourceAzure: add additional logging for azure datasource Create an Azure logging decorator and use additional ReportEventStack context managers to provide additional logging details. 
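The decorator at the heart of this change, reduced to a runnable sketch.
The context manager below is a stand-in for
cloudinit.reporting.events.ReportEventStack, and functools.wraps is an
editorial addition to preserve the wrapped function's metadata; treat
both as assumptions rather than the shipped code:

    import functools
    from contextlib import contextmanager

    @contextmanager
    def report_event_stack(name, description, parent=None):
        # Mimic ReportEventStack: emit start/finish events around a call.
        print('start: %s (%s)' % (name, description))
        try:
            yield
        finally:
            print('finish: %s' % name)

    def azure_ds_telemetry_reporter(func):
        @functools.wraps(func)
        def impl(*args, **kwargs):
            with report_event_stack(name=func.__name__,
                                    description=func.__name__):
                return func(*args, **kwargs)
        return impl

    @azure_ds_telemetry_reporter
    def crawl_metadata():
        return {'instance-id': 'iid-example'}

    crawl_metadata()  # emits start/finish events around the call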
--- cloudinit/sources/DataSourceAzure.py | 231 ++++++++++++++++++++++------------- cloudinit/sources/helpers/azure.py | 31 +++++ 2 files changed, 179 insertions(+), 83 deletions(-) mode change 100644 => 100755 cloudinit/sources/DataSourceAzure.py mode change 100644 => 100755 cloudinit/sources/helpers/azure.py (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py old mode 100644 new mode 100755 index b4e3f061..d4230b3c --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -21,10 +21,14 @@ from cloudinit import net from cloudinit.event import EventType from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources -from cloudinit.sources.helpers.azure import get_metadata_from_fabric from cloudinit.sources.helpers import netlink from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util +from cloudinit.reporting import events + +from cloudinit.sources.helpers.azure import (azure_ds_reporter, + azure_ds_telemetry_reporter, + get_metadata_from_fabric) LOG = logging.getLogger(__name__) @@ -244,6 +248,7 @@ def set_hostname(hostname, hostname_command='hostname'): util.subp([hostname_command, hostname]) +@azure_ds_telemetry_reporter @contextlib.contextmanager def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): """ @@ -290,6 +295,7 @@ class DataSourceAzure(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) + @azure_ds_telemetry_reporter def bounce_network_with_azure_hostname(self): # When using cloud-init to provision, we have to set the hostname from # the metadata and "bounce" the network to force DDNS to update via @@ -315,6 +321,7 @@ class DataSourceAzure(sources.DataSource): util.logexc(LOG, "handling set_hostname failed") return False + @azure_ds_telemetry_reporter def get_metadata_from_agent(self): temp_hostname = self.metadata.get('local-hostname') agent_cmd = self.ds_cfg['agent_command'] @@ -344,15 +351,18 @@ class DataSourceAzure(sources.DataSource): LOG.debug("ssh authentication: " "using fingerprint from fabirc") - # wait very long for public SSH keys to arrive - # https://bugs.launchpad.net/cloud-init/+bug/1717611 - missing = util.log_time(logfunc=LOG.debug, - msg="waiting for SSH public key files", - func=util.wait_for_files, - args=(fp_files, 900)) - - if len(missing): - LOG.warning("Did not find files, but going on: %s", missing) + with events.ReportEventStack( + name="waiting-for-ssh-public-key", + description="wait for agents to retrieve ssh keys", + parent=azure_ds_reporter): + # wait very long for public SSH keys to arrive + # https://bugs.launchpad.net/cloud-init/+bug/1717611 + missing = util.log_time(logfunc=LOG.debug, + msg="waiting for SSH public key files", + func=util.wait_for_files, + args=(fp_files, 900)) + if len(missing): + LOG.warning("Did not find files, but going on: %s", missing) metadata = {} metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) @@ -366,6 +376,7 @@ class DataSourceAzure(sources.DataSource): subplatform_type = 'seed-dir' return '%s (%s)' % (subplatform_type, self.seed) + @azure_ds_telemetry_reporter def crawl_metadata(self): """Walk all instance metadata sources returning a dict on success. 
@@ -467,6 +478,7 @@ class DataSourceAzure(sources.DataSource): super(DataSourceAzure, self).clear_cached_attrs(attr_defaults) self._metadata_imds = sources.UNSET + @azure_ds_telemetry_reporter def _get_data(self): """Crawl and process datasource metadata caching metadata as attrs. @@ -513,6 +525,7 @@ class DataSourceAzure(sources.DataSource): # quickly (local check only) if self.instance_id is still valid return sources.instance_id_matches_system_uuid(self.get_instance_id()) + @azure_ds_telemetry_reporter def setup(self, is_new_instance): if self._negotiated is False: LOG.debug("negotiating for %s (new_instance=%s)", @@ -580,6 +593,7 @@ class DataSourceAzure(sources.DataSource): if nl_sock: nl_sock.close() + @azure_ds_telemetry_reporter def _report_ready(self, lease): """Tells the fabric provisioning has completed """ try: @@ -617,9 +631,14 @@ class DataSourceAzure(sources.DataSource): def _reprovision(self): """Initiate the reprovisioning workflow.""" contents = self._poll_imds() - md, ud, cfg = read_azure_ovf(contents) - return (md, ud, cfg, {'ovf-env.xml': contents}) - + with events.ReportEventStack( + name="reprovisioning-read-azure-ovf", + description="read azure ovf during reprovisioning", + parent=azure_ds_reporter): + md, ud, cfg = read_azure_ovf(contents) + return (md, ud, cfg, {'ovf-env.xml': contents}) + + @azure_ds_telemetry_reporter def _negotiate(self): """Negotiate with fabric and return data from it. @@ -652,6 +671,7 @@ class DataSourceAzure(sources.DataSource): util.del_file(REPROVISION_MARKER_FILE) return fabric_data + @azure_ds_telemetry_reporter def activate(self, cfg, is_new_instance): address_ephemeral_resize(is_new_instance=is_new_instance, preserve_ntfs=self.ds_cfg.get( @@ -690,12 +710,14 @@ def _partitions_on_device(devpath, maxnum=16): return [] +@azure_ds_telemetry_reporter def _has_ntfs_filesystem(devpath): ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) LOG.debug('ntfs_devices found = %s', ntfs_devices) return os.path.realpath(devpath) in ntfs_devices +@azure_ds_telemetry_reporter def can_dev_be_reformatted(devpath, preserve_ntfs): """Determine if the ephemeral drive at devpath should be reformatted. @@ -744,43 +766,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): (cand_part, cand_path, devpath)) return False, msg + @azure_ds_telemetry_reporter def count_files(mp): ignored = set(['dataloss_warning_readme.txt']) return len([f for f in os.listdir(mp) if f.lower() not in ignored]) bmsg = ('partition %s (%s) on device %s was ntfs formatted' % (cand_part, cand_path, devpath)) - try: - file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", - update_env_for_mount={'LANG': 'C'}) - except util.MountFailedError as e: - if "unknown filesystem type 'ntfs'" in str(e): - return True, (bmsg + ' but this system cannot mount NTFS,' - ' assuming there are no important files.' - ' Formatting allowed.') - return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) - - if file_count != 0: - LOG.warning("it looks like you're using NTFS on the ephemeral disk, " - 'to ensure that filesystem does not get wiped, set ' - '%s.%s in config', '.'.join(DS_CFG_PATH), - DS_CFG_KEY_PRESERVE_NTFS) - return False, bmsg + ' but had %d files on it.' 
% file_count + + with events.ReportEventStack( + name="mount-ntfs-and-count", + description="mount-ntfs-and-count", + parent=azure_ds_reporter) as evt: + try: + file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", + update_env_for_mount={'LANG': 'C'}) + except util.MountFailedError as e: + evt.description = "cannot mount ntfs" + if "unknown filesystem type 'ntfs'" in str(e): + return True, (bmsg + ' but this system cannot mount NTFS,' + ' assuming there are no important files.' + ' Formatting allowed.') + return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) + + if file_count != 0: + evt.description = "mounted and counted %d files" % file_count + LOG.warning("it looks like you're using NTFS on the ephemeral" + " disk, to ensure that filesystem does not get wiped," + " set %s.%s in config", '.'.join(DS_CFG_PATH), + DS_CFG_KEY_PRESERVE_NTFS) + return False, bmsg + ' but had %d files on it.' % file_count return True, bmsg + ' and had no important files. Safe for reformatting.' +@azure_ds_telemetry_reporter def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, is_new_instance=False, preserve_ntfs=False): # wait for ephemeral disk to come up naplen = .2 - missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, - log_pre="Azure ephemeral disk: ") - - if missing: - LOG.warning("ephemeral device '%s' did not appear after %d seconds.", - devpath, maxwait) - return + with events.ReportEventStack( + name="wait-for-ephemeral-disk", + description="wait for ephemeral disk", + parent=azure_ds_reporter): + missing = util.wait_for_files([devpath], + maxwait=maxwait, + naplen=naplen, + log_pre="Azure ephemeral disk: ") + + if missing: + LOG.warning("ephemeral device '%s' did" + " not appear after %d seconds.", + devpath, maxwait) + return result = False msg = None @@ -808,6 +846,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, return +@azure_ds_telemetry_reporter def perform_hostname_bounce(hostname, cfg, prev_hostname): # set the hostname to 'hostname' if it is not already set to that. # then, if policy is not off, bounce the interface using command @@ -843,6 +882,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): return True +@azure_ds_telemetry_reporter def crtfile_to_pubkey(fname, data=None): pipeline = ('openssl x509 -noout -pubkey < "$0" |' 'ssh-keygen -i -m PKCS8 -f /dev/stdin') @@ -851,6 +891,7 @@ def crtfile_to_pubkey(fname, data=None): return out.rstrip() +@azure_ds_telemetry_reporter def pubkeys_from_crt_files(flist): pubkeys = [] errors = [] @@ -866,6 +907,7 @@ def pubkeys_from_crt_files(flist): return pubkeys +@azure_ds_telemetry_reporter def write_files(datadir, files, dirmode=None): def _redact_password(cnt, fname): @@ -893,6 +935,7 @@ def write_files(datadir, files, dirmode=None): util.write_file(filename=fname, content=content, mode=0o600) +@azure_ds_telemetry_reporter def invoke_agent(cmd): # this is a function itself to simplify patching it for test if cmd: @@ -912,6 +955,7 @@ def find_child(node, filter_func): return ret +@azure_ds_telemetry_reporter def load_azure_ovf_pubkeys(sshnode): # This parses a 'SSH' node formatted like below, and returns # an array of dicts. 
@@ -964,6 +1008,7 @@ def load_azure_ovf_pubkeys(sshnode): return found +@azure_ds_telemetry_reporter def read_azure_ovf(contents): try: dom = minidom.parseString(contents) @@ -1064,6 +1109,7 @@ def read_azure_ovf(contents): return (md, ud, cfg) +@azure_ds_telemetry_reporter def _extract_preprovisioned_vm_setting(dom): """Read the preprovision flag from the ovf. It should not exist unless true.""" @@ -1092,6 +1138,7 @@ def encrypt_pass(password, salt_id="$6$"): return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) +@azure_ds_telemetry_reporter def _check_freebsd_cdrom(cdrom_dev): """Return boolean indicating path to cdrom device has content.""" try: @@ -1103,6 +1150,7 @@ def _check_freebsd_cdrom(cdrom_dev): return False +@azure_ds_telemetry_reporter def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): """Return content random seed file if available, otherwise, return None.""" @@ -1126,6 +1174,7 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): return seed +@azure_ds_telemetry_reporter def list_possible_azure_ds_devs(): devlist = [] if util.is_FreeBSD(): @@ -1140,6 +1189,7 @@ def list_possible_azure_ds_devs(): return devlist +@azure_ds_telemetry_reporter def load_azure_ds_dir(source_dir): ovf_file = os.path.join(source_dir, "ovf-env.xml") @@ -1162,47 +1212,54 @@ def parse_network_config(imds_metadata): @param: imds_metadata: Dict of content read from IMDS network service. @return: Dictionary containing network version 2 standard configuration. """ - if imds_metadata != sources.UNSET and imds_metadata: - netconfig = {'version': 2, 'ethernets': {}} - LOG.debug('Azure: generating network configuration from IMDS') - network_metadata = imds_metadata['network'] - for idx, intf in enumerate(network_metadata['interface']): - nicname = 'eth{idx}'.format(idx=idx) - dev_config = {} - for addr4 in intf['ipv4']['ipAddress']: - privateIpv4 = addr4['privateIpAddress'] - if privateIpv4: - if dev_config.get('dhcp4', False): - # Append static address config for nic > 1 - netPrefix = intf['ipv4']['subnet'][0].get( - 'prefix', '24') - if not dev_config.get('addresses'): - dev_config['addresses'] = [] - dev_config['addresses'].append( - '{ip}/{prefix}'.format( - ip=privateIpv4, prefix=netPrefix)) - else: - dev_config['dhcp4'] = True - for addr6 in intf['ipv6']['ipAddress']: - privateIpv6 = addr6['privateIpAddress'] - if privateIpv6: - dev_config['dhcp6'] = True - break - if dev_config: - mac = ':'.join(re.findall(r'..', intf['macAddress'])) - dev_config.update( - {'match': {'macaddress': mac.lower()}, - 'set-name': nicname}) - netconfig['ethernets'][nicname] = dev_config - else: - blacklist = ['mlx4_core'] - LOG.debug('Azure: generating fallback configuration') - # generate a network config, blacklist picking mlx4_core devs - netconfig = net.generate_fallback_config( - blacklist_drivers=blacklist, config_driver=True) - return netconfig + with events.ReportEventStack( + name="parse_network_config", + description="", + parent=azure_ds_reporter) as evt: + if imds_metadata != sources.UNSET and imds_metadata: + netconfig = {'version': 2, 'ethernets': {}} + LOG.debug('Azure: generating network configuration from IMDS') + network_metadata = imds_metadata['network'] + for idx, intf in enumerate(network_metadata['interface']): + nicname = 'eth{idx}'.format(idx=idx) + dev_config = {} + for addr4 in intf['ipv4']['ipAddress']: + privateIpv4 = addr4['privateIpAddress'] + if privateIpv4: + if dev_config.get('dhcp4', False): + # Append static address config for nic > 1 + netPrefix = 
intf['ipv4']['subnet'][0].get( + 'prefix', '24') + if not dev_config.get('addresses'): + dev_config['addresses'] = [] + dev_config['addresses'].append( + '{ip}/{prefix}'.format( + ip=privateIpv4, prefix=netPrefix)) + else: + dev_config['dhcp4'] = True + for addr6 in intf['ipv6']['ipAddress']: + privateIpv6 = addr6['privateIpAddress'] + if privateIpv6: + dev_config['dhcp6'] = True + break + if dev_config: + mac = ':'.join(re.findall(r'..', intf['macAddress'])) + dev_config.update( + {'match': {'macaddress': mac.lower()}, + 'set-name': nicname}) + netconfig['ethernets'][nicname] = dev_config + evt.description = "network config from imds" + else: + blacklist = ['mlx4_core'] + LOG.debug('Azure: generating fallback configuration') + # generate a network config, blacklist picking mlx4_core devs + netconfig = net.generate_fallback_config( + blacklist_drivers=blacklist, config_driver=True) + evt.description = "network config from fallback" + return netconfig +@azure_ds_telemetry_reporter def get_metadata_from_imds(fallback_nic, retries): """Query Azure's network metadata service, returning a dictionary. @@ -1227,6 +1284,7 @@ def get_metadata_from_imds(fallback_nic, retries): return util.log_time(**kwargs) +@azure_ds_telemetry_reporter def _get_metadata_from_imds(retries): url = IMDS_URL + "instance?api-version=2017-12-01" @@ -1246,6 +1304,7 @@ def _get_metadata_from_imds(retries): return {} +@azure_ds_telemetry_reporter def maybe_remove_ubuntu_network_config_scripts(paths=None): """Remove Azure-specific ubuntu network config for non-primary nics. @@ -1283,14 +1342,20 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): def _is_platform_viable(seed_dir): - """Check platform environment to report if this datasource may run.""" - asset_tag = util.read_dmi_data('chassis-asset-tag') - if asset_tag == AZURE_CHASSIS_ASSET_TAG: - return True - LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) - if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): - return True - return False + with events.ReportEventStack( + name="check-platform-viability", + description="found azure asset tag", + parent=azure_ds_reporter) as evt: + + """Check platform environment to report if this datasource may run.""" + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag == AZURE_CHASSIS_ASSET_TAG: + return True + LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) + evt.description = "Non-Azure DMI asset tag '%s' discovered.", asset_tag + if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): + return True + return False class BrokenAzureDataSource(Exception): diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py old mode 100644 new mode 100755 index 2829dd20..d3af05ee --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -16,10 +16,27 @@ from xml.etree import ElementTree from cloudinit import url_helper from cloudinit import util +from cloudinit.reporting import events LOG = logging.getLogger(__name__) +azure_ds_reporter = events.ReportEventStack( + name="azure-ds", + description="initialize reporter for azure ds", + reporting_enabled=True) + + +def azure_ds_telemetry_reporter(func): + def impl(*args, **kwargs): + with events.ReportEventStack( + name=func.__name__, + description=func.__name__, + parent=azure_ds_reporter): + return func(*args, **kwargs) + return impl + + @contextmanager def cd(newdir): prevdir = os.getcwd() @@ -119,6 +136,7 @@ class OpenSSLManager(object): def clean_up(self): 
util.del_dir(self.tmpdir) + @azure_ds_telemetry_reporter def generate_certificate(self): LOG.debug('Generating certificate for communication with fabric...') if self.certificate is not None: @@ -139,17 +157,20 @@ class OpenSSLManager(object): LOG.debug('New certificate generated.') @staticmethod + @azure_ds_telemetry_reporter def _run_x509_action(action, cert): cmd = ['openssl', 'x509', '-noout', action] result, _ = util.subp(cmd, data=cert) return result + @azure_ds_telemetry_reporter def _get_ssh_key_from_cert(self, certificate): pub_key = self._run_x509_action('-pubkey', certificate) keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] ssh_key, _ = util.subp(keygen_cmd, data=pub_key) return ssh_key + @azure_ds_telemetry_reporter def _get_fingerprint_from_cert(self, certificate): """openssl x509 formats fingerprints as so: 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ @@ -163,6 +184,7 @@ class OpenSSLManager(object): octets = raw_fp[eq+1:-1].split(':') return ''.join(octets) + @azure_ds_telemetry_reporter def _decrypt_certs_from_xml(self, certificates_xml): """Decrypt the certificates XML document using the our private key; return the list of certs and private keys contained in the doc. @@ -185,6 +207,7 @@ class OpenSSLManager(object): shell=True, data=b'\n'.join(lines)) return out + @azure_ds_telemetry_reporter def parse_certificates(self, certificates_xml): """Given the Certificates XML document, return a dictionary of fingerprints and associated SSH keys derived from the certs.""" @@ -265,11 +288,13 @@ class WALinuxAgentShim(object): return socket.inet_ntoa(packed_bytes) @staticmethod + @azure_ds_telemetry_reporter def _networkd_get_value_from_leases(leases_d=None): return dhcp.networkd_get_option_from_leases( 'OPTION_245', leases_d=leases_d) @staticmethod + @azure_ds_telemetry_reporter def _get_value_from_leases_file(fallback_lease_file): leases = [] content = util.load_file(fallback_lease_file) @@ -287,6 +312,7 @@ class WALinuxAgentShim(object): return leases[-1] @staticmethod + @azure_ds_telemetry_reporter def _load_dhclient_json(): dhcp_options = {} hooks_dir = WALinuxAgentShim._get_hooks_dir() @@ -305,6 +331,7 @@ class WALinuxAgentShim(object): return dhcp_options @staticmethod + @azure_ds_telemetry_reporter def _get_value_from_dhcpoptions(dhcp_options): if dhcp_options is None: return None @@ -318,6 +345,7 @@ class WALinuxAgentShim(object): return _value @staticmethod + @azure_ds_telemetry_reporter def find_endpoint(fallback_lease_file=None, dhcp245=None): value = None if dhcp245 is not None: @@ -352,6 +380,7 @@ class WALinuxAgentShim(object): LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return endpoint_ip_address + @azure_ds_telemetry_reporter def register_with_azure_and_fetch_data(self, pubkey_info=None): if self.openssl_manager is None: self.openssl_manager = OpenSSLManager() @@ -404,6 +433,7 @@ class WALinuxAgentShim(object): return keys + @azure_ds_telemetry_reporter def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') document = self.REPORT_READY_XML_TEMPLATE.format( @@ -419,6 +449,7 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') +@azure_ds_telemetry_reporter def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, pubkey_info=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, -- cgit v1.2.3 From 528366820bb48c13957d0c58afc2a46a3ba84bef Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Wed, 3 Apr 2019 22:23:29 
+0000 Subject: Azure: Treat _unset network configuration as if it were absent When the Azure datasource persists all of its metadata to the instance directory, it deliberately sets the self.network_config value to be the sources.UNSET value. The goal is to ensure that each time the system boots, fresh network configuration data is fetched from the cloud platform so that any control plane changes will take effect. When a VM is first created, there's no pickled instance to restore, so self._network_config is None, resulting in self.network_config() properly building a new config. Azure suffered from LP: #1801364 which prevented ds from being stored in obj.pkl in the instance directory, so subsequent reboots always regenerated their network configuration. Commit 0dc3a77f41f4544e4cb5a41637af7693410d4cdf introduced a new bug in which self.network_config() assumed the self._network_config value was either None or trustable; when the config was unpickled, that value was _unset, thus breaking the assumption. LP: #1823084 --- cloudinit/sources/DataSourceAzure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d4230b3c..76b16616 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -688,7 +688,7 @@ class DataSourceAzure(sources.DataSource): 2. Generate a fallback network config that does not include any of the blacklisted devices. """ - if not self._network_config: + if not self._network_config or self._network_config == sources.UNSET: if self.ds_cfg.get('apply_network_config'): nc_src = self._metadata_imds else: -- cgit v1.2.3 From b76714c355a87416f9f07156b0f025aceaca7296 Mon Sep 17 00:00:00 2001 From: Risto Oikarinen Date: Tue, 9 Apr 2019 18:05:24 +0000 Subject: Change DataSourceNoCloud to ignore file system label's case. NoCloud data source now accepts both 'cidata' and 'CIDATA' as filesystem labels. This is similar to DataSourceConfigDrive's support for 'config-2' and 'CONFIG-2'. --- cloudinit/sources/DataSourceNoCloud.py | 4 ++- doc/rtd/topics/datasources/nocloud.rst | 2 +- tests/unittests/test_datasource/test_nocloud.py | 42 +++++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 17 ++++++++++ tools/ds-identify | 7 +++-- 5 files changed, 67 insertions(+), 5 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 6860f0cc..fcf5d589 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -106,7 +106,9 @@ class DataSourceNoCloud(sources.DataSource): fslist = util.find_devs_with("TYPE=vfat") fslist.extend(util.find_devs_with("TYPE=iso9660")) - label_list = util.find_devs_with("LABEL=%s" % label) + label_list = util.find_devs_with("LABEL=%s" % label.upper()) + label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) + devlist = list(set(fslist) & set(label_list)) devlist.sort(reverse=True) diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst index 08578e86..1c5cf961 100644 --- a/doc/rtd/topics/datasources/nocloud.rst +++ b/doc/rtd/topics/datasources/nocloud.rst @@ -9,7 +9,7 @@ network at all). You can provide meta-data and user-data to a local vm boot via files on a `vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be -``cidata``. +``cidata`` or ``CIDATA``. 
Alternatively, you can provide meta-data via kernel command line or SMBIOS "serial number" option. The data must be passed in the form of a string: diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 3429272c..b785362f 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -32,6 +32,36 @@ class TestNoCloudDataSource(CiTestCase): self.mocks.enter_context( mock.patch.object(util, 'read_dmi_data', return_value=None)) + def _test_fs_config_is_read(self, fs_label, fs_label_to_search): + vfat_device = 'device-1' + + def m_mount_cb(device, callback, mtype): + if (device == vfat_device): + return {'meta-data': yaml.dump({'instance-id': 'IID'})} + else: + return {} + + def m_find_devs_with(query='', path=''): + if 'TYPE=vfat' == query: + return [vfat_device] + elif 'LABEL={}'.format(fs_label) == query: + return [vfat_device] + else: + return [] + + self.mocks.enter_context( + mock.patch.object(util, 'find_devs_with', + side_effect=m_find_devs_with)) + self.mocks.enter_context( + mock.patch.object(util, 'mount_cb', + side_effect=m_mount_cb)) + sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}} + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + + self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') + self.assertTrue(ret) + def test_nocloud_seed_dir_on_lxd(self, m_is_lxd): md = {'instance-id': 'IID', 'dsmode': 'local'} ud = b"USER_DATA_HERE" @@ -90,6 +120,18 @@ class TestNoCloudDataSource(CiTestCase): ret = dsrc.get_data() self.assertFalse(ret) + def test_fs_config_lowercase_label(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'cidata') + + def test_fs_config_uppercase_label(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'cidata') + + def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'CIDATA') + + def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'CIDATA') + def test_no_datasource_expected(self, m_is_lxd): # no source should be found if no cmdline, config, and fs_label=None sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index d00c1b4b..8c18aa1a 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -520,6 +520,10 @@ class TestDsIdentify(DsIdentifyBase): """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" self._test_ds_found('NoCloud') + def test_nocloud_upper(self): + """NoCloud is found with uppercase filesystem label.""" + self._test_ds_found('NoCloudUpper') + def test_nocloud_seed(self): """Nocloud seed directory.""" self._test_ds_found('NoCloud-seed') @@ -713,6 +717,19 @@ VALID_CFG = { 'dev/vdb': 'pretend iso content for cidata\n', } }, + 'NoCloudUpper': { + 'ds': 'NoCloud', + 'mocks': [ + MOCK_VIRT_IS_KVM, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + BLKID_UEFI_UBUNTU + + [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])}, + ], + 'files': { + 'dev/vdb': 'pretend iso content for cidata\n', + } + }, 'NoCloud-seed': { 'ds': 'NoCloud', 'files': { diff --git a/tools/ds-identify b/tools/ds-identify index b78b2731..6518901e 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -620,7 +620,7 @@ dscheck_MAAS() { } dscheck_NoCloud() { - local fslabel="cidata" d="" + local fslabel="cidata CIDATA" d="" case 
" ${DI_KERNEL_CMDLINE} " in *\ ds=nocloud*) return ${DS_FOUND};; esac @@ -632,9 +632,10 @@ dscheck_NoCloud() { check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} done - if has_fs_with_label "${fslabel}"; then + if has_fs_with_label $fslabel; then return ${DS_FOUND} fi + return ${DS_NOT_FOUND} } @@ -762,7 +763,7 @@ is_cdrom_ovf() { # explicitly skip known labels of other types. rd_rdfe is azure. case "$label" in - config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;; + config-2|CONFIG-2|rd_rdfe_stable*|cidata|CIDATA) return 1;; esac local idstr="http://schemas.dmtf.org/ovf/environment/1" -- cgit v1.2.3 From 6322c2ddf4b68a8e7cc467a07fb20a1d151a2ef3 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 10 Apr 2019 20:21:37 +0000 Subject: Revert "DataSource: move update_events from a class to an instance..." Moving update_events from a class attribute to an instance attribute means that it doesn't exist on DataSource objects that are unpickled, causing tracebacks on cloud-init upgrade. As this change is only required for cloud-init installations which don't utilise ds-identify, we're backing it out to be reintroduced once the upgrade path bug has been addressed. This reverts commit f2fd6eac4407e60d0e98826ab03847dda4cde138. --- cloudinit/sources/DataSourceScaleway.py | 3 +-- cloudinit/sources/__init__.py | 6 +++--- cloudinit/sources/tests/test_init.py | 15 --------------- tests/unittests/test_datasource/test_scaleway.py | 7 ------- 4 files changed, 4 insertions(+), 27 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 54bfc1fe..b573b382 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -171,11 +171,10 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): dsname = "Scaleway" + update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) - self.update_events = { - 'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}} self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}), diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 1604932d..e6966b31 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -164,6 +164,9 @@ class DataSource(object): # A datasource which supports writing network config on each system boot # would call update_events['network'].add(EventType.BOOT). + # Default: generate network config on new instance id (first boot). + update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])} + # N-tuple listing default values for any metadata-related class # attributes cached on an instance by a process_data runs. These attribute # values are reset via clear_cached_attrs during any update_metadata call. @@ -188,9 +191,6 @@ class DataSource(object): self.vendordata = None self.vendordata_raw = None - # Default: generate network config on new instance id (first boot). 
- self.update_events = {'network': {EventType.BOOT_NEW_INSTANCE}} - self.ds_cfg = util.get_cfg_by_path( self.sys_cfg, ("datasource", self.dsname), {}) if not self.ds_cfg: diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index cb1912be..6378e98b 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -575,21 +575,6 @@ class TestDataSource(CiTestCase): " events: New instance first boot", self.logs.getvalue()) - def test_data_sources_cant_mutate_update_events_for_others(self): - """update_events shouldn't be changed for other DSes (LP: #1819913)""" - - class ModifyingDS(DataSource): - - def __init__(self, sys_cfg, distro, paths): - # This mirrors what DataSourceAzure does which causes LP: - # #1819913 - DataSource.__init__(self, sys_cfg, distro, paths) - self.update_events['network'].add(EventType.BOOT) - - before_update_events = copy.deepcopy(self.datasource.update_events) - ModifyingDS(self.sys_cfg, self.distro, self.paths) - self.assertEqual(before_update_events, self.datasource.update_events) - class TestRedactSensitiveData(CiTestCase): diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index 3bfd7527..f96bf0a2 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -7,7 +7,6 @@ import requests from cloudinit import helpers from cloudinit import settings -from cloudinit.event import EventType from cloudinit.sources import DataSourceScaleway from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase @@ -404,9 +403,3 @@ class TestDataSourceScaleway(HttprettyTestCase): netcfg = self.datasource.network_config self.assertEqual(netcfg, '0xdeadbeef') - - def test_update_events_is_correct(self): - """ensure update_events contains correct data""" - self.assertEqual( - {'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}, - self.datasource.update_events) -- cgit v1.2.3 From 937555fd422edf8235430afab3c0ab69f9e3b3a4 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Thu, 18 Apr 2019 16:08:20 +0000 Subject: mount_cb: do not pass sync and rw options to mount On FreeBSD, mount_cd9660 does not accept the sync option that is enabled by default. In addition, the sync is only useful with the `rw` mode. However the `rw` mode was never used. This patch removes the `rw` and `sync` parameter of `mount_cb` to simplify the code base and resolve the FreeBSD issue. 
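The resulting command construction, pulled out into a standalone sketch
(the helper name and example paths are illustrative):

    def build_mount_cmd(device, mountpoint, mtype=None):
        # Always mount read-only: 'rw' was never used, and 'sync' is
        # pointless without 'rw' and rejected by FreeBSD's mount_cd9660.
        cmd = ['mount', '-o', 'ro']
        if mtype:
            cmd.extend(['-t', mtype])
        cmd.extend([device, mountpoint])
        return cmd

    print(build_mount_cmd('/dev/cd0', '/run/tmpmnt', mtype='cd9660'))
    # ['mount', '-o', 'ro', '-t', 'cd9660', '/dev/cd0', '/run/tmpmnt']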
LP: #1645824 --- cloudinit/sources/DataSourceAzure.py | 2 +- cloudinit/sources/DataSourceConfigDrive.py | 7 ++----- cloudinit/util.py | 15 ++------------- 3 files changed, 5 insertions(+), 19 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 76b16616..64165259 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -407,7 +407,7 @@ class DataSourceAzure(sources.DataSource): elif cdev.startswith("/dev/"): if util.is_FreeBSD(): ret = util.mount_cb(cdev, load_azure_ds_dir, - mtype="udf", sync=False) + mtype="udf") else: ret = util.mount_cb(cdev, load_azure_ds_dir) else: diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 564e3eb3..571d30dc 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -72,15 +72,12 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): dslist = self.sys_cfg.get('datasource_list') for dev in find_candidate_devs(dslist=dslist): try: - # Set mtype if freebsd and turn off sync - if dev.startswith("/dev/cd"): + if util.is_FreeBSD() and dev.startswith("/dev/cd"): mtype = "cd9660" - sync = False else: mtype = None - sync = True results = util.mount_cb(dev, read_config_drive, - mtype=mtype, sync=sync) + mtype=mtype) found = dev except openstack.NonReadable: pass diff --git a/cloudinit/util.py b/cloudinit/util.py index 385f231c..ea4199cd 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1679,7 +1679,7 @@ def mounts(): return mounted -def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True, +def mount_cb(device, callback, data=None, mtype=None, update_env_for_mount=None): """ Mount the device, call method 'callback' passing the directory @@ -1726,18 +1726,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True, for mtype in mtypes: mountpoint = None try: - mountcmd = ['mount'] - mountopts = [] - if rw: - mountopts.append('rw') - else: - mountopts.append('ro') - if sync: - # This seems like the safe approach to do - # (ie where this is on by default) - mountopts.append("sync") - if mountopts: - mountcmd.extend(["-o", ",".join(mountopts)]) + mountcmd = ['mount', '-o', 'ro'] if mtype: mountcmd.extend(['-t', mtype]) mountcmd.append(device) -- cgit v1.2.3 From ab6621d849b24bb652243e88c79f6f3b446048d7 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Wed, 8 May 2019 14:54:03 +0000 Subject: DataSourceAzure: Adjust timeout for polling IMDS If the IMDS primary server is not available, falling back to the secondary server takes about 1s. The net result is that the expected E2E time is slightly more than 1s. This change increases the timeout to 2s to prevent the infinite loop of timeouts. 
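To see why a 1s timeout looped forever, a toy model of the poll under
the ~1s failover described above (timings, names, and the TimeoutError
stand-in for the real UrlError handling are assumptions for
illustration):

    IMDS_TIMEOUT_IN_SECONDS = 2

    def poll_imds(request, retries=2):
        # With timeout=1 every attempt can expire during the ~1s
        # fallback to the secondary server, so no attempt ever
        # completes; timeout=2 leaves ~1s of useful request time.
        for attempt in range(1 + retries):
            try:
                return request(timeout=IMDS_TIMEOUT_IN_SECONDS)
            except TimeoutError:
                print('IMDS attempt %d timed out' % attempt)
        return None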
--- cloudinit/sources/DataSourceAzure.py | 15 ++++++++++----- tests/unittests/test_datasource/test_azure.py | 10 +++++++--- 2 files changed, 17 insertions(+), 8 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 64165259..b7440c1d 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -57,7 +57,12 @@ AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" AGENT_SEED_DIR = '/var/lib/waagent' + +# In the event where the IMDS primary server is not +# available, it takes 1s to fallback to the secondary one +IMDS_TIMEOUT_IN_SECONDS = 2 IMDS_URL = "http://169.254.169.254/metadata/" + PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" # List of static scripts and network config artifacts created by @@ -582,9 +587,9 @@ class DataSourceAzure(sources.DataSource): return self._ephemeral_dhcp_ctx.clean_network() else: - return readurl(url, timeout=1, headers=headers, - exception_cb=exc_cb, infinite=True, - log_req_resp=False).contents + return readurl(url, timeout=IMDS_TIMEOUT_IN_SECONDS, + headers=headers, exception_cb=exc_cb, + infinite=True, log_req_resp=False).contents except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry self._ephemeral_dhcp_ctx.clean_network() @@ -1291,8 +1296,8 @@ def _get_metadata_from_imds(retries): headers = {"Metadata": "true"} try: response = readurl( - url, timeout=1, headers=headers, retries=retries, - exception_cb=retry_on_url_exc) + url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, + retries=retries, exception_cb=retry_on_url_exc) except Exception as e: LOG.debug('Ignoring IMDS instance metadata: %s', e) return {} diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index ab77c034..427ab7e7 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -163,7 +163,8 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): m_readurl.assert_called_with( self.network_md_url, exception_cb=mock.ANY, - headers={'Metadata': 'true'}, retries=2, timeout=1) + headers={'Metadata': 'true'}, retries=2, + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS) @mock.patch('cloudinit.url_helper.time.sleep') @mock.patch(MOCKPATH + 'net.is_up') @@ -1791,7 +1792,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs() - }, method='GET', timeout=1, + }, method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, url=full_url)]) self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( @@ -1828,7 +1830,9 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs()}, - method='GET', timeout=1, url=full_url)]) + method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, + url=full_url)]) self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', -- cgit v1.2.3 From 9aa97cfc73b31dc548a240e5f4bd1ef41861cc4d Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 8 May 2019 15:14:31 +0000 Subject: replace remaining occurrences of LOG.warn --- cloudinit/sources/DataSourceCloudStack.py | 2 +- cloudinit/sources/DataSourceEc2.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git 
a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index d4b758f2..f185dc71 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -95,7 +95,7 @@ class DataSourceCloudStack(sources.DataSource): start_time = time.time() url = uhelp.wait_for_url( urls=urls, max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, status_cb=LOG.warn) + timeout=url_params.timeout_seconds, status_cb=LOG.warning) if url: LOG.debug("Using metadata source: '%s'", url) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index ac28f1db..5c017bfb 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -208,7 +208,7 @@ class DataSourceEc2(sources.DataSource): start_time = time.time() url = uhelp.wait_for_url( urls=urls, max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, status_cb=LOG.warn) + timeout=url_params.timeout_seconds, status_cb=LOG.warning) if url: self.metadata_address = url2base[url] -- cgit v1.2.3 From baa478546d8cac98a706010699d64f8c2f70b5bf Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Fri, 10 May 2019 18:38:55 +0000 Subject: Azure: Return static fallback address as if failed to find endpoint The Azure data source helper attempts to use information in the dhcp lease to find the Wireserver endpoint (IP address). Under some unusual circumstances, those attempts will fail. This change uses a static address, known to be always correct in the Azure public and sovereign clouds, when the helper fails to locate a valid dhcp lease. This address is not guaranteed to be correct in Azure Stack environments; it's still best to use the information from the lease whenever possible. 
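How the colon-separated lease value maps onto the well-known wireserver
address, as a standard-library sketch (this covers only the hex-octet
form shown; the real shim copes with other lease encodings as well):

    import socket
    import struct

    DEFAULT_WIRESERVER_ENDPOINT = 'a8:3f:81:10'  # as found in lease files

    def endpoint_from_lease_value(value):
        # 'a8:3f:81:10' -> 0xa83f8110 -> packed bytes -> dotted quad
        packed = struct.pack('>L', int(value.replace(':', ''), 16))
        return socket.inet_ntoa(packed)

    print(endpoint_from_lease_value(DEFAULT_WIRESERVER_ENDPOINT))
    # 168.63.129.16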
--- cloudinit/sources/helpers/azure.py | 14 +++++++++++--- tests/unittests/test_datasource/test_azure_helper.py | 9 +++++++-- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index d3af05ee..82c4c8c4 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -20,6 +20,9 @@ from cloudinit.reporting import events LOG = logging.getLogger(__name__) +# This endpoint matches the format as found in dhcp lease files, since this +# value is applied if the endpoint can't be found within a lease file +DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10" azure_ds_reporter = events.ReportEventStack( name="azure-ds", @@ -297,7 +300,12 @@ class WALinuxAgentShim(object): @azure_ds_telemetry_reporter def _get_value_from_leases_file(fallback_lease_file): leases = [] - content = util.load_file(fallback_lease_file) + try: + content = util.load_file(fallback_lease_file) + except IOError as ex: + LOG.error("Failed to read %s: %s", fallback_lease_file, ex) + return None + LOG.debug("content is %s", content) option_name = _get_dhcp_endpoint_option_name() for line in content.splitlines(): @@ -372,9 +380,9 @@ class WALinuxAgentShim(object): fallback_lease_file) value = WALinuxAgentShim._get_value_from_leases_file( fallback_lease_file) - if value is None: - raise ValueError('No endpoint found.') + LOG.warning("No lease found; using default endpoint") + value = DEFAULT_WIRESERVER_ENDPOINT endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) LOG.debug('Azure endpoint found at %s', endpoint_ip_address) diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 02556165..bd006aba 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -67,12 +67,17 @@ class TestFindEndpoint(CiTestCase): self.networkd_leases.return_value = None def test_missing_file(self): - self.assertRaises(ValueError, wa_shim.find_endpoint) + """wa_shim find_endpoint uses default endpoint if leasefile not found + """ + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") def test_missing_special_azure_line(self): + """wa_shim find_endpoint uses default endpoint if leasefile is found + but does not contain DHCP Option 245 (whose value is the endpoint) + """ self.load_file.return_value = '' self.dhcp_options.return_value = {'eth0': {'key': 'value'}} - self.assertRaises(ValueError, wa_shim.find_endpoint) + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") @staticmethod def _build_lease_content(encoded_address): -- cgit v1.2.3 From 0f8695323262e41c699588c7cd140f6b58c62017 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Fri, 24 May 2019 21:39:19 +0000 Subject: freebsd: NoCloud data source support blkid is a Linux-only command. With this patch, cloud-init uses another approach to find the data source on FreeBSD. 
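The FreeBSD branch of the new lookup as a standalone sketch; blkid is
Linux-only, so FreeBSD is probed via label-named device nodes instead
(the label value is illustrative):

    import os

    def freebsd_nocloud_devices(label):
        # FreeBSD surfaces labelled filesystems as device nodes such as
        # /dev/msdosfs/<label> and /dev/iso9660/<label>, which replaces
        # the Linux-only blkid-backed find_devs_with query.
        return [p for p in ('/dev/msdosfs/' + label,
                            '/dev/iso9660/' + label)
                if os.path.exists(p)]

    print(freebsd_nocloud_devices('cidata'))  # e.g. ['/dev/iso9660/cidata']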
LP: #1645824 --- cloudinit/sources/DataSourceNoCloud.py | 40 ++++++++++++++----------- config/cloud.cfg.tmpl | 4 +-- tests/unittests/test_datasource/test_nocloud.py | 18 +++++++++++ 3 files changed, 43 insertions(+), 19 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index fcf5d589..8a9e5dd2 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -35,6 +35,26 @@ class DataSourceNoCloud(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) + def _get_devices(self, label): + if util.is_FreeBSD(): + devlist = [ + p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label] + if os.path.exists(p)] + else: + # Query optical drive to get it in blkid cache for 2.6 kernels + util.find_devs_with(path="/dev/sr0") + util.find_devs_with(path="/dev/sr1") + + fslist = util.find_devs_with("TYPE=vfat") + fslist.extend(util.find_devs_with("TYPE=iso9660")) + + label_list = util.find_devs_with("LABEL=%s" % label.upper()) + label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) + + devlist = list(set(fslist) & set(label_list)) + devlist.sort(reverse=True) + return devlist + def _get_data(self): defaults = { "instance-id": "nocloud", @@ -99,20 +119,7 @@ class DataSourceNoCloud(sources.DataSource): label = self.ds_cfg.get('fs_label', "cidata") if label is not None: - # Query optical drive to get it in blkid cache for 2.6 kernels - util.find_devs_with(path="/dev/sr0") - util.find_devs_with(path="/dev/sr1") - - fslist = util.find_devs_with("TYPE=vfat") - fslist.extend(util.find_devs_with("TYPE=iso9660")) - - label_list = util.find_devs_with("LABEL=%s" % label.upper()) - label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) - - devlist = list(set(fslist) & set(label_list)) - devlist.sort(reverse=True) - - for dev in devlist: + for dev in self._get_devices(label): try: LOG.debug("Attempting to use data from %s", dev) @@ -120,9 +127,8 @@ class DataSourceNoCloud(sources.DataSource): seeded = util.mount_cb(dev, _pp2d_callback, pp2d_kwargs) except ValueError: - if dev in label_list: - LOG.warning("device %s with label=%s not a" - "valid seed.", dev, label) + LOG.warning("device %s with label=%s not a" + "valid seed.", dev, label) continue mydata = _merge_new_seed(mydata, seeded) diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 25db43e0..684c7473 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -32,8 +32,8 @@ preserve_hostname: false {% if variant in ["freebsd"] %} # This should not be required, but leave it in place until the real cause of -# not beeing able to find -any- datasources is resolved. -datasource_list: ['ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] +# not finding -any- datasources is resolved. 
+datasource_list: ['NoCloud', 'ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] {% endif %} # Example datasource config # datasource: diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index b785362f..18bea0b9 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -278,6 +278,24 @@ class TestNoCloudDataSource(CiTestCase): self.assertEqual(netconf, dsrc.network_config) self.assertNotIn(gateway, str(dsrc.network_config)) + @mock.patch("cloudinit.util.blkid") + def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid): + populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + self.mocks.enter_context( + mock.patch.object(util, 'is_FreeBSD', return_value=True)) + + self.mocks.enter_context( + mock.patch.object(os.path, 'exists', return_value=True)) + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc._get_devices('foo') + self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret) + fake_blkid.assert_not_called() + class TestParseCommandLineData(CiTestCase): -- cgit v1.2.3 From feebec1cbb462208003460d68d909e76cb68e0e2 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 25 Jun 2019 16:06:27 +0000 Subject: azure: add region and AZ properties from imds compute location metadata This allows cloud-init query region to show valid region data for Azure --- cloudinit/sources/DataSourceAzure.py | 9 +++++ tests/unittests/test_datasource/test_azure.py | 47 +++++++++++++++++++++++---- 2 files changed, 49 insertions(+), 7 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index b7440c1d..d2fad9bb 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -683,6 +683,11 @@ class DataSourceAzure(sources.DataSource): DS_CFG_KEY_PRESERVE_NTFS, False)) return + @property + def availability_zone(self): + return self.metadata.get( + 'imds', {}).get('compute', {}).get('platformFaultDomain') + @property def network_config(self): """Generate a network config like net.generate_fallback_network() with @@ -701,6 +706,10 @@ class DataSourceAzure(sources.DataSource): self._network_config = parse_network_config(nc_src) return self._network_config + @property + def region(self): + return self.metadata.get('imds', {}).get('compute', {}).get('location') + def _partitions_on_device(devpath, maxnum=16): # return a list of tuples (ptnum, path) for each part on devpath diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index afb614e4..f27ef21b 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -84,6 +84,25 @@ def construct_valid_ovf_env(data=None, pubkeys=None, NETWORK_METADATA = { + "compute": { + "location": "eastus2", + "name": "my-hostname", + "offer": "UbuntuServer", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "Canonical", + "resourceGroupName": "srugroup1", + "sku": "19.04-DAILY", + "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777", + "tags": "", + "version": "19.04.201906190", + "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642", + "vmScaleSetName": "", + "vmSize": "Standard_DS1_v2", + "zone": "" + }, "network": { 
"interface": [ { @@ -478,13 +497,7 @@ scbus-1 on xpt0 bus 0 expected_metadata = { 'azure_data': { 'configurationsettype': 'LinuxProvisioningConfiguration'}, - 'imds': {'network': {'interface': [{ - 'ipv4': {'ipAddress': [ - {'privateIpAddress': '10.0.0.4', - 'publicIpAddress': '104.46.124.81'}], - 'subnet': [{'address': '10.0.0.0', 'prefix': '24'}]}, - 'ipv6': {'ipAddress': []}, - 'macAddress': '000D3A047598'}]}}, + 'imds': NETWORK_METADATA, 'instance-id': 'test-instance-id', 'local-hostname': u'myhost', 'random_seed': 'wild'} @@ -612,6 +625,26 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) + def test_availability_zone_set_from_imds(self): + """Datasource.availability returns IMDS platformFaultDomain.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual('0', dsrc.availability_zone) + + def test_region_set_from_imds(self): + """Datasource.region returns IMDS region location.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual('eastus2', dsrc.region) + def test_user_cfg_set_agent_command(self): # set dscfg in via base64 encoded yaml cfg = {'agent_command': "my_command"} -- cgit v1.2.3 From 5e4792cd11a4754384bd70e4dc94413976017ae8 Mon Sep 17 00:00:00 2001 From: Markus Schade Date: Wed, 3 Jul 2019 17:04:46 +0000 Subject: Add missing dsname for Hetzner Cloud datasource --- cloudinit/sources/DataSourceHetzner.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py index 5c75b65b..50298330 100644 --- a/cloudinit/sources/DataSourceHetzner.py +++ b/cloudinit/sources/DataSourceHetzner.py @@ -28,6 +28,9 @@ MD_WAIT_RETRY = 2 class DataSourceHetzner(sources.DataSource): + + dsname = 'Hetzner' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.distro = distro -- cgit v1.2.3 From 217c89369c3b16f11333f0090e059f76fc5d7937 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 10 Jul 2019 23:17:12 +0000 Subject: Fix a couple of issues raised by a coverity scan * cc_lxd: fix copy/paste error in debug logging * DataSourceCloudSigma: remove unreachable code * This unreachable code was introduced in a refactor (in 2015) which removed the need for an exception handler, but retained the logging from the exception handler as an unreachable fall-through. 
--- cloudinit/config/cc_lxd.py | 2 +- cloudinit/sources/DataSourceCloudSigma.py | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 71d13ed8..d9830770 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -152,7 +152,7 @@ def handle(name, cfg, cloud, log, args): if cmd_attach: log.debug("Setting up default lxd bridge: %s" % - " ".join(cmd_create)) + " ".join(cmd_attach)) _lxc(cmd_attach) elif bridge_cfg: diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index 2955d3f0..df88f677 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -42,12 +42,8 @@ class DataSourceCloudSigma(sources.DataSource): if not sys_product_name: LOG.debug("system-product-name not available in dmi data") return False - else: - LOG.debug("detected hypervisor as %s", sys_product_name) - return 'cloudsigma' in sys_product_name.lower() - - LOG.warning("failed to query dmi data for system product name") - return False + LOG.debug("detected hypervisor as %s", sys_product_name) + return 'cloudsigma' in sys_product_name.lower() def _get_data(self): """ -- cgit v1.2.3 From 9c47c682b7aaa185c32a68f4dea8e23e9a2ef565 Mon Sep 17 00:00:00 2001 From: Xiaofeng Wang Date: Tue, 16 Jul 2019 13:09:38 +0000 Subject: VMWare: Trigger the post customization script via cc_scripts module. cloud-init does not trigger reboots of a VM therefore adding custom scripts to rc.local does not execute the post scripts. This patch moves post-scripts into per-instance scripts dir and has cc_scripts module run the post-scripts. Also in this branch: - Remove the sh interpreter and execute the customization script directly. - Update the unit test. LP: #1833192 --- cloudinit/sources/DataSourceOVF.py | 7 +- .../helpers/vmware/imc/config_custom_script.py | 143 ++++++--------------- tests/unittests/test_vmware/test_custom_script.py | 116 +++++++++-------- 3 files changed, 111 insertions(+), 155 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 70e7a5c0..dd941d2e 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -148,6 +148,9 @@ class DataSourceOVF(sources.DataSource): product_marker, os.path.join(self.paths.cloud_dir, 'data')) special_customization = product_marker and not hasmarkerfile customscript = self._vmware_cust_conf.custom_script_name + ccScriptsDir = os.path.join( + self.paths.get_cpath("scripts"), + "per-instance") except Exception as e: _raise_error_status( "Error parsing the customization Config File", @@ -201,7 +204,9 @@ class DataSourceOVF(sources.DataSource): if customscript: try: - postcust = PostCustomScript(customscript, imcdirpath) + postcust = PostCustomScript(customscript, + imcdirpath, + ccScriptsDir) postcust.execute() except Exception as e: _raise_error_status( diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py index a7d4ad91..9f14770e 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py +++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py @@ -1,5 +1,5 @@ # Copyright (C) 2017 Canonical Ltd. -# Copyright (C) 2017 VMware Inc. +# Copyright (C) 2017-2019 VMware Inc. 
# # Author: Maitreyee Saikia # @@ -8,7 +8,6 @@ import logging import os import stat -from textwrap import dedent from cloudinit import util @@ -20,12 +19,15 @@ class CustomScriptNotFound(Exception): class CustomScriptConstant(object): - RC_LOCAL = "/etc/rc.local" - POST_CUST_TMP_DIR = "/root/.customization" - POST_CUST_RUN_SCRIPT_NAME = "post-customize-guest.sh" - POST_CUST_RUN_SCRIPT = os.path.join(POST_CUST_TMP_DIR, - POST_CUST_RUN_SCRIPT_NAME) - POST_REBOOT_PENDING_MARKER = "/.guest-customization-post-reboot-pending" + CUSTOM_TMP_DIR = "/root/.customization" + + # The user defined custom script + CUSTOM_SCRIPT_NAME = "customize.sh" + CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR, + CUSTOM_SCRIPT_NAME) + POST_CUSTOM_PENDING_MARKER = "/.guest-customization-post-reboot-pending" + # The cc_scripts_per_instance script to launch custom script + POST_CUSTOM_SCRIPT_NAME = "post-customize-guest.sh" class RunCustomScript(object): @@ -39,10 +41,19 @@ class RunCustomScript(object): raise CustomScriptNotFound("Script %s not found!! " "Cannot execute custom script!" % self.scriptpath) + + util.ensure_dir(CustomScriptConstant.CUSTOM_TMP_DIR) + + LOG.debug("Copying custom script to %s", + CustomScriptConstant.CUSTOM_SCRIPT) + util.copy(self.scriptpath, CustomScriptConstant.CUSTOM_SCRIPT) + # Strip any CR characters from the decoded script - util.load_file(self.scriptpath).replace("\r", "") - st = os.stat(self.scriptpath) - os.chmod(self.scriptpath, st.st_mode | stat.S_IEXEC) + content = util.load_file( + CustomScriptConstant.CUSTOM_SCRIPT).replace("\r", "") + util.write_file(CustomScriptConstant.CUSTOM_SCRIPT, + content, + mode=0o544) class PreCustomScript(RunCustomScript): @@ -50,104 +61,34 @@ class PreCustomScript(RunCustomScript): """Executing custom script with precustomization argument.""" LOG.debug("Executing pre-customization script") self.prepare_script() - util.subp(["/bin/sh", self.scriptpath, "precustomization"]) + util.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"]) class PostCustomScript(RunCustomScript): - def __init__(self, scriptname, directory): + def __init__(self, scriptname, directory, ccScriptsDir): super(PostCustomScript, self).__init__(scriptname, directory) - # Determine when to run custom script. When postreboot is True, - # the user uploaded script will run as part of rc.local after - # the machine reboots. This is determined by presence of rclocal. - # When postreboot is False, script will run as part of cloud-init. - self.postreboot = False - - def _install_post_reboot_agent(self, rclocal): - """ - Install post-reboot agent for running custom script after reboot. - As part of this process, we are editing the rclocal file to run a - VMware script, which in turn is resposible for handling the user - script. - @param: path to rc local. - """ - LOG.debug("Installing post-reboot customization from %s to %s", - self.directory, rclocal) - if not self.has_previous_agent(rclocal): - LOG.info("Adding post-reboot customization agent to rc.local") - new_content = dedent(""" - # Run post-reboot guest customization - /bin/sh %s - exit 0 - """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT - existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '') - st = os.stat(rclocal) - # "x" flag should be set - mode = st.st_mode | stat.S_IEXEC - util.write_file(rclocal, existing_rclocal + new_content, mode) - - else: - # We don't need to update rclocal file everytime a customization - # is requested. It just needs to be done for the first time. 
- LOG.info("Post-reboot guest customization agent is already " - "registered in rc.local") - LOG.debug("Installing post-reboot customization agent finished: %s", - self.postreboot) - - def has_previous_agent(self, rclocal): - searchstring = "# Run post-reboot guest customization" - if searchstring in open(rclocal).read(): - return True - return False - - def find_rc_local(self): - """ - Determine if rc local is present. - """ - rclocal = "" - if os.path.exists(CustomScriptConstant.RC_LOCAL): - LOG.debug("rc.local detected.") - # resolving in case of symlink - rclocal = os.path.realpath(CustomScriptConstant.RC_LOCAL) - LOG.debug("rc.local resolved to %s", rclocal) - else: - LOG.warning("Can't find rc.local, post-customization " - "will be run before reboot") - return rclocal - - def install_agent(self): - rclocal = self.find_rc_local() - if rclocal: - self._install_post_reboot_agent(rclocal) - self.postreboot = True + self.ccScriptsDir = ccScriptsDir + self.ccScriptPath = os.path.join( + ccScriptsDir, + CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME) def execute(self): """ - This method executes post-customization script before or after reboot - based on the presence of rc local. + This method copy the post customize run script to + cc_scripts_per_instance directory and let this + module to run post custom script. """ self.prepare_script() - self.install_agent() - if not self.postreboot: - LOG.warning("Executing post-customization script inline") - util.subp(["/bin/sh", self.scriptpath, "postcustomization"]) - else: - LOG.debug("Scheduling custom script to run post reboot") - if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR): - os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR) - # Script "post-customize-guest.sh" and user uploaded script are - # are present in the same directory and needs to copied to a temp - # directory to be executed post reboot. User uploaded script is - # saved as customize.sh in the temp directory. - # post-customize-guest.sh excutes customize.sh after reboot. - LOG.debug("Copying post-customization script") - util.copy(self.scriptpath, - CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh") - LOG.debug("Copying script to run post-customization script") - util.copy( - os.path.join(self.directory, - CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME), - CustomScriptConstant.POST_CUST_RUN_SCRIPT) - LOG.info("Creating post-reboot pending marker") - util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER) + + LOG.debug("Copying post customize run script to %s", + self.ccScriptPath) + util.copy( + os.path.join(self.directory, + CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME), + self.ccScriptPath) + st = os.stat(self.ccScriptPath) + os.chmod(self.ccScriptPath, st.st_mode | stat.S_IEXEC) + LOG.info("Creating post customization pending marker") + util.ensure_file(CustomScriptConstant.POST_CUSTOM_PENDING_MARKER) # vi: ts=4 expandtab diff --git a/tests/unittests/test_vmware/test_custom_script.py b/tests/unittests/test_vmware/test_custom_script.py index 2d9519b0..f89f8157 100644 --- a/tests/unittests/test_vmware/test_custom_script.py +++ b/tests/unittests/test_vmware/test_custom_script.py @@ -1,10 +1,12 @@ # Copyright (C) 2015 Canonical Ltd. -# Copyright (C) 2017 VMware INC. +# Copyright (C) 2017-2019 VMware INC. # # Author: Maitreyee Saikia # # This file is part of cloud-init. See LICENSE file for license information. 
+import os +import stat from cloudinit import util from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( CustomScriptConstant, @@ -18,6 +20,10 @@ from cloudinit.tests.helpers import CiTestCase, mock class TestVmwareCustomScript(CiTestCase): def setUp(self): self.tmpDir = self.tmp_dir() + # Mock the tmpDir as the root dir in VM. + self.execDir = os.path.join(self.tmpDir, ".customization") + self.execScript = os.path.join(self.execDir, + ".customize.sh") def test_prepare_custom_script(self): """ @@ -37,63 +43,67 @@ class TestVmwareCustomScript(CiTestCase): # Custom script exists. custScript = self.tmp_path("test-cust", self.tmpDir) - util.write_file(custScript, "test-CR-strip/r/r") - postCust = PostCustomScript("test-cust", self.tmpDir) - self.assertEqual("test-cust", postCust.scriptname) - self.assertEqual(self.tmpDir, postCust.directory) - self.assertEqual(custScript, postCust.scriptpath) - self.assertFalse(postCust.postreboot) - postCust.prepare_script() - # Check if all carraige returns are stripped from script. - self.assertFalse("/r" in custScript) + util.write_file(custScript, "test-CR-strip\r\r") + with mock.patch.object(CustomScriptConstant, + "CUSTOM_TMP_DIR", + self.execDir): + with mock.patch.object(CustomScriptConstant, + "CUSTOM_SCRIPT", + self.execScript): + postCust = PostCustomScript("test-cust", + self.tmpDir, + self.tmpDir) + self.assertEqual("test-cust", postCust.scriptname) + self.assertEqual(self.tmpDir, postCust.directory) + self.assertEqual(custScript, postCust.scriptpath) + postCust.prepare_script() - def test_rc_local_exists(self): - """ - This test is designed to verify the different scenarios associated - with the presence of rclocal. - """ - # test when rc local does not exist - postCust = PostCustomScript("test-cust", self.tmpDir) - with mock.patch.object(CustomScriptConstant, "RC_LOCAL", "/no/path"): - rclocal = postCust.find_rc_local() - self.assertEqual("", rclocal) - - # test when rc local exists - rclocalFile = self.tmp_path("vmware-rclocal", self.tmpDir) - util.write_file(rclocalFile, "# Run post-reboot guest customization", - omode="w") - with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalFile): - rclocal = postCust.find_rc_local() - self.assertEqual(rclocalFile, rclocal) - self.assertTrue(postCust.has_previous_agent, rclocal) - - # test when rc local is a symlink - rclocalLink = self.tmp_path("dummy-rclocal-link", self.tmpDir) - util.sym_link(rclocalFile, rclocalLink, True) - with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalLink): - rclocal = postCust.find_rc_local() - self.assertEqual(rclocalFile, rclocal) + # Custom script is copied with exec privilege + self.assertTrue(os.path.exists(self.execScript)) + st = os.stat(self.execScript) + self.assertTrue(st.st_mode & stat.S_IEXEC) + with open(self.execScript, "r") as f: + content = f.read() + self.assertEqual(content, "test-CR-strip") + # Check if all carraige returns are stripped from script. + self.assertFalse("\r" in content) def test_execute_post_cust(self): """ - This test is to identify if rclocal was properly populated to be - run after reboot. + This test is designed to verify the behavior after execute post + customization. 
""" - customscript = self.tmp_path("vmware-post-cust-script", self.tmpDir) - rclocal = self.tmp_path("vmware-rclocal", self.tmpDir) - # Create a temporary rclocal file - open(customscript, "w") - util.write_file(rclocal, "tests\nexit 0", omode="w") - postCust = PostCustomScript("vmware-post-cust-script", self.tmpDir) - with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocal): - # Test that guest customization agent is not installed initially. - self.assertFalse(postCust.postreboot) - self.assertIs(postCust.has_previous_agent(rclocal), False) - postCust.install_agent() + # Prepare the customize package + postCustRun = self.tmp_path("post-customize-guest.sh", self.tmpDir) + util.write_file(postCustRun, "This is the script to run post cust") + userScript = self.tmp_path("test-cust", self.tmpDir) + util.write_file(userScript, "This is the post cust script") - # Assert rclocal has been modified to have guest customization - # agent. - self.assertTrue(postCust.postreboot) - self.assertTrue(postCust.has_previous_agent, rclocal) + # Mock the cc_scripts_per_instance dir and marker file. + # Create another tmp dir for cc_scripts_per_instance. + ccScriptDir = self.tmp_dir() + ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh") + markerFile = os.path.join(self.tmpDir, ".markerFile") + with mock.patch.object(CustomScriptConstant, + "CUSTOM_TMP_DIR", + self.execDir): + with mock.patch.object(CustomScriptConstant, + "CUSTOM_SCRIPT", + self.execScript): + with mock.patch.object(CustomScriptConstant, + "POST_CUSTOM_PENDING_MARKER", + markerFile): + postCust = PostCustomScript("test-cust", + self.tmpDir, + ccScriptDir) + postCust.execute() + # Check cc_scripts_per_instance and marker file + # are created. + self.assertTrue(os.path.exists(ccScript)) + with open(ccScript, "r") as f: + content = f.read() + self.assertEqual(content, + "This is the script to run post cust") + self.assertTrue(os.path.exists(markerFile)) # vi: ts=4 expandtab -- cgit v1.2.3 From 1dbede64dc645b090b4047a105143b5d5090d214 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 23 Jul 2019 22:07:11 +0000 Subject: stages: allow data sources to override network config source order Currently, if a platform provides any network configuration via the "cmdline" method (i.e. network-data=... on the kernel command line, ip=... on the kernel command line, or iBFT config via /run/net-*.conf), the value of the data source's network_config property is completely ignored. This means that on platforms that use iSCSI boot (such as Oracle Compute Infrastructure), there is no way for the data source to configure any network interfaces other than those that have already been configured by the initramfs. This change allows data sources to specify the order in which network configuration sources are considered. Data sources that opt to use this mechanism will be expected to consume the command line network data and integrate it themselves. (The generic merging of network configuration sources was considered, but we concluded that the single use case we have presently (a) didn't warrant the increased complexity, and (b) didn't give us a broad enough view to be sure that our generic implementation would be sufficiently generic. This change in no way precludes a merging strategy in future.) 
--- cloudinit/sources/__init__.py | 16 ++++++ cloudinit/stages.py | 37 ++++++++++---- cloudinit/tests/test_stages.py | 71 ++++++++++++++++++++++---- tests/unittests/test_datasource/test_common.py | 11 ++++ 4 files changed, 116 insertions(+), 19 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index e6966b31..9d249366 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -66,6 +66,13 @@ CLOUD_ID_REGION_PREFIX_MAP = { 'china': ('azure-china', lambda c: c == 'azure'), # only change azure } +# NetworkConfigSource represents the canonical list of network config sources +# that cloud-init knows about. (Python 2.7 lacks PEP 435, so use a singleton +# namedtuple as an enum; see https://stackoverflow.com/a/6971002) +_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback') +NetworkConfigSource = namedtuple('NetworkConfigSource', + _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES) + class DataSourceNotFoundException(Exception): pass @@ -153,6 +160,15 @@ class DataSource(object): # Track the discovered fallback nic for use in configuration generation. _fallback_interface = None + # The network configuration sources that should be considered for this data + # source. (The first source in this list that provides network + # configuration will be used without considering any that follow.) This + # should always be a subset of the members of NetworkConfigSource with no + # duplicate entries. + network_config_sources = (NetworkConfigSource.cmdline, + NetworkConfigSource.system_cfg, + NetworkConfigSource.ds) + # read_url_params url_max_wait = -1 # max_wait < 0 means do not wait url_timeout = 10 # timeout for each metadata url read attempt diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 5f9d47b9..6bcda2d1 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -24,6 +24,7 @@ from cloudinit.handlers.shell_script import ShellScriptPartHandler from cloudinit.handlers.upstart_job import UpstartJobPartHandler from cloudinit.event import EventType +from cloudinit.sources import NetworkConfigSource from cloudinit import cloud from cloudinit import config @@ -630,19 +631,37 @@ class Init(object): if os.path.exists(disable_file): return (None, disable_file) - cmdline_cfg = ('cmdline', cmdline.read_kernel_cmdline_config()) - dscfg = ('ds', None) + available_cfgs = { + NetworkConfigSource.cmdline: cmdline.read_kernel_cmdline_config(), + NetworkConfigSource.ds: None, + NetworkConfigSource.system_cfg: self.cfg.get('network'), + } + if self.datasource and hasattr(self.datasource, 'network_config'): - dscfg = ('ds', self.datasource.network_config) - sys_cfg = ('system_cfg', self.cfg.get('network')) + available_cfgs[NetworkConfigSource.ds] = ( + self.datasource.network_config) - for loc, ncfg in (cmdline_cfg, sys_cfg, dscfg): + if self.datasource: + order = self.datasource.network_config_sources + else: + order = sources.DataSource.network_config_sources + for cfg_source in order: + if not hasattr(NetworkConfigSource, cfg_source): + LOG.warning('data source specifies an invalid network' + ' cfg_source: %s', cfg_source) + continue + if cfg_source not in available_cfgs: + LOG.warning('data source specifies an unavailable network' + ' cfg_source: %s', cfg_source) + continue + ncfg = available_cfgs[cfg_source] if net.is_disabled_cfg(ncfg): - LOG.debug("network config disabled by %s", loc) - return (None, loc) + LOG.debug("network config disabled by %s", cfg_source) + return (None, cfg_source) if ncfg: - 
return (ncfg, loc) - return (self.distro.generate_fallback_config(), "fallback") + return (ncfg, cfg_source) + return (self.distro.generate_fallback_config(), + NetworkConfigSource.fallback) def _apply_netcfg_names(self, netcfg): try: diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py index 9b483121..7e13e29d 100644 --- a/cloudinit/tests/test_stages.py +++ b/cloudinit/tests/test_stages.py @@ -6,6 +6,7 @@ import os from cloudinit import stages from cloudinit import sources +from cloudinit.sources import NetworkConfigSource from cloudinit.event import EventType from cloudinit.util import write_file @@ -63,7 +64,7 @@ class TestInit(CiTestCase): """find_networking_config returns when disabled by kernel cmdline.""" m_cmdline.return_value = {'config': 'disabled'} self.assertEqual( - (None, 'cmdline'), + (None, NetworkConfigSource.cmdline), self.init._find_networking_config()) self.assertEqual('DEBUG: network config disabled by cmdline\n', self.logs.getvalue()) @@ -78,7 +79,7 @@ class TestInit(CiTestCase): self.init.datasource = FakeDataSource( network_config={'config': 'disabled'}) self.assertEqual( - (None, 'ds'), + (None, NetworkConfigSource.ds), self.init._find_networking_config()) self.assertEqual('DEBUG: network config disabled by ds\n', self.logs.getvalue()) @@ -90,11 +91,61 @@ class TestInit(CiTestCase): self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, 'network': {'config': 'disabled'}} self.assertEqual( - (None, 'system_cfg'), + (None, NetworkConfigSource.system_cfg), self.init._find_networking_config()) self.assertEqual('DEBUG: network config disabled by system_cfg\n', self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test__find_networking_config_uses_datasrc_order(self, m_cmdline): + """find_networking_config should check sources in DS defined order""" + # cmdline, which would normally be preferred over other sources, + # disables networking; in this case, though, the DS moves cmdline later + # so its own config is preferred + m_cmdline.return_value = {'config': 'disabled'} + + ds_net_cfg = {'config': {'needle': True}} + self.init.datasource = FakeDataSource(network_config=ds_net_cfg) + self.init.datasource.network_config_sources = [ + NetworkConfigSource.ds, NetworkConfigSource.system_cfg, + NetworkConfigSource.cmdline] + + self.assertEqual( + (ds_net_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test__find_networking_config_warns_if_datasrc_uses_invalid_src( + self, m_cmdline): + """find_networking_config should check sources in DS defined order""" + ds_net_cfg = {'config': {'needle': True}} + self.init.datasource = FakeDataSource(network_config=ds_net_cfg) + self.init.datasource.network_config_sources = [ + 'invalid_src', NetworkConfigSource.ds] + + self.assertEqual( + (ds_net_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + self.assertIn('WARNING: data source specifies an invalid network' + ' cfg_source: invalid_src', + self.logs.getvalue()) + + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test__find_networking_config_warns_if_datasrc_uses_unavailable_src( + self, m_cmdline): + """find_networking_config should check sources in DS defined order""" + ds_net_cfg = {'config': {'needle': True}} + self.init.datasource = FakeDataSource(network_config=ds_net_cfg) + self.init.datasource.network_config_sources = [ + NetworkConfigSource.fallback, 
NetworkConfigSource.ds] + + self.assertEqual( + (ds_net_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + self.assertIn('WARNING: data source specifies an unavailable network' + ' cfg_source: fallback', + self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') def test_wb__find_networking_config_returns_kernel(self, m_cmdline): """find_networking_config returns kernel cmdline config if present.""" @@ -105,7 +156,7 @@ class TestInit(CiTestCase): self.init.datasource = FakeDataSource( network_config={'config': ['fakedatasource']}) self.assertEqual( - (expected_cfg, 'cmdline'), + (expected_cfg, NetworkConfigSource.cmdline), self.init._find_networking_config()) @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') @@ -118,7 +169,7 @@ class TestInit(CiTestCase): self.init.datasource = FakeDataSource( network_config={'config': ['fakedatasource']}) self.assertEqual( - (expected_cfg, 'system_cfg'), + (expected_cfg, NetworkConfigSource.system_cfg), self.init._find_networking_config()) @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') @@ -129,7 +180,7 @@ class TestInit(CiTestCase): expected_cfg = {'config': ['fakedatasource']} self.init.datasource = FakeDataSource(network_config=expected_cfg) self.assertEqual( - (expected_cfg, 'ds'), + (expected_cfg, NetworkConfigSource.ds), self.init._find_networking_config()) @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') @@ -148,7 +199,7 @@ class TestInit(CiTestCase): distro = self.init.distro distro.generate_fallback_config = fake_generate_fallback self.assertEqual( - (fake_cfg, 'fallback'), + (fake_cfg, NetworkConfigSource.fallback), self.init._find_networking_config()) self.assertNotIn('network config disabled', self.logs.getvalue()) @@ -177,7 +228,7 @@ class TestInit(CiTestCase): 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} def fake_network_config(): - return net_cfg, 'fallback' + return net_cfg, NetworkConfigSource.fallback m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} @@ -199,7 +250,7 @@ class TestInit(CiTestCase): 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} def fake_network_config(): - return net_cfg, 'fallback' + return net_cfg, NetworkConfigSource.fallback self.init._find_networking_config = fake_network_config self.init.apply_network_config(True) @@ -223,7 +274,7 @@ class TestInit(CiTestCase): 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} def fake_network_config(): - return net_cfg, 'fallback' + return net_cfg, NetworkConfigSource.fallback m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 6b01a4ea..2a9cfb29 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -83,4 +83,15 @@ class ExpectedDataSources(test_helpers.TestCase): self.assertEqual(set([AliYun.DataSourceAliYun]), set(found)) +class TestDataSourceInvariants(test_helpers.TestCase): + + def test_data_sources_have_valid_network_config_sources(self): + for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: + for cfg_src in ds.network_config_sources: + fail_msg = ('{} has an invalid network_config_sources entry:' + ' {}'.format(str(ds), cfg_src)) + self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src), + fail_msg) + + # vi: ts=4 expandtab -- cgit v1.2.3 From 496aaa947ec563bd02b3148f220ff0afe1b32abb Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 26 Jul 2019 20:40:18 +0000 Subject: 
net/cmdline: split interfaces_by_mac and init network config determination Previously "cmdline" network configuration could be either user-specified network-config=... configuration data, or initramfs-provided configuration data. Before data sources could modify the order in which network config sources were considered, this conflation didn't matter (and, indeed, in the default data source configuration it will continue to not matter). However, it _is_ desirable for a data source to be able to specify that its network configuration should be preferred over the initramfs-provided network configuration but still allow explicit network-config=... configuration passed to the kernel cmdline to continue to override both of those sources. (This also modifies the Oracle data source to use read_initramfs_config directly, which is effectively what it was using read_kernel_cmdline_config for previously.) --- cloudinit/net/cmdline.py | 25 ++++++---- cloudinit/sources/DataSourceOracle.py | 10 ++-- cloudinit/sources/__init__.py | 3 +- cloudinit/sources/tests/test_oracle.py | 19 ++++---- cloudinit/stages.py | 1 + cloudinit/tests/test_stages.py | 83 ++++++++++++++++++++++++++++------ tests/unittests/test_net.py | 22 ++++----- 7 files changed, 112 insertions(+), 51 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index f89a0f73..556a10f3 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -177,21 +177,13 @@ def _is_initramfs_netconfig(files, cmdline): return False -def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None): +def read_initramfs_config(files=None, mac_addrs=None, cmdline=None): if cmdline is None: cmdline = util.get_cmdline() if files is None: files = _get_klibc_net_cfg_files() - if 'network-config=' in cmdline: - data64 = None - for tok in cmdline.split(): - if tok.startswith("network-config="): - data64 = tok.split("=", 1)[1] - if data64: - return util.load_yaml(_b64dgz(data64)) - if not _is_initramfs_netconfig(files, cmdline): return None @@ -204,4 +196,19 @@ def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None): return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs) + +def read_kernel_cmdline_config(cmdline=None): + if cmdline is None: + cmdline = util.get_cmdline() + + if 'network-config=' in cmdline: + data64 = None + for tok in cmdline.split(): + if tok.startswith("network-config="): + data64 = tok.split("=", 1)[1] + if data64: + return util.load_yaml(_b64dgz(data64)) + + return None + # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index 70b9c58a..76cfa38c 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -48,7 +48,7 @@ class DataSourceOracle(sources.DataSource): return False # network may be configured if iscsi root. If that is the case - # then read_kernel_cmdline_config will return non-None. + # then read_initramfs_config will return non-None. 
if _is_iscsi_root(): data = self.crawl_metadata() else: @@ -118,10 +118,8 @@ class DataSourceOracle(sources.DataSource): We nonetheless return cmdline provided config if present and fallback to generate fallback.""" if self._network_config == sources.UNSET: - cmdline_cfg = cmdline.read_kernel_cmdline_config() - if cmdline_cfg: - self._network_config = cmdline_cfg - else: + self._network_config = cmdline.read_initramfs_config() + if not self._network_config: self._network_config = self.distro.generate_fallback_config() return self._network_config @@ -137,7 +135,7 @@ def _is_platform_viable(): def _is_iscsi_root(): - return bool(cmdline.read_kernel_cmdline_config()) + return bool(cmdline.read_initramfs_config()) def _load_index(content): diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 9d249366..c2baccd5 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -69,7 +69,7 @@ CLOUD_ID_REGION_PREFIX_MAP = { # NetworkConfigSource represents the canonical list of network config sources # that cloud-init knows about. (Python 2.7 lacks PEP 435, so use a singleton # namedtuple as an enum; see https://stackoverflow.com/a/6971002) -_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback') +_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback', 'initramfs') NetworkConfigSource = namedtuple('NetworkConfigSource', _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES) @@ -166,6 +166,7 @@ class DataSource(object): # should always be a subset of the members of NetworkConfigSource with no # duplicate entries. network_config_sources = (NetworkConfigSource.cmdline, + NetworkConfigSource.initramfs, NetworkConfigSource.system_cfg, NetworkConfigSource.ds) diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index 97d62947..282382c5 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -133,9 +133,9 @@ class TestDataSourceOracle(test_helpers.CiTestCase): self.assertEqual(self.my_md['uuid'], ds.get_instance_id()) self.assertEqual(my_userdata, ds.userdata_raw) - @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config") + @mock.patch(DS_PATH + ".cmdline.read_initramfs_config") @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) - def test_network_cmdline(self, m_is_iscsi_root, m_cmdline_config): + def test_network_cmdline(self, m_is_iscsi_root, m_initramfs_config): """network_config should read kernel cmdline.""" distro = mock.MagicMock() ds, _ = self._get_ds(distro=distro, patches={ @@ -145,15 +145,15 @@ class TestDataSourceOracle(test_helpers.CiTestCase): MD_VER: {'system_uuid': self.my_uuid, 'meta_data': self.my_md}}}}) ncfg = {'version': 1, 'config': [{'a': 'b'}]} - m_cmdline_config.return_value = ncfg + m_initramfs_config.return_value = ncfg self.assertTrue(ds._get_data()) self.assertEqual(ncfg, ds.network_config) - m_cmdline_config.assert_called_once_with() + self.assertEqual([mock.call()], m_initramfs_config.call_args_list) self.assertFalse(distro.generate_fallback_config.called) - @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config") + @mock.patch(DS_PATH + ".cmdline.read_initramfs_config") @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) - def test_network_fallback(self, m_is_iscsi_root, m_cmdline_config): + def test_network_fallback(self, m_is_iscsi_root, m_initramfs_config): """test that fallback network is generated if no kernel cmdline.""" distro = mock.MagicMock() ds, _ = self._get_ds(distro=distro, patches={ @@ -163,18 
+163,17 @@ class TestDataSourceOracle(test_helpers.CiTestCase): MD_VER: {'system_uuid': self.my_uuid, 'meta_data': self.my_md}}}}) ncfg = {'version': 1, 'config': [{'a': 'b'}]} - m_cmdline_config.return_value = None + m_initramfs_config.return_value = None self.assertTrue(ds._get_data()) ncfg = {'version': 1, 'config': [{'distro1': 'value'}]} distro.generate_fallback_config.return_value = ncfg self.assertEqual(ncfg, ds.network_config) - m_cmdline_config.assert_called_once_with() + self.assertEqual([mock.call()], m_initramfs_config.call_args_list) distro.generate_fallback_config.assert_called_once_with() - self.assertEqual(1, m_cmdline_config.call_count) # test that the result got cached, and the methods not re-called. self.assertEqual(ncfg, ds.network_config) - self.assertEqual(1, m_cmdline_config.call_count) + self.assertEqual(1, m_initramfs_config.call_count) @mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4())) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 6bcda2d1..50129884 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -633,6 +633,7 @@ class Init(object): available_cfgs = { NetworkConfigSource.cmdline: cmdline.read_kernel_cmdline_config(), + NetworkConfigSource.initramfs: cmdline.read_initramfs_config(), NetworkConfigSource.ds: None, NetworkConfigSource.system_cfg: self.cfg.get('network'), } diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py index 7e13e29d..d5c9c0e4 100644 --- a/cloudinit/tests/test_stages.py +++ b/cloudinit/tests/test_stages.py @@ -59,20 +59,39 @@ class TestInit(CiTestCase): (None, disable_file), self.init._find_networking_config()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_disabled_by_kernel(self, m_cmdline): + def test_wb__find_networking_config_disabled_by_kernel( + self, m_cmdline, m_initramfs): """find_networking_config returns when disabled by kernel cmdline.""" m_cmdline.return_value = {'config': 'disabled'} + m_initramfs.return_value = {'config': ['fake_initrd']} self.assertEqual( (None, NetworkConfigSource.cmdline), self.init._find_networking_config()) self.assertEqual('DEBUG: network config disabled by cmdline\n', self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_disabled_by_datasrc(self, m_cmdline): + def test_wb__find_networking_config_disabled_by_initrd( + self, m_cmdline, m_initramfs): + """find_networking_config returns when disabled by kernel cmdline.""" + m_cmdline.return_value = {} + m_initramfs.return_value = {'config': 'disabled'} + self.assertEqual( + (None, NetworkConfigSource.initramfs), + self.init._find_networking_config()) + self.assertEqual('DEBUG: network config disabled by initramfs\n', + self.logs.getvalue()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_disabled_by_datasrc( + self, m_cmdline, m_initramfs): """find_networking_config returns when disabled by datasource cfg.""" m_cmdline.return_value = {} # Kernel doesn't disable networking + m_initramfs.return_value = {} # initramfs doesn't disable networking self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, 'network': {}} # system config doesn't disable @@ -84,10 +103,13 @@ class TestInit(CiTestCase): 
self.assertEqual('DEBUG: network config disabled by ds\n', self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_disabled_by_sysconfig(self, m_cmdline): + def test_wb__find_networking_config_disabled_by_sysconfig( + self, m_cmdline, m_initramfs): """find_networking_config returns when disabled by system config.""" m_cmdline.return_value = {} # Kernel doesn't disable networking + m_initramfs.return_value = {} # initramfs doesn't disable networking self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, 'network': {'config': 'disabled'}} self.assertEqual( @@ -96,27 +118,31 @@ class TestInit(CiTestCase): self.assertEqual('DEBUG: network config disabled by system_cfg\n', self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test__find_networking_config_uses_datasrc_order(self, m_cmdline): + def test__find_networking_config_uses_datasrc_order( + self, m_cmdline, m_initramfs): """find_networking_config should check sources in DS defined order""" - # cmdline, which would normally be preferred over other sources, - # disables networking; in this case, though, the DS moves cmdline later - # so its own config is preferred + # cmdline and initramfs, which would normally be preferred over other + # sources, disable networking; in this case, though, the DS moves them + # later so its own config is preferred m_cmdline.return_value = {'config': 'disabled'} + m_initramfs.return_value = {'config': 'disabled'} ds_net_cfg = {'config': {'needle': True}} self.init.datasource = FakeDataSource(network_config=ds_net_cfg) self.init.datasource.network_config_sources = [ NetworkConfigSource.ds, NetworkConfigSource.system_cfg, - NetworkConfigSource.cmdline] + NetworkConfigSource.cmdline, NetworkConfigSource.initramfs] self.assertEqual( (ds_net_cfg, NetworkConfigSource.ds), self.init._find_networking_config()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') def test__find_networking_config_warns_if_datasrc_uses_invalid_src( - self, m_cmdline): + self, m_cmdline, m_initramfs): """find_networking_config should check sources in DS defined order""" ds_net_cfg = {'config': {'needle': True}} self.init.datasource = FakeDataSource(network_config=ds_net_cfg) @@ -130,9 +156,10 @@ class TestInit(CiTestCase): ' cfg_source: invalid_src', self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') def test__find_networking_config_warns_if_datasrc_uses_unavailable_src( - self, m_cmdline): + self, m_cmdline, m_initramfs): """find_networking_config should check sources in DS defined order""" ds_net_cfg = {'config': {'needle': True}} self.init.datasource = FakeDataSource(network_config=ds_net_cfg) @@ -146,11 +173,14 @@ class TestInit(CiTestCase): ' cfg_source: fallback', self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_kernel(self, m_cmdline): + def test_wb__find_networking_config_returns_kernel( + self, m_cmdline, m_initramfs): """find_networking_config returns kernel cmdline config if present.""" expected_cfg = {'config': ['fakekernel']} m_cmdline.return_value 
= expected_cfg + m_initramfs.return_value = {'config': ['fake_initrd']} self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, 'network': {'config': ['fakesys_config']}} self.init.datasource = FakeDataSource( @@ -159,10 +189,29 @@ class TestInit(CiTestCase): (expected_cfg, NetworkConfigSource.cmdline), self.init._find_networking_config()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_returns_initramfs( + self, m_cmdline, m_initramfs): + """find_networking_config returns kernel cmdline config if present.""" + expected_cfg = {'config': ['fake_initrd']} + m_cmdline.return_value = {} + m_initramfs.return_value = expected_cfg + self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, + 'network': {'config': ['fakesys_config']}} + self.init.datasource = FakeDataSource( + network_config={'config': ['fakedatasource']}) + self.assertEqual( + (expected_cfg, NetworkConfigSource.initramfs), + self.init._find_networking_config()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_system_cfg(self, m_cmdline): + def test_wb__find_networking_config_returns_system_cfg( + self, m_cmdline, m_initramfs): """find_networking_config returns system config when present.""" m_cmdline.return_value = {} # No kernel network config + m_initramfs.return_value = {} # no initramfs network config expected_cfg = {'config': ['fakesys_config']} self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, 'network': expected_cfg} @@ -172,10 +221,13 @@ class TestInit(CiTestCase): (expected_cfg, NetworkConfigSource.system_cfg), self.init._find_networking_config()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_datasrc_cfg(self, m_cmdline): + def test_wb__find_networking_config_returns_datasrc_cfg( + self, m_cmdline, m_initramfs): """find_networking_config returns datasource net config if present.""" m_cmdline.return_value = {} # No kernel network config + m_initramfs.return_value = {} # no initramfs network config # No system config for network in setUp expected_cfg = {'config': ['fakedatasource']} self.init.datasource = FakeDataSource(network_config=expected_cfg) @@ -183,10 +235,13 @@ class TestInit(CiTestCase): (expected_cfg, NetworkConfigSource.ds), self.init._find_networking_config()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_fallback(self, m_cmdline): + def test_wb__find_networking_config_returns_fallback( + self, m_cmdline, m_initramfs): """find_networking_config returns fallback config if not defined.""" m_cmdline.return_value = {} # Kernel doesn't disable networking + m_initramfs.return_value = {} # no initramfs network config # Neither datasource nor system_info disable or provide network fake_cfg = {'config': [{'type': 'physical', 'name': 'eth9'}], diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index e2bbb847..1840ade0 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3558,13 +3558,13 @@ class TestCmdlineConfigParsing(CiTestCase): self.assertEqual(found, self.simple_cfg) -class 
TestCmdlineReadKernelConfig(FilesystemMockingTestCase): +class TestCmdlineReadInitramfsConfig(FilesystemMockingTestCase): macs = { 'eth0': '14:02:ec:42:48:00', 'eno1': '14:02:ec:42:48:01', } - def test_ip_cmdline_without_ip(self): + def test_without_ip(self): content = {'/run/net-eth0.conf': DHCP_CONTENT_1, cmdline._OPEN_ISCSI_INTERFACE_FILE: "eth0\n"} exp1 = copy.deepcopy(DHCP_EXPECTED_1) @@ -3574,12 +3574,12 @@ class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): populate_dir(root, content) self.reRoot(root) - found = cmdline.read_kernel_cmdline_config( + found = cmdline.read_initramfs_config( cmdline='foo root=/root/bar', mac_addrs=self.macs) self.assertEqual(found['version'], 1) self.assertEqual(found['config'], [exp1]) - def test_ip_cmdline_read_kernel_cmdline_ip(self): + def test_with_ip(self): content = {'/run/net-eth0.conf': DHCP_CONTENT_1} exp1 = copy.deepcopy(DHCP_EXPECTED_1) exp1['mac_address'] = self.macs['eth0'] @@ -3588,18 +3588,18 @@ class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): populate_dir(root, content) self.reRoot(root) - found = cmdline.read_kernel_cmdline_config( + found = cmdline.read_initramfs_config( cmdline='foo ip=dhcp', mac_addrs=self.macs) self.assertEqual(found['version'], 1) self.assertEqual(found['config'], [exp1]) - def test_ip_cmdline_read_kernel_cmdline_ip6(self): + def test_with_ip6(self): content = {'/run/net6-eno1.conf': DHCP6_CONTENT_1} root = self.tmp_dir() populate_dir(root, content) self.reRoot(root) - found = cmdline.read_kernel_cmdline_config( + found = cmdline.read_initramfs_config( cmdline='foo ip6=dhcp root=/dev/sda', mac_addrs=self.macs) self.assertEqual( @@ -3611,15 +3611,15 @@ class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): {'dns_nameservers': ['2001:67c:1562:8010::2:1'], 'control': 'manual', 'type': 'dhcp6', 'netmask': '64'}]}]}) - def test_ip_cmdline_read_kernel_cmdline_none(self): + def test_with_no_ip_or_ip6(self): # if there is no ip= or ip6= on cmdline, return value should be None content = {'net6-eno1.conf': DHCP6_CONTENT_1} files = sorted(populate_dir(self.tmp_dir(), content)) - found = cmdline.read_kernel_cmdline_config( + found = cmdline.read_initramfs_config( files=files, cmdline='foo root=/dev/sda', mac_addrs=self.macs) self.assertIsNone(found) - def test_ip_cmdline_both_ip_ip6(self): + def test_with_both_ip_ip6(self): content = { '/run/net-eth0.conf': DHCP_CONTENT_1, '/run/net6-eth0.conf': DHCP6_CONTENT_1.replace('eno1', 'eth0')} @@ -3634,7 +3634,7 @@ class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): populate_dir(root, content) self.reRoot(root) - found = cmdline.read_kernel_cmdline_config( + found = cmdline.read_initramfs_config( cmdline='foo ip=dhcp ip6=dhcp', mac_addrs=self.macs) self.assertEqual(found['version'], 1) -- cgit v1.2.3 From 4dfed67d0e82970f8717d0b524c593962698ca4f Mon Sep 17 00:00:00 2001 From: Chris Glass Date: Thu, 8 Aug 2019 17:09:57 +0000 Subject: New data source for the Exoscale.com cloud platform - dsidentify switches to the new Exoscale datasource on matching DMI name - New Exoscale datasource added Signed-off-by: Mathieu Corbin --- cloudinit/apport.py | 1 + cloudinit/settings.py | 1 + cloudinit/sources/DataSourceExoscale.py | 258 +++++++++++++++++++++++ doc/rtd/topics/datasources.rst | 1 + doc/rtd/topics/datasources/exoscale.rst | 68 ++++++ tests/unittests/test_datasource/test_common.py | 2 + tests/unittests/test_datasource/test_exoscale.py | 203 ++++++++++++++++++ tools/ds-identify | 7 +- 8 files changed, 540 insertions(+), 1 deletion(-) create mode 
100644 cloudinit/sources/DataSourceExoscale.py create mode 100644 doc/rtd/topics/datasources/exoscale.rst create mode 100644 tests/unittests/test_datasource/test_exoscale.py (limited to 'cloudinit/sources') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 22cb7fde..003ff1ff 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -23,6 +23,7 @@ KNOWN_CLOUD_NAMES = [ 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', + 'Exoscale', 'Hetzner Cloud', 'IBM - (aka SoftLayer or BlueMix)', 'LXD', diff --git a/cloudinit/settings.py b/cloudinit/settings.py index b1ebaade..2060d81f 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -39,6 +39,7 @@ CFG_BUILTIN = { 'Hetzner', 'IBMCloud', 'Oracle', + 'Exoscale', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py new file mode 100644 index 00000000..52e7f6f6 --- /dev/null +++ b/cloudinit/sources/DataSourceExoscale.py @@ -0,0 +1,258 @@ +# Author: Mathieu Corbin +# Author: Christopher Glass +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import ec2_utils as ec2 +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import url_helper +from cloudinit import util + +LOG = logging.getLogger(__name__) + +METADATA_URL = "http://169.254.169.254" +API_VERSION = "1.0" +PASSWORD_SERVER_PORT = 8080 + +URL_TIMEOUT = 10 +URL_RETRIES = 6 + +EXOSCALE_DMI_NAME = "Exoscale" + +BUILTIN_DS_CONFIG = { + # We run the set password config module on every boot in order to enable + # resetting the instance's password via the exoscale console (and a + # subsequent instance reboot). + 'cloud_config_modules': [["set-passwords", "always"]] +} + + +class DataSourceExoscale(sources.DataSource): + + dsname = 'Exoscale' + + def __init__(self, sys_cfg, distro, paths): + super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths) + LOG.debug("Initializing the Exoscale datasource") + + self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL) + self.api_version = self.ds_cfg.get('api_version', API_VERSION) + self.password_server_port = int( + self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT)) + self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT) + self.url_retries = self.ds_cfg.get('retries', URL_RETRIES) + + self.extra_config = BUILTIN_DS_CONFIG + + def wait_for_metadata_service(self): + """Wait for the metadata service to be reachable.""" + + metadata_url = "{}/{}/meta-data/instance-id".format( + self.metadata_url, self.api_version) + + url = url_helper.wait_for_url( + urls=[metadata_url], + max_wait=self.url_max_wait, + timeout=self.url_timeout, + status_cb=LOG.critical) + + return bool(url) + + def crawl_metadata(self): + """ + Crawl the metadata service when available. + + @returns: Dictionary of crawled metadata content. + """ + metadata_ready = util.log_time( + logfunc=LOG.info, + msg='waiting for the metadata service', + func=self.wait_for_metadata_service) + + if not metadata_ready: + return {} + + return read_metadata(self.metadata_url, self.api_version, + self.password_server_port, self.url_timeout, + self.url_retries) + + def _get_data(self): + """Fetch the user data, the metadata and the VM password + from the metadata service. + + Please refer to the datasource documentation for details on how the + metadata server and password server are crawled. 
+ """ + if not self._is_platform_viable(): + return False + + data = util.log_time( + logfunc=LOG.debug, + msg='Crawl of metadata service', + func=self.crawl_metadata) + + if not data: + return False + + self.userdata_raw = data['user-data'] + self.metadata = data['meta-data'] + password = data.get('password') + + password_config = {} + if password: + # Since we have a password, let's make sure we are allowed to use + # it by allowing ssh_pwauth. + # The password module's default behavior is to leave the + # configuration as-is in this regard, so that means it will either + # leave the password always disabled if no password is ever set, or + # leave the password login enabled if we set it once. + password_config = { + 'ssh_pwauth': True, + 'password': password, + 'chpasswd': { + 'expire': False, + }, + } + + # builtin extra_config overrides password_config + self.extra_config = util.mergemanydict( + [self.extra_config, password_config]) + + return True + + def get_config_obj(self): + return self.extra_config + + def _is_platform_viable(self): + return util.read_dmi_data('system-product-name').startswith( + EXOSCALE_DMI_NAME) + + +# Used to match classes to dependencies +datasources = [ + (DataSourceExoscale, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + + +def get_password(metadata_url=METADATA_URL, + api_version=API_VERSION, + password_server_port=PASSWORD_SERVER_PORT, + url_timeout=URL_TIMEOUT, + url_retries=URL_RETRIES): + """Obtain the VM's password if set. + + Once fetched the password is marked saved. Future calls to this method may + return empty string or 'saved_password'.""" + password_url = "{}:{}/{}/".format(metadata_url, password_server_port, + api_version) + response = url_helper.read_file_or_url( + password_url, + ssl_details=None, + headers={"DomU_Request": "send_my_password"}, + timeout=url_timeout, + retries=url_retries) + password = response.contents.decode('utf-8') + # the password is empty or already saved + # Note: the original metadata server would answer an additional + # 'bad_request' status, but the Exoscale implementation does not. 
+ if password in ['', 'saved_password']: + return None + # save the password + url_helper.read_file_or_url( + password_url, + ssl_details=None, + headers={"DomU_Request": "saved_password"}, + timeout=url_timeout, + retries=url_retries) + return password + + +def read_metadata(metadata_url=METADATA_URL, + api_version=API_VERSION, + password_server_port=PASSWORD_SERVER_PORT, + url_timeout=URL_TIMEOUT, + url_retries=URL_RETRIES): + """Query the metadata server and return the retrieved data.""" + crawled_metadata = {} + crawled_metadata['_metadata_api_version'] = api_version + try: + crawled_metadata['user-data'] = ec2.get_instance_userdata( + api_version, + metadata_url, + timeout=url_timeout, + retries=url_retries) + crawled_metadata['meta-data'] = ec2.get_instance_metadata( + api_version, + metadata_url, + timeout=url_timeout, + retries=url_retries) + except Exception as e: + util.logexc(LOG, "failed reading from metadata url %s (%s)", + metadata_url, e) + return {} + + try: + crawled_metadata['password'] = get_password( + api_version=api_version, + metadata_url=metadata_url, + password_server_port=password_server_port, + url_retries=url_retries, + url_timeout=url_timeout) + except Exception as e: + util.logexc(LOG, "failed to read from password server url %s:%s (%s)", + metadata_url, password_server_port, e) + + return crawled_metadata + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='Query Exoscale Metadata') + parser.add_argument( + "--endpoint", + metavar="URL", + help="The url of the metadata service.", + default=METADATA_URL) + parser.add_argument( + "--version", + metavar="VERSION", + help="The version of the metadata endpoint to query.", + default=API_VERSION) + parser.add_argument( + "--retries", + metavar="NUM", + type=int, + help="The number of retries querying the endpoint.", + default=URL_RETRIES) + parser.add_argument( + "--timeout", + metavar="NUM", + type=int, + help="The time in seconds to wait before timing out.", + default=URL_TIMEOUT) + parser.add_argument( + "--password-port", + metavar="PORT", + type=int, + help="The port on which the password endpoint listens", + default=PASSWORD_SERVER_PORT) + + args = parser.parse_args() + + data = read_metadata( + metadata_url=args.endpoint, + api_version=args.version, + password_server_port=args.password_port, + url_timeout=args.timeout, + url_retries=args.retries) + + print(util.json_dumps(data)) + +# vi: ts=4 expandtab diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 648c6068..2148cd5e 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -155,6 +155,7 @@ Follow for more information. datasources/configdrive.rst datasources/digitalocean.rst datasources/ec2.rst + datasources/exoscale.rst datasources/maas.rst datasources/nocloud.rst datasources/opennebula.rst diff --git a/doc/rtd/topics/datasources/exoscale.rst b/doc/rtd/topics/datasources/exoscale.rst new file mode 100644 index 00000000..27aec9cd --- /dev/null +++ b/doc/rtd/topics/datasources/exoscale.rst @@ -0,0 +1,68 @@ +.. _datasource_exoscale: + +Exoscale +======== + +This datasource supports reading from the metadata server used on the +`Exoscale platform `_. + +Use of the Exoscale datasource is recommended to benefit from new features of +the Exoscale platform. 
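[Editor's note] As an illustration of the password-server protocol documented in this file, here is a rough standalone client equivalent to the get_password() helper above. This is a sketch only: it assumes the third-party requests library and the default endpoint and port, and it is not part of the datasource itself:

import requests

# Default endpoint: "{metadata_url}:{port}/{api_version}/"
PASSWORD_URL = "http://169.254.169.254:8080/1.0/"

def fetch_and_mark_password():
    # The first request returns the pending password, or an empty
    # string / 'saved_password' when there is nothing to set.
    resp = requests.get(
        PASSWORD_URL,
        headers={"DomU_Request": "send_my_password"},
        timeout=10)
    password = resp.text
    if password in ("", "saved_password"):
        return None
    # The second request tells the server the password was consumed,
    # so later boots see 'saved_password' and skip resetting it.
    requests.get(
        PASSWORD_URL,
        headers={"DomU_Request": "saved_password"},
        timeout=10)
    return password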
+ +The datasource relies on the availability of a compatible metadata server +(``http://169.254.169.254`` is used by default) and its companion password +server, reachable at the same address (by default on port 8080). + +Crawling of metadata +-------------------- + +The metadata service and password server are crawled slightly differently: + + * The "metadata service" is crawled every boot. + * The password server is also crawled every boot (the Exoscale datasource + forces the password module to run with "frequency always"). + +In the password server case, the following rules apply in order to enable the +"restore instance password" functionality: + + * If a password is returned by the password server, it is then marked "saved" + by the cloud-init datasource. Subsequent boots will skip setting the password + (the password server will return "saved_password"). + * When the instance password is reset (via the Exoscale UI), the password + server will return the non-empty password at next boot, therefore causing + cloud-init to reset the instance's password. + +Configuration +------------- + +Users of this datasource are discouraged from changing the default settings +unless instructed to by Exoscale support. + +The following settings are available and can be set for the datasource in system +configuration (in `/etc/cloud/cloud.cfg.d/`). + +The settings available are: + + * **metadata_url**: The URL for the metadata service (defaults to + ``http://169.254.169.254``) + * **api_version**: The API version path on which to query the instance metadata + (defaults to ``1.0``) + * **password_server_port**: The port (on the metadata server) on which the + password server listens (defaults to ``8080``). + * **timeout**: the timeout value provided to urlopen for each individual http + request. (defaults to ``10``) + * **retries**: The number of retries that should be done for an http request + (defaults to ``6``) + + +An example configuration with the default values is provided below: + +.. sourcecode:: yaml + + datasource: + Exoscale: + metadata_url: "http://169.254.169.254" + api_version: "1.0" + password_server_port: 8080 + timeout: 10 + retries: 6 diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 2a9cfb29..61a7a762 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -13,6 +13,7 @@ from cloudinit.sources import ( DataSourceConfigDrive as ConfigDrive, DataSourceDigitalOcean as DigitalOcean, DataSourceEc2 as Ec2, + DataSourceExoscale as Exoscale, DataSourceGCE as GCE, DataSourceHetzner as Hetzner, DataSourceIBMCloud as IBMCloud, @@ -53,6 +54,7 @@ DEFAULT_NETWORK = [ CloudStack.DataSourceCloudStack, DSNone.DataSourceNone, Ec2.DataSourceEc2, + Exoscale.DataSourceExoscale, GCE.DataSourceGCE, MAAS.DataSourceMAAS, NoCloud.DataSourceNoCloudNet, diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py new file mode 100644 index 00000000..350c3304 --- /dev/null +++ b/tests/unittests/test_datasource/test_exoscale.py @@ -0,0 +1,203 @@ +# Author: Mathieu Corbin +# Author: Christopher Glass +# +# This file is part of cloud-init. See LICENSE file for license information. 
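# The endpoints exercised in these tests are composed from the datasource
# constants; with the defaults that works out to:
#
#   password:  http://169.254.169.254:8080/1.0/
#   meta-data: http://169.254.169.254/1.0/meta-data/
#   user-data: http://169.254.169.254/1.0/user-data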
+from cloudinit import helpers +from cloudinit.sources.DataSourceExoscale import ( + API_VERSION, + DataSourceExoscale, + METADATA_URL, + get_password, + PASSWORD_SERVER_PORT, + read_metadata) +from cloudinit.tests.helpers import HttprettyTestCase, mock + +import httpretty +import requests + + +TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL, + PASSWORD_SERVER_PORT, + API_VERSION) + +TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, + API_VERSION) + +TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, + API_VERSION) + + +@httpretty.activate +class TestDatasourceExoscale(HttprettyTestCase): + + def setUp(self): + super(TestDatasourceExoscale, self).setUp() + self.tmp = self.tmp_dir() + self.password_url = TEST_PASSWORD_URL + self.metadata_url = TEST_METADATA_URL + self.userdata_url = TEST_USERDATA_URL + + def test_password_saved(self): + """The password is not set when it is not found + in the metadata service.""" + httpretty.register_uri(httpretty.GET, + self.password_url, + body="saved_password") + self.assertFalse(get_password()) + + def test_password_empty(self): + """No password is set if the metadata service returns + an empty string.""" + httpretty.register_uri(httpretty.GET, + self.password_url, + body="") + self.assertFalse(get_password()) + + def test_password(self): + """The password is set to what is found in the metadata + service.""" + expected_password = "p@ssw0rd" + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_password) + password = get_password() + self.assertEqual(expected_password, password) + + def test_get_data(self): + """The datasource conforms to expected behavior when supplied + full test data.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_password = "p@ssw0rd" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_password) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'ssh_pwauth': True, + 'password': expected_password, + 'cloud_config_modules': [ + ["set-passwords", "always"]], + 'chpasswd': { + 'expire': False, + }}) + + def test_get_data_saved_password(self): + """The datasource conforms to expected behavior when saved_password is + returned by the password server.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_answer = "saved_password" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + 
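        # The index URI registered above returns the listing
        # 'instance-id\nlocal-hostname'; ec2.get_instance_metadata() then
        # fetches each listed leaf individually, which is why the two leaf
        # URIs are registered separately below.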
httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'cloud_config_modules': [ + ["set-passwords", "always"]]}) + + def test_get_data_no_password(self): + """The datasource conforms to expected behavior when no password is + returned by the password server.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_answer = "" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'cloud_config_modules': [ + ["set-passwords", "always"]]}) + + @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') + def test_read_metadata_when_password_server_unreachable(self, m_password): + """The read_metadata function returns partial results in case the + password server (only) is unreachable.""" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + + m_password.side_effect = requests.Timeout('Fake Connection Timeout') + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + + result = read_metadata() + + self.assertIsNone(result.get("password")) + self.assertEqual(result.get("user-data").decode("utf-8"), + expected_userdata) + + def test_non_viable_platform(self): + """The datasource fails fast when the platform is not viable.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: False + self.assertFalse(ds._get_data()) diff --git a/tools/ds-identify b/tools/ds-identify index 0305e361..e0d4865c 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -124,7 +124,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. 
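# Note: listing a datasource name below only makes it eligible; a source is
# reported only when its matching dscheck_<name>() function (for Exoscale,
# a DMI product-name match further down) returns $DS_FOUND.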
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner IBMCloud Oracle" +OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -553,6 +553,11 @@ dscheck_CloudStack() { return $DS_NOT_FOUND } +dscheck_Exoscale() { + dmi_product_name_matches "Exoscale*" && return $DS_FOUND + return $DS_NOT_FOUND +} + dscheck_CloudSigma() { # http://paste.ubuntu.com/23624795/ dmi_product_name_matches "CloudSigma" && return $DS_FOUND -- cgit v1.2.3 From 155847209e6a3ed5face91a133d8488a703f3f93 Mon Sep 17 00:00:00 2001 From: Rick Wright Date: Fri, 9 Aug 2019 17:11:05 +0000 Subject: Add support for publishing host keys to GCE guest attributes This adds an empty publish_host_keys() method to the default datasource that is called by cc_ssh.py. This feature can be controlled by the 'ssh_publish_hostkeys' config option. It is enabled by default but can be disabled by setting 'enabled' to false. Also, a blacklist of key types is supported. In addition, this change implements ssh_publish_hostkeys() for the GCE datasource, attempting to write the hostkeys to the instance's guest attributes. Using these hostkeys for ssh connections is currently supported by the alpha version of Google's 'gcloud' command-line tool. (On Google Compute Engine, this feature will be enabled by setting the 'enable-guest-attributes' metadata key to 'true' for the project/instance that you would like to use this feature for. When connecting to the instance for the first time using 'gcloud compute ssh' the hostkeys will be read from the guest attributes for the instance and written to the user's local known_hosts file for Google Compute Engine instances.) --- cloudinit/config/cc_ssh.py | 55 +++++++++ cloudinit/config/tests/test_ssh.py | 166 ++++++++++++++++++++++++++++ cloudinit/sources/DataSourceGCE.py | 22 +++- cloudinit/sources/__init__.py | 10 ++ cloudinit/url_helper.py | 9 +- tests/unittests/test_datasource/test_gce.py | 18 +++ 6 files changed, 274 insertions(+), 6 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index f8f7cb35..53f69399 100755 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -91,6 +91,9 @@ public keys. ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ... - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ... + ssh_publish_hostkeys: + enabled: (Defaults to true) + blacklist: (Defaults to [dsa]) """ import glob @@ -104,6 +107,10 @@ from cloudinit import util GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519'] KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' +PUBLISH_HOST_KEYS = True +# Don't publish the dsa hostkey by default since OpenSSH recommends not using +# it. 
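# For illustration, the user cloud-config stanza consumed by handle() below
# takes this shape:
#
#   ssh_publish_hostkeys:
#     enabled: true          # set false to skip publishing entirely
#     blacklist: ['dsa']     # key types to withhold from publishing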
+HOST_KEY_PUBLISH_BLACKLIST = ['dsa'] CONFIG_KEY_TO_FILE = {} PRIV_TO_PUB = {} @@ -176,6 +183,23 @@ def handle(_name, cfg, cloud, log, _args): util.logexc(log, "Failed generating key type %s to " "file %s", keytype, keyfile) + if "ssh_publish_hostkeys" in cfg: + host_key_blacklist = util.get_cfg_option_list( + cfg["ssh_publish_hostkeys"], "blacklist", + HOST_KEY_PUBLISH_BLACKLIST) + publish_hostkeys = util.get_cfg_option_bool( + cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS) + else: + host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST + publish_hostkeys = PUBLISH_HOST_KEYS + + if publish_hostkeys: + hostkeys = get_public_host_keys(blacklist=host_key_blacklist) + try: + cloud.datasource.publish_host_keys(hostkeys) + except Exception as e: + util.logexc(log, "Publishing host keys failed!") + try: (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) (user, _user_config) = ug_util.extract_default(users) @@ -209,4 +233,35 @@ def apply_credentials(keys, user, disable_root, disable_root_opts): ssh_util.setup_user_keys(keys, 'root', options=key_prefix) + +def get_public_host_keys(blacklist=None): + """Read host keys from /etc/ssh/*.pub files and return them as a list. + + @param blacklist: List of key types to ignore. e.g. ['dsa', 'rsa'] + @returns: List of keys, each formatted as a two-element tuple. + e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')] + """ + public_key_file_tmpl = '%s.pub' % (KEY_FILE_TPL,) + key_list = [] + blacklist_files = [] + if blacklist: + # Convert blacklist to filenames: + # 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub' + blacklist_files = [public_key_file_tmpl % (key_type,) + for key_type in blacklist] + # Get list of public key files and filter out blacklisted files. + file_list = [hostfile for hostfile + in glob.glob(public_key_file_tmpl % ('*',)) + if hostfile not in blacklist_files] + + # Read host key files, retrieve first two fields as a tuple and + # append that tuple to key_list. + for file_name in file_list: + file_contents = util.load_file(file_name) + key_data = file_contents.split() + if key_data and len(key_data) > 1: + key_list.append(tuple(key_data[:2])) + return key_list + + # vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py index c8a4271f..e7789842 100644 --- a/cloudinit/config/tests/test_ssh.py +++ b/cloudinit/config/tests/test_ssh.py @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. +import os.path from cloudinit.config import cc_ssh from cloudinit import ssh_util @@ -12,6 +13,25 @@ MODPATH = "cloudinit.config.cc_ssh." 
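# (MODPATH = "cloudinit.config.cc_ssh." is the dotted prefix the @mock.patch
# decorators in the tests below use to patch cc_ssh's collaborators in place,
# e.g. mock.patch(MODPATH + "glob.glob").)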
class TestHandleSsh(CiTestCase): """Test cc_ssh handling of ssh config.""" + def _publish_hostkey_test_setup(self): + self.test_hostkeys = { + 'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'), + 'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'), + 'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'), + 'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'), + } + self.test_hostkey_files = [] + hostkey_tmpdir = self.tmp_dir() + for key_type in ['dsa', 'ecdsa', 'ed25519', 'rsa']: + key_data = self.test_hostkeys[key_type] + filename = 'ssh_host_%s_key.pub' % key_type + filepath = os.path.join(hostkey_tmpdir, filename) + self.test_hostkey_files.append(filepath) + with open(filepath, 'w') as f: + f.write(' '.join(key_data)) + + cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key') + def test_apply_credentials_with_user(self, m_setup_keys): """Apply keys for the given user and root.""" keys = ["key1"] @@ -64,6 +84,7 @@ class TestHandleSsh(CiTestCase): # Mock os.path.exits to True to short-circuit the key writing logic m_path_exists.return_value = True m_nug.return_value = ([], {}) + cc_ssh.PUBLISH_HOST_KEYS = False cloud = self.tmp_cloud( distro='ubuntu', metadata={'public-keys': keys}) cc_ssh.handle("name", cfg, cloud, None, None) @@ -149,3 +170,148 @@ class TestHandleSsh(CiTestCase): self.assertEqual([mock.call(set(keys), user), mock.call(set(keys), "root", options="")], m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_default( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['ecdsa', 'ed25519', 'rsa']] + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_enable( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = False + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. 
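        # (handle() globs twice: once while handling existing host key files
        # and once inside get_public_host_keys(); the iter() below feeds
        # those two calls in order.)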
+ m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': True}} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['ecdsa', 'ed25519', 'rsa']] + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_disable( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': False}} + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertFalse(cloud.datasource.publish_host_keys.call_args_list) + cloud.datasource.publish_host_keys.assert_not_called() + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_blacklist( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': True, + 'blacklist': ['dsa', 'rsa']}} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['ecdsa', 'ed25519']] + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_empty_blacklist( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. 
+ m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': True, + 'blacklist': []}} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['dsa', 'ecdsa', 'ed25519', 'rsa']] + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index d8162623..6cbfbbac 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -18,10 +18,13 @@ LOG = logging.getLogger(__name__) MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/' BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL} REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') +GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' + 'v1/instance/guest-attributes') +HOSTKEY_NAMESPACE = 'hostkeys' +HEADERS = {'Metadata-Flavor': 'Google'} class GoogleMetadataFetcher(object): - headers = {'Metadata-Flavor': 'Google'} def __init__(self, metadata_address): self.metadata_address = metadata_address @@ -32,7 +35,7 @@ class GoogleMetadataFetcher(object): url = self.metadata_address + path if is_recursive: url += '/?recursive=True' - resp = url_helper.readurl(url=url, headers=self.headers) + resp = url_helper.readurl(url=url, headers=HEADERS) except url_helper.UrlError as exc: msg = "url %s raised exception %s" LOG.debug(msg, path, exc) @@ -90,6 +93,10 @@ class DataSourceGCE(sources.DataSource): public_keys_data = self.metadata['public-keys-data'] return _parse_public_keys(public_keys_data, self.default_user) + def publish_host_keys(self, hostkeys): + for key in hostkeys: + _write_host_key_to_guest_attributes(*key) + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): # GCE has long FDQN's and has asked for short hostnames. return self.metadata['local-hostname'].split('.')[0] @@ -103,6 +110,17 @@ class DataSourceGCE(sources.DataSource): return self.availability_zone.rsplit('-', 1)[0] +def _write_host_key_to_guest_attributes(key_type, key_value): + url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type) + key_value = key_value.encode('utf-8') + resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS, + request_method='PUT', check_status=False) + if resp.ok(): + LOG.debug('Wrote %s host key to guest attributes.', key_type) + else: + LOG.debug('Unable to write %s host key to guest attributes.', key_type) + + def _has_expired(public_key): # Check whether an SSH key is expired. Public key input is a single SSH # public key in the GCE specific key format documented here: diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index c2baccd5..a319322b 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -491,6 +491,16 @@ class DataSource(object): def get_public_ssh_keys(self): return normalize_pubkey_data(self.metadata.get('public-keys')) + def publish_host_keys(self, hostkeys): + """Publish the public SSH host keys (found in /etc/ssh/*.pub). 
+
+        @param hostkeys: List of host key tuples (key_type, key_value),
+            where key_type is the first field in the public key file
+            (e.g. 'ssh-rsa') and key_value is the key itself
+            (e.g. 'AAAAB3NzaC1y...').
+        """
+        pass
+
     def _remap_device(self, short_name):
         # LP: #611137
         # the metadata service may believe that devices are named 'sda'
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 0af0d9e3..44ee61d4 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -199,18 +199,19 @@ def _get_ssl_args(url, ssl_details):
 def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
             headers=None, headers_cb=None, ssl_details=None,
             check_status=True, allow_redirects=True, exception_cb=None,
-            session=None, infinite=False, log_req_resp=True):
+            session=None, infinite=False, log_req_resp=True,
+            request_method=None):
     url = _cleanurl(url)
     req_args = {
         'url': url,
     }
     req_args.update(_get_ssl_args(url, ssl_details))
     req_args['allow_redirects'] = allow_redirects
-    req_args['method'] = 'GET'
+    if not request_method:
+        request_method = 'POST' if data else 'GET'
+    req_args['method'] = request_method
     if timeout is not None:
         req_args['timeout'] = max(float(timeout), 0)
-    if data:
-        req_args['method'] = 'POST'
     # It doesn't seem like config
     # was added in older library versions (or newer ones either), thus we
     # need to manually do the retries if it wasn't...
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 41176c6a..67744d32 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -55,6 +55,8 @@ GCE_USER_DATA_TEXT = {
 HEADERS = {'Metadata-Flavor': 'Google'}
 MD_URL_RE = re.compile(
     r'http://metadata.google.internal/computeMetadata/v1/.*')
+GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
+                        'v1/instance/guest-attributes/hostkeys/')
 
 def _set_mock_metadata(gce_meta=None):
@@ -341,4 +343,20 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
             public_key_data, default_user='default')
         self.assertEqual(sorted(found), sorted(expected))
 
+    @mock.patch("cloudinit.url_helper.readurl")
+    def test_publish_host_keys(self, m_readurl):
+        hostkeys = [('ssh-rsa', 'asdfasdf'),
+                    ('ssh-ed25519', 'qwerqwer')]
+        readurl_expected_calls = [
+            mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS,
+                      request_method='PUT',
+                      url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')),
+            mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS,
+                      request_method='PUT',
+                      url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')),
+        ]
+        self.ds.publish_host_keys(hostkeys)
+        m_readurl.assert_has_calls(readurl_expected_calls, any_order=True)
+
+
 # vi: ts=4 expandtab
-- 
cgit v1.2.3


From 7f674256c1426ffc419fd6b13e66a58754d94939 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Tue, 13 Aug 2019 20:13:05 +0000
Subject: azure/net: generate_fallback_nic emits network v2 config instead of v1

The function generate_fallback_config is used by Azure by default when
not consuming IMDS configuration data. This function is also used by any
datasource which does not implement its own network config. This simple
fallback configuration sets up dhcp on the most likely NIC. It will now
emit network v2 instead of network v1.

This is a step toward having all components talk in v2 and allows us to
avoid costly conversions between v1 and v2 for newer distributions which
rely on netplan.
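As an illustration (values paraphrased from the updated tests below), the
single-NIC fallback that was previously rendered as v1:

    {'version': 1,
     'config': [{'type': 'physical', 'name': 'eth0',
                 'mac_address': 'aa:bb:cc:aa:bb:cc',
                 'subnets': [{'type': 'dhcp'}]}]}

is now rendered as v2:

    {'version': 2,
     'ethernets': {'eth0': {'dhcp4': True,
                            'match': {'macaddress': 'aa:bb:cc:aa:bb:cc'},
                            'set-name': 'eth0'}}}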
--- cloudinit/net/__init__.py | 31 +++++--------- cloudinit/net/network_state.py | 12 ++++-- cloudinit/net/tests/test_init.py | 19 +++++---- cloudinit/sources/DataSourceAzure.py | 7 +++- tests/unittests/test_datasource/test_azure.py | 59 ++++++++++++++++++++++++++- tests/unittests/test_net.py | 41 +++++++++++++++++-- 6 files changed, 130 insertions(+), 39 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index f3cec794..ea707c09 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -265,32 +265,23 @@ def find_fallback_nic(blacklist_drivers=None): def generate_fallback_config(blacklist_drivers=None, config_driver=None): - """Determine which attached net dev is most likely to have a connection and - generate network state to run dhcp on that interface""" - + """Generate network cfg v2 for dhcp on the NIC most likely connected.""" if not config_driver: config_driver = False target_name = find_fallback_nic(blacklist_drivers=blacklist_drivers) - if target_name: - target_mac = read_sys_net_safe(target_name, 'address') - nconf = {'config': [], 'version': 1} - cfg = {'type': 'physical', 'name': target_name, - 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]} - # inject the device driver name, dev_id into config if enabled and - # device has a valid device driver value - if config_driver: - driver = device_driver(target_name) - if driver: - cfg['params'] = { - 'driver': driver, - 'device_id': device_devid(target_name), - } - nconf['config'].append(cfg) - return nconf - else: + if not target_name: # can't read any interfaces addresses (or there are none); give up return None + target_mac = read_sys_net_safe(target_name, 'address') + cfg = {'dhcp4': True, 'set-name': target_name, + 'match': {'macaddress': target_mac.lower()}} + if config_driver: + driver = device_driver(target_name) + if driver: + cfg['match']['driver'] = driver + nconf = {'ethernets': {target_name: cfg}, 'version': 2} + return nconf def extract_physdevs(netcfg): diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 0ca576b6..c0c415d0 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -596,6 +596,7 @@ class NetworkStateInterpreter(object): eno1: match: macaddress: 00:11:22:33:44:55 + driver: hv_netsvc wakeonlan: true dhcp4: true dhcp6: false @@ -631,15 +632,18 @@ class NetworkStateInterpreter(object): 'type': 'physical', 'name': cfg.get('set-name', eth), } - mac_address = cfg.get('match', {}).get('macaddress', None) + match = cfg.get('match', {}) + mac_address = match.get('macaddress', None) if not mac_address: LOG.debug('NetworkState Version2: missing "macaddress" info ' 'in config entry: %s: %s', eth, str(cfg)) - phy_cmd.update({'mac_address': mac_address}) - + phy_cmd['mac_address'] = mac_address + driver = match.get('driver', None) + if driver: + phy_cmd['params'] = {'driver': driver} for key in ['mtu', 'match', 'wakeonlan']: if key in cfg: - phy_cmd.update({key: cfg.get(key)}) + phy_cmd[key] = cfg[key] subnets = self._v2_to_v1_ipcfg(cfg) if len(subnets) > 0: diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index e6e77d7a..d2e38f00 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -212,9 +212,9 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 
'eth1', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': {'eth1': {'match': {'macaddress': mac}, + 'dhcp4': True, 'set-name': 'eth1'}}, + 'version': 2} self.assertEqual(expected, net.generate_fallback_config()) def test_generate_fallback_finds_dormant_eth_with_mac(self): @@ -223,9 +223,9 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': {'eth0': {'match': {'macaddress': mac}, 'dhcp4': True, + 'set-name': 'eth0'}}, + 'version': 2} self.assertEqual(expected, net.generate_fallback_config()) def test_generate_fallback_finds_eth_by_operstate(self): @@ -233,9 +233,10 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': { + 'eth0': {'dhcp4': True, 'match': {'macaddress': mac}, + 'set-name': 'eth0'}}, + 'version': 2} valid_operstates = ['dormant', 'down', 'lowerlayerdown', 'unknown'] for state in valid_operstates: write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d2fad9bb..e6ed2f3b 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1241,7 +1241,7 @@ def parse_network_config(imds_metadata): privateIpv4 = addr4['privateIpAddress'] if privateIpv4: if dev_config.get('dhcp4', False): - # Append static address config for nic > 1 + # Append static address config for ip > 1 netPrefix = intf['ipv4']['subnet'][0].get( 'prefix', '24') if not dev_config.get('addresses'): @@ -1251,6 +1251,11 @@ def parse_network_config(imds_metadata): ip=privateIpv4, prefix=netPrefix)) else: dev_config['dhcp4'] = True + # non-primary interfaces should have a higher + # route-metric (cost) so default routes prefer + # primary nic due to lower route-metric value + dev_config['dhcp4-overrides'] = { + 'route-metric': (idx + 1) * 100} for addr6 in intf['ipv6']['ipAddress']: privateIpv6 = addr6['privateIpAddress'] if privateIpv6: diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 2de2aea2..4d57cebc 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -12,6 +12,7 @@ from cloudinit.tests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, ExitStack, resourceLocation) +import copy import crypt import httpretty import json @@ -129,6 +130,26 @@ NETWORK_METADATA = { } } +SECONDARY_INTERFACE = { + "macAddress": "220D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.1.0" + } + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.1.5", + } + ] + } +} + MOCKPATH = 'cloudinit.sources.DataSourceAzure.' 
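# (The route-metric values asserted in the tests below follow from the
# dhcp4-overrides assignment in parse_network_config above:
# metric = (idx + 1) * 100, i.e. 100 for the primary NIC, 200 for the
# second, 300 for the third, so default routes prefer the primary NIC.)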
@@ -619,8 +640,43 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'set-name': 'eth0', 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'dhcp4': True}}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}}, + 'version': 2} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(expected_network_config, dsrc.network_config) + + def test_network_config_set_from_imds_route_metric_for_secondary_nic(self): + """Datasource.network_config adds route-metric to secondary nics.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + expected_network_config = { + 'ethernets': { + 'eth0': {'set-name': 'eth0', + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + imds_data['network']['interface'].append(third_intf) + + self.m_get_metadata_from_imds.return_value = imds_data dsrc = self._get_ds(data) dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) @@ -925,6 +981,7 @@ scbus-1 on xpt0 bus 0 expected_cfg = { 'ethernets': { 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, 'match': {'macaddress': '00:0d:3a:04:75:98'}, 'set-name': 'eth0'}}, 'version': 2} diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 1840ade0..4f7e4207 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2156,7 +2156,7 @@ DEFAULT_DEV_ATTRS = { "carrier": False, "dormant": False, "operstate": "down", - "address": "07-1C-C6-75-A4-BE", + "address": "07-1c-c6-75-a4-be", "device/driver": None, "device/device": None, "name_assign_type": "4", @@ -2204,6 +2204,39 @@ class TestGenerateFallbackConfig(CiTestCase): "cloudinit.util.get_cmdline", "m_get_cmdline", return_value="root=/dev/sda1") + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_device_driver_v2(self, mock_get_devicelist, mock_read_sys_net, + mock_sys_dev_path): + """Network configuration for generate_fallback_config is version 2.""" + devices = { + 'eth0': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'hv_netsvc', 'device/device': '0x3', + 'name_assign_type': '4'}, + 'eth1': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'mlx4_core', 'device/device': '0x7', + 'name_assign_type': '4'}, + + } + + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path, + dev_attrs=devices) + + network_cfg = net.generate_fallback_config(config_driver=True) + expected = { + 'ethernets': {'eth0': {'dhcp4': True, 'set-name': 'eth0', + 'match': {'macaddress': 
'00:11:22:33:44:55', + 'driver': 'hv_netsvc'}}}, + 'version': 2} + self.assertEqual(expected, network_cfg) + @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") @@ -2486,7 +2519,7 @@ class TestRhelSysConfigRendering(CiTestCase): # BOOTPROTO=dhcp DEVICE=eth1000 -HWADDR=07-1C-C6-75-A4-BE +HWADDR=07-1c-c6-75-a4-be NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto @@ -3030,7 +3063,7 @@ class TestOpenSuseSysConfigRendering(CiTestCase): # BOOTPROTO=dhcp DEVICE=eth1000 -HWADDR=07-1C-C6-75-A4-BE +HWADDR=07-1c-c6-75-a4-be NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto @@ -3342,13 +3375,13 @@ class TestNetplanNetRendering(CiTestCase): expected = """ network: - version: 2 ethernets: eth1000: dhcp4: true match: macaddress: 07-1c-c6-75-a4-be set-name: eth1000 + version: 2 """ self.assertEqual(expected.lstrip(), contents.lstrip()) self.assertEqual(1, mock_clean_default.call_count) -- cgit v1.2.3 From 0e79a1b89287358a77fe31fb82c4bcd83ff48894 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 14 Aug 2019 20:44:50 +0000 Subject: DataSourceOracle: configure secondary NICs on Virtual Machines Oracle Cloud Infrastructure's Instance Metadata Service provides network configuration information for non-primary NICs. This commit introduces support, on Virtual Machines[0], for fetching that network metadata, converting it to v1 network-config[1] and combining it into the network configuration generated for the primary interface. By default, this behaviour is not enabled. Configuring the Oracle datasource to `configure_secondary_nics` enables it: datasource: Oracle: configure_secondary_nics: true Failures to fetch and generate secondary NIC configuration will log a warning, but otherwise will not affect boot. [0] The expected use of the IMDS-provided network configuration is substantially different on Bare Metal Machines, so support for that will be addressed separately. [1] This is v1 config, because cloudinit.net.cmdline generates v1 config and we need to integrate the secondary NICs into that configuration. --- cloudinit/sources/DataSourceOracle.py | 89 +++++++++++++- cloudinit/sources/tests/test_oracle.py | 214 ++++++++++++++++++++++++++++++++- doc/rtd/topics/datasources/oracle.rst | 25 +++- 3 files changed, 324 insertions(+), 4 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index 76cfa38c..086af799 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -16,7 +16,7 @@ Notes: """ from cloudinit.url_helper import combine_url, readurl, UrlError -from cloudinit.net import dhcp +from cloudinit.net import dhcp, get_interfaces_by_mac from cloudinit import net from cloudinit import sources from cloudinit import util @@ -28,8 +28,80 @@ import re LOG = logging.getLogger(__name__) +BUILTIN_DS_CONFIG = { + # Don't use IMDS to configure secondary NICs by default + 'configure_secondary_nics': False, +} CHASSIS_ASSET_TAG = "OracleCloud.com" METADATA_ENDPOINT = "http://169.254.169.254/openstack/" +VNIC_METADATA_URL = 'http://169.254.169.254/opc/v1/vnics/' +# https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview, +# indicates that an MTU of 9000 is used within OCI +MTU = 9000 + + +def _add_network_config_from_opc_imds(network_config): + """ + Fetch data from Oracle's IMDS, generate secondary NIC config, merge it. 
+ + The primary NIC configuration should not be modified based on the IMDS + values, as it should continue to be configured for DHCP. As such, this + takes an existing network_config dict which is expected to have the primary + NIC configuration already present. It will mutate the given dict to + include the secondary VNICs. + + :param network_config: + A v1 network config dict with the primary NIC already configured. This + dict will be mutated. + + :raises: + Exceptions are not handled within this function. Likely exceptions are + those raised by url_helper.readurl (if communicating with the IMDS + fails), ValueError/JSONDecodeError (if the IMDS returns invalid JSON), + and KeyError/IndexError (if the IMDS returns valid JSON with unexpected + contents). + """ + resp = readurl(VNIC_METADATA_URL) + vnics = json.loads(str(resp)) + + if 'nicIndex' in vnics[0]: + # TODO: Once configure_secondary_nics defaults to True, lower the level + # of this log message. (Currently, if we're running this code at all, + # someone has explicitly opted-in to secondary VNIC configuration, so + # we should warn them that it didn't happen. Once it's default, this + # would be emitted on every Bare Metal Machine launch, which means INFO + # or DEBUG would be more appropriate.) + LOG.warning( + 'VNIC metadata indicates this is a bare metal machine; skipping' + ' secondary VNIC configuration.' + ) + return + + interfaces_by_mac = get_interfaces_by_mac() + + for vnic_dict in vnics[1:]: + # We skip the first entry in the response because the primary interface + # is already configured by iSCSI boot; applying configuration from the + # IMDS is not required. + mac_address = vnic_dict['macAddr'].lower() + if mac_address not in interfaces_by_mac: + LOG.debug('Interface with MAC %s not found; skipping', mac_address) + continue + name = interfaces_by_mac[mac_address] + subnet = { + 'type': 'static', + 'address': vnic_dict['privateIp'], + 'netmask': vnic_dict['subnetCidrBlock'].split('/')[1], + 'gateway': vnic_dict['virtualRouterIp'], + 'control': 'manual', + } + network_config['config'].append({ + 'name': name, + 'type': 'physical', + 'mac_address': mac_address, + 'mtu': MTU, + 'subnets': [subnet], + }) class DataSourceOracle(sources.DataSource): @@ -39,6 +111,13 @@ class DataSourceOracle(sources.DataSource): vendordata_pure = None _network_config = sources.UNSET + def __init__(self, sys_cfg, *args, **kwargs): + super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs) + + self.ds_cfg = util.mergemanydict([ + util.get_cfg_by_path(sys_cfg, ['datasource', self.dsname], {}), + BUILTIN_DS_CONFIG]) + def _is_platform_viable(self): """Check platform environment to report if this datasource may run.""" return _is_platform_viable() @@ -121,6 +200,14 @@ class DataSourceOracle(sources.DataSource): self._network_config = cmdline.read_initramfs_config() if not self._network_config: self._network_config = self.distro.generate_fallback_config() + if self.ds_cfg.get('configure_secondary_nics'): + try: + # Mutate self._network_config to include secondary VNICs + _add_network_config_from_opc_imds(self._network_config) + except Exception: + util.logexc( + LOG, + "Failed to fetch secondary network configuration!") return self._network_config diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index 282382c5..3e146776 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -18,10 +18,52 @@ import uuid DS_PATH = "cloudinit.sources.DataSourceOracle" 
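# (Note on the fixtures below: _add_network_config_from_opc_imds derives the
# v1 'netmask' from the CIDR prefix length in subnetCidrBlock, e.g.
# '10.0.0.0/24'.split('/')[1] -> '24', and lowercases macAddr before looking
# it up in get_interfaces_by_mac().)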
MD_VER = "2013-10-17" +# `curl -L http://169.254.169.254/opc/v1/vnics/` on a Oracle Bare Metal Machine +# with a secondary VNIC attached (vnicId truncated for Python line length) +OPC_BM_SECONDARY_VNIC_RESPONSE = """\ +[ { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtyvcucqkhdqmgjszebxe4hrb!!TRUNCATED||", + "privateIp" : "10.0.0.8", + "vlanTag" : 0, + "macAddr" : "90:e2:ba:d4:f1:68", + "virtualRouterIp" : "10.0.0.1", + "subnetCidrBlock" : "10.0.0.0/24", + "nicIndex" : 0 +}, { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtfmkxjdy2sqidndiwrsg63zf!!TRUNCATED||", + "privateIp" : "10.0.4.5", + "vlanTag" : 1, + "macAddr" : "02:00:17:05:CF:51", + "virtualRouterIp" : "10.0.4.1", + "subnetCidrBlock" : "10.0.4.0/24", + "nicIndex" : 0 +} ]""" + +# `curl -L http://169.254.169.254/opc/v1/vnics/` on a Oracle Virtual Machine +# with a secondary VNIC attached +OPC_VM_SECONDARY_VNIC_RESPONSE = """\ +[ { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtch72z5pd76cc2636qeqh7z_truncated", + "privateIp" : "10.0.0.230", + "vlanTag" : 1039, + "macAddr" : "02:00:17:05:D1:DB", + "virtualRouterIp" : "10.0.0.1", + "subnetCidrBlock" : "10.0.0.0/24" +}, { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljt4iew3gwmvrwrhhf3bp5drj_truncated", + "privateIp" : "10.0.0.231", + "vlanTag" : 1041, + "macAddr" : "00:00:17:02:2B:B1", + "virtualRouterIp" : "10.0.0.1", + "subnetCidrBlock" : "10.0.0.0/24" +} ]""" + class TestDataSourceOracle(test_helpers.CiTestCase): """Test datasource DataSourceOracle.""" + with_logs = True + ds_class = oracle.DataSourceOracle my_uuid = str(uuid.uuid4()) @@ -79,6 +121,16 @@ class TestDataSourceOracle(test_helpers.CiTestCase): self.assertEqual( 'metadata (http://169.254.169.254/openstack/)', ds.subplatform) + def test_sys_cfg_can_enable_configure_secondary_nics(self): + # Confirm that behaviour is toggled by sys_cfg + ds, _mocks = self._get_ds() + self.assertFalse(ds.ds_cfg['configure_secondary_nics']) + + sys_cfg = { + 'datasource': {'Oracle': {'configure_secondary_nics': True}}} + ds, _mocks = self._get_ds(sys_cfg=sys_cfg) + self.assertTrue(ds.ds_cfg['configure_secondary_nics']) + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) def test_without_userdata(self, m_is_iscsi_root): """If no user-data is provided, it should not be in return dict.""" @@ -133,9 +185,12 @@ class TestDataSourceOracle(test_helpers.CiTestCase): self.assertEqual(self.my_md['uuid'], ds.get_instance_id()) self.assertEqual(my_userdata, ds.userdata_raw) + @mock.patch(DS_PATH + "._add_network_config_from_opc_imds", + side_effect=lambda network_config: network_config) @mock.patch(DS_PATH + ".cmdline.read_initramfs_config") @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) - def test_network_cmdline(self, m_is_iscsi_root, m_initramfs_config): + def test_network_cmdline(self, m_is_iscsi_root, m_initramfs_config, + _m_add_network_config_from_opc_imds): """network_config should read kernel cmdline.""" distro = mock.MagicMock() ds, _ = self._get_ds(distro=distro, patches={ @@ -151,9 +206,12 @@ class TestDataSourceOracle(test_helpers.CiTestCase): self.assertEqual([mock.call()], m_initramfs_config.call_args_list) self.assertFalse(distro.generate_fallback_config.called) + @mock.patch(DS_PATH + "._add_network_config_from_opc_imds", + side_effect=lambda network_config: network_config) @mock.patch(DS_PATH + ".cmdline.read_initramfs_config") @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) - def test_network_fallback(self, m_is_iscsi_root, m_initramfs_config): + def test_network_fallback(self, m_is_iscsi_root, m_initramfs_config, + 
_m_add_network_config_from_opc_imds): """test that fallback network is generated if no kernel cmdline.""" distro = mock.MagicMock() ds, _ = self._get_ds(distro=distro, patches={ @@ -175,6 +233,76 @@ class TestDataSourceOracle(test_helpers.CiTestCase): self.assertEqual(ncfg, ds.network_config) self.assertEqual(1, m_initramfs_config.call_count) + @mock.patch(DS_PATH + "._add_network_config_from_opc_imds") + @mock.patch(DS_PATH + ".cmdline.read_initramfs_config", + return_value={'some': 'config'}) + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_secondary_nics_added_to_network_config_if_enabled( + self, _m_is_iscsi_root, _m_initramfs_config, + m_add_network_config_from_opc_imds): + + needle = object() + + def network_config_side_effect(network_config): + network_config['secondary_added'] = needle + + m_add_network_config_from_opc_imds.side_effect = ( + network_config_side_effect) + + distro = mock.MagicMock() + ds, _ = self._get_ds(distro=distro, patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md}}}}) + ds.ds_cfg['configure_secondary_nics'] = True + self.assertEqual(needle, ds.network_config['secondary_added']) + + @mock.patch(DS_PATH + "._add_network_config_from_opc_imds") + @mock.patch(DS_PATH + ".cmdline.read_initramfs_config", + return_value={'some': 'config'}) + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_secondary_nics_not_added_to_network_config_by_default( + self, _m_is_iscsi_root, _m_initramfs_config, + m_add_network_config_from_opc_imds): + + def network_config_side_effect(network_config): + network_config['secondary_added'] = True + + m_add_network_config_from_opc_imds.side_effect = ( + network_config_side_effect) + + distro = mock.MagicMock() + ds, _ = self._get_ds(distro=distro, patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md}}}}) + self.assertNotIn('secondary_added', ds.network_config) + + @mock.patch(DS_PATH + "._add_network_config_from_opc_imds") + @mock.patch(DS_PATH + ".cmdline.read_initramfs_config") + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_secondary_nic_failure_isnt_blocking( + self, _m_is_iscsi_root, m_initramfs_config, + m_add_network_config_from_opc_imds): + + m_add_network_config_from_opc_imds.side_effect = Exception() + + distro = mock.MagicMock() + ds, _ = self._get_ds(distro=distro, patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md}}}}) + ds.ds_cfg['configure_secondary_nics'] = True + self.assertEqual(ds.network_config, m_initramfs_config.return_value) + self.assertIn('Failed to fetch secondary network configuration', + self.logs.getvalue()) + @mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4())) class TestReadMetaData(test_helpers.HttprettyTestCase): @@ -335,4 +463,86 @@ class TestLoadIndex(test_helpers.CiTestCase): oracle._load_index("\n".join(["meta_data.json", "user_data"]))) +class TestNetworkConfigFromOpcImds(test_helpers.CiTestCase): + + with_logs = True + + def setUp(self): + super(TestNetworkConfigFromOpcImds, self).setUp() + self.add_patch(DS_PATH + '.readurl', 'm_readurl') + self.add_patch(DS_PATH + '.get_interfaces_by_mac', + 'm_get_interfaces_by_mac') + + def test_failure_to_readurl(self): + # readurl 
failures should just bubble out to the caller + self.m_readurl.side_effect = Exception('oh no') + with self.assertRaises(Exception) as excinfo: + oracle._add_network_config_from_opc_imds({}) + self.assertEqual(str(excinfo.exception), 'oh no') + + def test_empty_response(self): + # empty response error should just bubble out to the caller + self.m_readurl.return_value = '' + with self.assertRaises(Exception): + oracle._add_network_config_from_opc_imds([]) + + def test_invalid_json(self): + # invalid JSON error should just bubble out to the caller + self.m_readurl.return_value = '{' + with self.assertRaises(Exception): + oracle._add_network_config_from_opc_imds([]) + + def test_no_secondary_nics_does_not_mutate_input(self): + self.m_readurl.return_value = json.dumps([{}]) + # We test this by passing in a non-dict to ensure that no dict + # operations are used; failure would be seen as exceptions + oracle._add_network_config_from_opc_imds(object()) + + def test_bare_metal_machine_skipped(self): + # nicIndex in the first entry indicates a bare metal machine + self.m_readurl.return_value = OPC_BM_SECONDARY_VNIC_RESPONSE + # We test this by passing in a non-dict to ensure that no dict + # operations are used + self.assertFalse(oracle._add_network_config_from_opc_imds(object())) + self.assertIn('bare metal machine', self.logs.getvalue()) + + def test_missing_mac_skipped(self): + self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE + self.m_get_interfaces_by_mac.return_value = {} + + network_config = {'version': 1, 'config': [{'primary': 'nic'}]} + oracle._add_network_config_from_opc_imds(network_config) + + self.assertEqual(1, len(network_config['config'])) + self.assertIn( + 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping', + self.logs.getvalue()) + + def test_secondary_nic(self): + self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + + network_config = {'version': 1, 'config': [{'primary': 'nic'}]} + oracle._add_network_config_from_opc_imds(network_config) + + # The input is mutated + self.assertEqual(2, len(network_config['config'])) + + secondary_nic_cfg = network_config['config'][1] + self.assertEqual(nic_name, secondary_nic_cfg['name']) + self.assertEqual('physical', secondary_nic_cfg['type']) + self.assertEqual(mac_addr, secondary_nic_cfg['mac_address']) + self.assertEqual(9000, secondary_nic_cfg['mtu']) + + self.assertEqual(1, len(secondary_nic_cfg['subnets'])) + subnet_cfg = secondary_nic_cfg['subnets'][0] + # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE + self.assertEqual('10.0.0.231', subnet_cfg['address']) + self.assertEqual('24', subnet_cfg['netmask']) + self.assertEqual('10.0.0.1', subnet_cfg['gateway']) + self.assertEqual('manual', subnet_cfg['control']) + # vi: ts=4 expandtab diff --git a/doc/rtd/topics/datasources/oracle.rst b/doc/rtd/topics/datasources/oracle.rst index f2383cee..98c4657c 100644 --- a/doc/rtd/topics/datasources/oracle.rst +++ b/doc/rtd/topics/datasources/oracle.rst @@ -8,7 +8,7 @@ This datasource reads metadata, vendor-data and user-data from Oracle Platform --------------- -OCI provides bare metal and virtual machines. In both cases, +OCI provides bare metal and virtual machines. In both cases, the platform identifies itself via DMI data in the chassis asset tag with the string 'OracleCloud.com'. @@ -22,5 +22,28 @@ Cloud-init has a specific datasource for Oracle in order to: implementation. 
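
When secondary NIC configuration is enabled on a Virtual Machine, each
additional VNIC is appended to the v1 network config roughly as follows (a
sketch assembled from the datasource code; the addresses shown are
illustrative):

.. sourcecode:: yaml

    - type: physical
      name: ens3
      mac_address: "02:00:17:05:d1:db"
      mtu: 9000
      subnets:
        - type: static
          address: 10.0.0.231
          netmask: "24"
          gateway: 10.0.0.1
          control: manual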
+Configuration +------------- + +The following configuration can be set for the datasource in system +configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``). + +The settings that may be configured are: + +* **configure_secondary_nics**: A boolean, defaulting to False. If set + to True on an OCI Virtual Machine, cloud-init will fetch networking + metadata from Oracle's IMDS and use it to configure the non-primary + network interface controllers in the system. If set to True on an + OCI Bare Metal Machine, it will have no effect (though this may + change in the future). + +An example configuration with the default values is provided below: + +.. sourcecode:: yaml + + datasource: + Oracle: + configure_secondary_nics: false + .. _Oracle Compute Infrastructure: https://cloud.oracle.com/ .. vi: textwidth=78 -- cgit v1.2.3 From 2f3bb764626b9065f4102c7c0a67998a9c174444 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Wed, 14 Aug 2019 21:03:13 +0000 Subject: Azure: Record boot timestamps, system information, and diagnostic events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Collect and record the following information through KVP:  + timestamps related to kernel initialization and systemd activation    of cloud-init services  + system information including cloud-init version, kernel version,    distro version, and python version  + diagnostic events for the most common provisioning error issues    such as empty dhcp lease, corrupted ovf-env.xml, etc. + increasing the log frequency of polling IMDS during reprovision. --- cloudinit/sources/DataSourceAzure.py | 157 ++++++++++++++++++++----- cloudinit/sources/helpers/azure.py | 160 ++++++++++++++++++++++++-- tests/unittests/test_datasource/test_azure.py | 15 ++- tests/unittests/test_reporting_hyperv.py | 65 +++++++++++ 4 files changed, 353 insertions(+), 44 deletions(-) mode change 100755 => 100644 tests/unittests/test_reporting_hyperv.py (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index e6ed2f3b..4984fa84 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -26,9 +26,14 @@ from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util from cloudinit.reporting import events -from cloudinit.sources.helpers.azure import (azure_ds_reporter, - azure_ds_telemetry_reporter, - get_metadata_from_fabric) +from cloudinit.sources.helpers.azure import ( + azure_ds_reporter, + azure_ds_telemetry_reporter, + get_metadata_from_fabric, + get_boot_telemetry, + get_system_info, + report_diagnostic_event, + EphemeralDHCPv4WithReporting) LOG = logging.getLogger(__name__) @@ -354,7 +359,7 @@ class DataSourceAzure(sources.DataSource): bname = str(pk['fingerprint'] + ".crt") fp_files += [os.path.join(ddir, bname)] LOG.debug("ssh authentication: " - "using fingerprint from fabirc") + "using fingerprint from fabric") with events.ReportEventStack( name="waiting-for-ssh-public-key", @@ -419,12 +424,17 @@ class DataSourceAzure(sources.DataSource): ret = load_azure_ds_dir(cdev) except NonAzureDataSource: + report_diagnostic_event( + "Did not find Azure data source in %s" % cdev) continue except BrokenAzureDataSource as exc: msg = 'BrokenAzureDataSource: %s' % exc + report_diagnostic_event(msg) raise sources.InvalidMetaDataException(msg) except util.MountFailedError: - LOG.warning("%s was not mountable", cdev) + msg = '%s was not mountable' % cdev + 
report_diagnostic_event(msg) + LOG.warning(msg) continue perform_reprovision = reprovision or self._should_reprovision(ret) @@ -432,6 +442,7 @@ class DataSourceAzure(sources.DataSource): if util.is_FreeBSD(): msg = "FreeBSD is not supported for PPS VMs" LOG.error(msg) + report_diagnostic_event(msg) raise sources.InvalidMetaDataException(msg) ret = self._reprovision() imds_md = get_metadata_from_imds( @@ -450,7 +461,9 @@ class DataSourceAzure(sources.DataSource): break if not found: - raise sources.InvalidMetaDataException('No Azure metadata found') + msg = 'No Azure metadata found' + report_diagnostic_event(msg) + raise sources.InvalidMetaDataException(msg) if found == ddir: LOG.debug("using files cached in %s", ddir) @@ -469,9 +482,14 @@ class DataSourceAzure(sources.DataSource): self._report_ready(lease=self._ephemeral_dhcp_ctx.lease) self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral else: - with EphemeralDHCPv4() as lease: - self._report_ready(lease=lease) - + try: + with EphemeralDHCPv4WithReporting( + azure_ds_reporter) as lease: + self._report_ready(lease=lease) + except Exception as e: + report_diagnostic_event( + "exception while reporting ready: %s" % e) + raise return crawled_data def _is_platform_viable(self): @@ -492,6 +510,16 @@ class DataSourceAzure(sources.DataSource): """ if not self._is_platform_viable(): return False + try: + get_boot_telemetry() + except Exception as e: + LOG.warning("Failed to get boot telemetry: %s", e) + + try: + get_system_info() + except Exception as e: + LOG.warning("Failed to get system information: %s", e) + try: crawled_data = util.log_time( logfunc=LOG.debug, msg='Crawl of metadata service', @@ -551,27 +579,55 @@ class DataSourceAzure(sources.DataSource): headers = {"Metadata": "true"} nl_sock = None report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) + self.imds_logging_threshold = 1 + self.imds_poll_counter = 1 + dhcp_attempts = 0 + vnet_switched = False + return_val = None def exc_cb(msg, exception): if isinstance(exception, UrlError) and exception.code == 404: + if self.imds_poll_counter == self.imds_logging_threshold: + # Reducing the logging frequency as we are polling IMDS + self.imds_logging_threshold *= 2 + LOG.debug("Call to IMDS with arguments %s failed " + "with status code %s after %s retries", + msg, exception.code, self.imds_poll_counter) + LOG.debug("Backing off logging threshold for the same " + "exception to %d", self.imds_logging_threshold) + self.imds_poll_counter += 1 return True + # If we get an exception while trying to call IMDS, we # call DHCP and setup the ephemeral network to acquire the new IP.
+ LOG.debug("Call to IMDS with arguments %s failed with " + "status code %s", msg, exception.code) + report_diagnostic_event("polling IMDS failed with exception %s" + % exception.code) return False LOG.debug("Wait for vnetswitch to happen") while True: try: - # Save our EphemeralDHCPv4 context so we avoid repeated dhcp - self._ephemeral_dhcp_ctx = EphemeralDHCPv4() - lease = self._ephemeral_dhcp_ctx.obtain_lease() + # Save our EphemeralDHCPv4 context to avoid repeated dhcp + with events.ReportEventStack( + name="obtain-dhcp-lease", + description="obtain dhcp lease", + parent=azure_ds_reporter): + self._ephemeral_dhcp_ctx = EphemeralDHCPv4() + lease = self._ephemeral_dhcp_ctx.obtain_lease() + + if vnet_switched: + dhcp_attempts += 1 if report_ready: try: nl_sock = netlink.create_bound_netlink_socket() except netlink.NetlinkCreateSocketError as e: + report_diagnostic_event(e) LOG.warning(e) self._ephemeral_dhcp_ctx.clean_network() - return + break + path = REPORTED_READY_MARKER_FILE LOG.info( "Creating a marker file to report ready: %s", path) @@ -579,17 +635,33 @@ class DataSourceAzure(sources.DataSource): pid=os.getpid(), time=time())) self._report_ready(lease=lease) report_ready = False - try: - netlink.wait_for_media_disconnect_connect( - nl_sock, lease['interface']) - except AssertionError as error: - LOG.error(error) - return + + with events.ReportEventStack( + name="wait-for-media-disconnect-connect", + description="wait for vnet switch", + parent=azure_ds_reporter): + try: + netlink.wait_for_media_disconnect_connect( + nl_sock, lease['interface']) + except AssertionError as error: + report_diagnostic_event(error) + LOG.error(error) + break + + vnet_switched = True self._ephemeral_dhcp_ctx.clean_network() else: - return readurl(url, timeout=IMDS_TIMEOUT_IN_SECONDS, - headers=headers, exception_cb=exc_cb, - infinite=True, log_req_resp=False).contents + with events.ReportEventStack( + name="get-reprovision-data-from-imds", + description="get reprovision data from imds", + parent=azure_ds_reporter): + return_val = readurl(url, + timeout=IMDS_TIMEOUT_IN_SECONDS, + headers=headers, + exception_cb=exc_cb, + infinite=True, + log_req_resp=False).contents + break except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry self._ephemeral_dhcp_ctx.clean_network() @@ -598,6 +670,14 @@ class DataSourceAzure(sources.DataSource): if nl_sock: nl_sock.close() + if vnet_switched: + report_diagnostic_event("attempted dhcp %d times after reuse" % + dhcp_attempts) + report_diagnostic_event("polled imds %d times after reuse" % + self.imds_poll_counter) + + return return_val + @azure_ds_telemetry_reporter def _report_ready(self, lease): """Tells the fabric provisioning has completed """ @@ -666,9 +746,12 @@ class DataSourceAzure(sources.DataSource): self.ds_cfg['agent_command']) try: fabric_data = metadata_func() - except Exception: + except Exception as e: + report_diagnostic_event( + "Error communicating with Azure fabric; You may experience " + "connectivity issues: %s" % e) LOG.warning( - "Error communicating with Azure fabric; You may experience." 
+ "Error communicating with Azure fabric; You may experience " "connectivity issues.", exc_info=True) return False @@ -1027,7 +1110,9 @@ def read_azure_ovf(contents): try: dom = minidom.parseString(contents) except Exception as e: - raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e) + error_str = "Invalid ovf-env.xml: %s" % e + report_diagnostic_event(error_str) + raise BrokenAzureDataSource(error_str) results = find_child(dom.documentElement, lambda n: n.localName == "ProvisioningSection") @@ -1299,8 +1384,13 @@ def get_metadata_from_imds(fallback_nic, retries): if net.is_up(fallback_nic): return util.log_time(**kwargs) else: - with EphemeralDHCPv4(fallback_nic): - return util.log_time(**kwargs) + try: + with EphemeralDHCPv4WithReporting( + azure_ds_reporter, fallback_nic): + return util.log_time(**kwargs) + except Exception as e: + report_diagnostic_event("exception while getting metadata: %s" % e) + raise @azure_ds_telemetry_reporter @@ -1313,11 +1403,14 @@ def _get_metadata_from_imds(retries): url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, retries=retries, exception_cb=retry_on_url_exc) except Exception as e: - LOG.debug('Ignoring IMDS instance metadata: %s', e) + msg = 'Ignoring IMDS instance metadata: %s' % e + report_diagnostic_event(msg) + LOG.debug(msg) return {} try: return util.load_json(str(response)) - except json.decoder.JSONDecodeError: + except json.decoder.JSONDecodeError as e: + report_diagnostic_event('non-json imds response' % e) LOG.warning( 'Ignoring non-json IMDS instance metadata: %s', str(response)) return {} @@ -1370,8 +1463,10 @@ def _is_platform_viable(seed_dir): asset_tag = util.read_dmi_data('chassis-asset-tag') if asset_tag == AZURE_CHASSIS_ASSET_TAG: return True - LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) - evt.description = "Non-Azure DMI asset tag '%s' discovered.", asset_tag + msg = "Non-Azure DMI asset tag '%s' discovered." 
% asset_tag + LOG.debug(msg) + evt.description = msg + report_diagnostic_event(msg) if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): return True return False diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 82c4c8c4..f1fba175 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -16,7 +16,11 @@ from xml.etree import ElementTree from cloudinit import url_helper from cloudinit import util +from cloudinit import version +from cloudinit import distros from cloudinit.reporting import events +from cloudinit.net.dhcp import EphemeralDHCPv4 +from datetime import datetime LOG = logging.getLogger(__name__) @@ -24,6 +28,10 @@ LOG = logging.getLogger(__name__) # value is applied if the endpoint can't be found within a lease file DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10" +BOOT_EVENT_TYPE = 'boot-telemetry' +SYSTEMINFO_EVENT_TYPE = 'system-info' +DIAGNOSTIC_EVENT_TYPE = 'diagnostic' + azure_ds_reporter = events.ReportEventStack( name="azure-ds", description="initialize reporter for azure ds", @@ -40,6 +48,105 @@ def azure_ds_telemetry_reporter(func): return impl +@azure_ds_telemetry_reporter +def get_boot_telemetry(): + """Report timestamps related to kernel initialization and systemd + activation of cloud-init""" + if not distros.uses_systemd(): + raise RuntimeError( + "distro not using systemd, skipping boot telemetry") + + LOG.debug("Collecting boot telemetry") + try: + kernel_start = float(time.time()) - float(util.uptime()) + except ValueError: + raise RuntimeError("Failed to determine kernel start timestamp") + + try: + out, _ = util.subp(['/bin/systemctl', + 'show', '-p', + 'UserspaceTimestampMonotonic'], + capture=True) + tsm = None + if out and '=' in out: + tsm = out.split("=")[1] + + if not tsm: + raise RuntimeError("Failed to parse " + "UserspaceTimestampMonotonic from systemd") + + user_start = kernel_start + (float(tsm) / 1000000) + except util.ProcessExecutionError as e: + raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s" + % e) + except ValueError as e: + raise RuntimeError("Failed to parse " + "UserspaceTimestampMonotonic from systemd: %s" + % e) + + try: + out, _ = util.subp(['/bin/systemctl', 'show', + 'cloud-init-local', '-p', + 'InactiveExitTimestampMonotonic'], + capture=True) + tsm = None + if out and '=' in out: + tsm = out.split("=")[1] + if not tsm: + raise RuntimeError("Failed to parse " + "InactiveExitTimestampMonotonic from systemd") + + cloudinit_activation = kernel_start + (float(tsm) / 1000000) + except util.ProcessExecutionError as e: + raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s" + % e) + except ValueError as e: + raise RuntimeError("Failed to parse " + "InactiveExitTimestampMonotonic from systemd: %s" + % e) + + evt = events.ReportingEvent( + BOOT_EVENT_TYPE, 'boot-telemetry', + "kernel_start=%s user_start=%s cloudinit_activation=%s" % + (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z', + datetime.utcfromtimestamp(user_start).isoformat() + 'Z', + datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'), + events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + +@azure_ds_telemetry_reporter +def get_system_info(): + """Collect and report system information""" + info = util.system_info() + evt = events.ReportingEvent( + SYSTEMINFO_EVENT_TYPE, 'system information', + "cloudinit_version=%s, kernel_version=%s, variant=%s, " + "distro_name=%s, 
distro_version=%s, flavor=%s, " + "python_version=%s" % + (version.version_string(), info['release'], info['variant'], + info['dist'][0], info['dist'][1], info['dist'][2], + info['python']), events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + +def report_diagnostic_event(msg): + """Report a diagnostic event""" + evt = events.ReportingEvent( + DIAGNOSTIC_EVENT_TYPE, 'diagnostic message', + msg, events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + @contextmanager def cd(newdir): prevdir = os.getcwd() @@ -360,16 +467,19 @@ class WALinuxAgentShim(object): value = dhcp245 LOG.debug("Using Azure Endpoint from dhcp options") if value is None: + report_diagnostic_event("No Azure endpoint from dhcp options") LOG.debug('Finding Azure endpoint from networkd...') value = WALinuxAgentShim._networkd_get_value_from_leases() if value is None: # Option-245 stored in /run/cloud-init/dhclient.hooks/<interface>.json # a dhclient exit hook that calls cloud-init-dhclient-hook + report_diagnostic_event("No Azure endpoint from networkd") LOG.debug('Finding Azure endpoint from hook json...') dhcp_options = WALinuxAgentShim._load_dhclient_json() value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options) if value is None: # Fallback and check the leases file if unsuccessful + report_diagnostic_event("No Azure endpoint from dhclient logs") LOG.debug("Unable to find endpoint in dhclient logs. " " Falling back to check lease files") if fallback_lease_file is None: @@ -381,11 +491,15 @@ class WALinuxAgentShim(object): value = WALinuxAgentShim._get_value_from_leases_file( fallback_lease_file) if value is None: - LOG.warning("No lease found; using default endpoint") + msg = "No lease found; using default endpoint" + report_diagnostic_event(msg) + LOG.warning(msg) value = DEFAULT_WIRESERVER_ENDPOINT endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) - LOG.debug('Azure endpoint found at %s', endpoint_ip_address) + msg = 'Azure endpoint found at %s' % endpoint_ip_address + report_diagnostic_event(msg) + LOG.debug(msg) return endpoint_ip_address @azure_ds_telemetry_reporter @@ -399,16 +513,19 @@ class WALinuxAgentShim(object): try: response = http_client.get( 'http://{0}/machine/?comp=goalstate'.format(self.endpoint)) - except Exception: + except Exception as e: if attempts < 10: time.sleep(attempts + 1) else: + report_diagnostic_event( + "failed to register with Azure: %s" % e) raise else: break attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) + report_diagnostic_event("container_id %s" % goal_state.container_id) ssh_keys = [] if goal_state.certificates_xml is not None and pubkey_info is not None: LOG.debug('Certificate XML found; parsing out public keys.') @@ -449,11 +566,20 @@ class WALinuxAgentShim(object): container_id=goal_state.container_id, instance_id=goal_state.instance_id, ) - http_client.post( - "http://{0}/machine?comp=health".format(self.endpoint), - data=document, - extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, - ) + # Host will collect kvps when cloud-init reports ready. + # some kvps might still be in the queue. We yield the scheduler + # to make sure we process all kvps up till this point.
+ time.sleep(0) + try: + http_client.post( + "http://{0}/machine?comp=health".format(self.endpoint), + data=document, + extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, + ) + except Exception as e: + report_diagnostic_event("exception while reporting ready: %s" % e) + raise + LOG.info('Reported ready to Azure fabric.') @@ -467,4 +593,22 @@ def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, finally: shim.clean_up() + +class EphemeralDHCPv4WithReporting(object): + def __init__(self, reporter, nic=None): + self.reporter = reporter + self.ephemeralDHCPv4 = EphemeralDHCPv4(iface=nic) + + def __enter__(self): + with events.ReportEventStack( + name="obtain-dhcp-lease", + description="obtain dhcp lease", + parent=self.reporter): + return self.ephemeralDHCPv4.__enter__() + + def __exit__(self, excp_type, excp_value, excp_traceback): + self.ephemeralDHCPv4.__exit__( + excp_type, excp_value, excp_traceback) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 4d57cebc..3547dd94 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -181,7 +181,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): self.logs.getvalue()) @mock.patch(MOCKPATH + 'readurl') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') @mock.patch(MOCKPATH + 'net.is_up') def test_get_metadata_performs_dhcp_when_network_is_down( self, m_net_is_up, m_dhcp, m_readurl): @@ -195,7 +195,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): dsaz.get_metadata_from_imds('eth9', retries=2)) m_net_is_up.assert_called_with('eth9') - m_dhcp.assert_called_with('eth9') + m_dhcp.assert_called_with(mock.ANY, 'eth9') self.assertIn( "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time self.logs.getvalue()) @@ -552,7 +552,8 @@ scbus-1 on xpt0 bus 0 dsrc.crawl_metadata() self.assertEqual(str(cm.exception), error_msg) - @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') + @mock.patch( + 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') @mock.patch( 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') @@ -1308,7 +1309,9 @@ class TestAzureBounce(CiTestCase): self.assertEqual(initial_host_name, self.set_hostname.call_args_list[-1][0][0]) - def test_environment_correct_for_bounce_command(self): + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_environment_correct_for_bounce_command( + self, mock_get_boot_telemetry): interface = 'int0' hostname = 'my-new-host' old_hostname = 'my-old-host' @@ -1324,7 +1327,9 @@ class TestAzureBounce(CiTestCase): self.assertEqual(hostname, bounce_env['hostname']) self.assertEqual(old_hostname, bounce_env['old_hostname']) - def test_default_bounce_command_ifup_used_by_default(self): + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_default_bounce_command_ifup_used_by_default( + self, mock_get_boot_telemetry): cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py old mode 100755 new mode 100644 index d01ed5b3..640895a4 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -7,9 +7,12 @@ import json import os import struct 
import time +import re +import mock from cloudinit import util from cloudinit.tests.helpers import CiTestCase +from cloudinit.sources.helpers import azure class TestKvpEncoding(CiTestCase): @@ -126,3 +129,65 @@ class TextKvpReporter(CiTestCase): reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) kvps = list(reporter._iterate_kvps(0)) self.assertEqual(0, len(kvps)) + + @mock.patch('cloudinit.distros.uses_systemd') + @mock.patch('cloudinit.util.subp') + def test_get_boot_telemetry(self, m_subp, m_sysd): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + datetime_pattern = r"\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]" + r"\d:[0-5]\d\.\d+([+-][0-2]\d:[0-5]\d|Z)" + + # get_boot_telemetry makes two subp calls to systemctl. We provide + # a list of values that the subp calls should return + m_subp.side_effect = [ + ('UserspaceTimestampMonotonic=1844838', ''), + ('InactiveExitTimestampMonotonic=3068203', '')] + m_sysd.return_value = True + + reporter.publish_event(azure.get_boot_telemetry()) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + + evt_msg = kvps[0]['value'] + if not re.search("kernel_start=" + datetime_pattern, evt_msg): + raise AssertionError("missing kernel_start timestamp") + if not re.search("user_start=" + datetime_pattern, evt_msg): + raise AssertionError("missing user_start timestamp") + if not re.search("cloudinit_activation=" + datetime_pattern, + evt_msg): + raise AssertionError( + "missing cloudinit_activation timestamp") + + def test_get_system_info(self): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + pattern = r"[^=\s]+" + + reporter.publish_event(azure.get_system_info()) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + evt_msg = kvps[0]['value'] + + # the most important information is cloudinit version, + # kernel_version, and the distro variant. It is ok if + # the rest is not available + if not re.search("cloudinit_version=" + pattern, evt_msg): + raise AssertionError("missing cloudinit_version string") + if not re.search("kernel_version=" + pattern, evt_msg): + raise AssertionError("missing kernel_version string") + if not re.search("variant=" + pattern, evt_msg): + raise AssertionError("missing distro variant string") + + def test_report_diagnostic_event(self): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + + reporter.publish_event( + azure.report_diagnostic_event("test_diagnostic")) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + evt_msg = kvps[0]['value'] + + if "test_diagnostic" not in evt_msg: + raise AssertionError("missing expected diagnostic message") -- cgit v1.2.3 From 2c52e6e88b19f5db8d55eb7280ee27703e05d75f Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 19 Aug 2019 20:52:54 +0000 Subject: DataSourceOracle: prefer DS network config over initramfs The Oracle platform provides networking configuration from two sources: * the primary interface configuration comes from the initramfs, because Oracle instances all boot via iSCSI * secondary interface configuration comes from an IMDS accessed over HTTP As we need to combine these two sources of network configuration, the default "prefer initramfs config over data source config" behaviour isn't appropriate; we would never get the IMDS interfaces via that route.
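To make the combination concrete, here is a sketch of the intended end state: a single v1 config drawing on both sources. This is illustrative only; the interface names and address values are borrowed from the test fixtures elsewhere in this series, not from this commit.

    # Sketch of a combined v1 config. The primary NIC entry originates
    # from the initramfs (iSCSI boot); the secondary VNIC entry is
    # derived from the IMDS. Names and values are illustrative.
    combined = {
        'version': 1,
        'config': [
            # primary, from initramfs-provided configuration
            {'type': 'physical', 'name': 'ens3',
             'subnets': [{'type': 'dhcp'}]},
            # secondary VNIC, added from IMDS data by the datasource
            {'type': 'physical', 'name': 'ens7',
             'mac_address': '00:00:17:02:2b:b1', 'mtu': 9000,
             'subnets': [{'type': 'static', 'address': '10.0.0.231'}]},
        ],
    }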
Instead, the Oracle data source has code to combine these two sources, so we prefer its network configuration over the initramfs configuration. (This is not appropriate default behaviour, because _in general_ data sources won't know how to merge initramfs-provided configuration into their provided configuration, so switching this order for all data sources would result in initramfs configuration being discarded on any data source that implements network_config.) --- cloudinit/sources/DataSourceOracle.py | 7 +++++++ cloudinit/sources/tests/test_oracle.py | 10 +++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index 086af799..6e73f568 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -109,6 +109,13 @@ class DataSourceOracle(sources.DataSource): dsname = 'Oracle' system_uuid = None vendordata_pure = None + network_config_sources = ( + sources.NetworkConfigSource.cmdline, + sources.NetworkConfigSource.ds, + sources.NetworkConfigSource.initramfs, + sources.NetworkConfigSource.system_cfg, + ) + _network_config = sources.UNSET def __init__(self, sys_cfg, *args, **kwargs): diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index 3e146776..3ddf7dfd 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit.sources import DataSourceOracle as oracle -from cloudinit.sources import BrokenMetadata +from cloudinit.sources import BrokenMetadata, NetworkConfigSource from cloudinit import helpers from cloudinit.tests import helpers as test_helpers @@ -303,6 +303,14 @@ class TestDataSourceOracle(test_helpers.CiTestCase): self.assertIn('Failed to fetch secondary network configuration', self.logs.getvalue()) + def test_ds_network_cfg_preferred_over_initramfs(self): + """Ensure that DS net config is preferred over initramfs config""" + network_config_sources = oracle.DataSourceOracle.network_config_sources + self.assertLess( + network_config_sources.index(NetworkConfigSource.ds), + network_config_sources.index(NetworkConfigSource.initramfs) + ) + @mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4())) class TestReadMetaData(test_helpers.HttprettyTestCase): -- cgit v1.2.3 From d1b022217a652c7a84d5430c9e571987864d3982 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 28 Aug 2019 00:58:16 +0000 Subject: exoscale: fix sysconfig cloud_config_modules overrides Make sure Exoscale supplements or overrides the existing system config setting cloud_config_modules instead of replacing it with the one-item list set-passwords LP: #1841454 --- cloudinit/sources/DataSourceExoscale.py | 26 ++++++++++++++-------- tests/unittests/test_datasource/test_exoscale.py | 24 ++++++++++++++-------- 2 files changed, 33 insertions(+), 17 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py index 52e7f6f6..fdfb4ed3 100644 --- a/cloudinit/sources/DataSourceExoscale.py +++ b/cloudinit/sources/DataSourceExoscale.py @@ -6,6 +6,7 @@ from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import sources +from cloudinit import helpers from cloudinit import url_helper from cloudinit import util @@ -20,13 +21,6 @@ URL_RETRIES = 6 EXOSCALE_DMI_NAME =
"Exoscale" -BUILTIN_DS_CONFIG = { - # We run the set password config module on every boot in order to enable - # resetting the instance's password via the exoscale console (and a - # subsequent instance reboot). - 'cloud_config_modules': [["set-passwords", "always"]] -} - class DataSourceExoscale(sources.DataSource): @@ -42,8 +36,22 @@ class DataSourceExoscale(sources.DataSource): self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT)) self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT) self.url_retries = self.ds_cfg.get('retries', URL_RETRIES) - - self.extra_config = BUILTIN_DS_CONFIG + self.extra_config = {} + + def activate(self, cfg, is_new_instance): + """Adjust set-passwords module to run 'always' during each boot""" + # We run the set password config module on every boot in order to + # enable resetting the instance's password via the exoscale console + # (and a subsequent instance reboot). + # Exoscale password server only provides set-passwords user-data if + # a user has triggered a password reset. So calling that password + # service generally results in no additional cloud-config. + # TODO(Create util functions for overriding merged sys_cfg module freq) + mod = 'set_passwords' + sem_path = self.paths.get_ipath_cur('sem') + sem_helper = helpers.FileSemaphores(sem_path) + if sem_helper.clear('config_' + mod, None): + LOG.debug('Overriding module set-passwords with frequency always') def wait_for_metadata_service(self): """Wait for the metadata service to be reachable.""" diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py index 350c3304..f0061199 100644 --- a/tests/unittests/test_datasource/test_exoscale.py +++ b/tests/unittests/test_datasource/test_exoscale.py @@ -11,8 +11,10 @@ from cloudinit.sources.DataSourceExoscale import ( PASSWORD_SERVER_PORT, read_metadata) from cloudinit.tests.helpers import HttprettyTestCase, mock +from cloudinit import util import httpretty +import os import requests @@ -63,6 +65,18 @@ class TestDatasourceExoscale(HttprettyTestCase): password = get_password() self.assertEqual(expected_password, password) + def test_activate_removes_set_passwords_semaphore(self): + """Allow set_passwords to run every boot by removing the semaphore.""" + path = helpers.Paths({'cloud_dir': self.tmp}) + sem_dir = self.tmp_path('instance/sem', dir=self.tmp) + util.ensure_dir(sem_dir) + sem_file = os.path.join(sem_dir, 'config_set_passwords') + with open(sem_file, 'w') as stream: + stream.write('') + ds = DataSourceExoscale({}, None, path) + ds.activate(None, None) + self.assertFalse(os.path.exists(sem_file)) + def test_get_data(self): """The datasource conforms to expected behavior when supplied full test data.""" @@ -95,8 +109,6 @@ class TestDatasourceExoscale(HttprettyTestCase): self.assertEqual(ds.get_config_obj(), {'ssh_pwauth': True, 'password': expected_password, - 'cloud_config_modules': [ - ["set-passwords", "always"]], 'chpasswd': { 'expire': False, }}) @@ -130,9 +142,7 @@ class TestDatasourceExoscale(HttprettyTestCase): self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") self.assertEqual(ds.metadata, {"instance-id": expected_id, "local-hostname": expected_hostname}) - self.assertEqual(ds.get_config_obj(), - {'cloud_config_modules': [ - ["set-passwords", "always"]]}) + self.assertEqual(ds.get_config_obj(), {}) def test_get_data_no_password(self): """The datasource conforms to expected behavior when no password is @@ -163,9 +173,7 @@ class 
TestDatasourceExoscale(HttprettyTestCase): self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") self.assertEqual(ds.metadata, {"instance-id": expected_id, "local-hostname": expected_hostname}) - self.assertEqual(ds.get_config_obj(), - {'cloud_config_modules': [ - ["set-passwords", "always"]]}) + self.assertEqual(ds.get_config_obj(), {}) @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') def test_read_metadata_when_password_server_unreachable(self, m_password): -- cgit v1.2.3 From e7881d5c62e68e4962e2fcd3d12089e5a3b94dc9 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 28 Aug 2019 16:10:35 +0000 Subject: Oracle: Render secondary vnic IP and MTU values only When rendering secondary vnic configuration from IMDS, emit configuration for the IP and MTU values only. Add support to mutate either a v1 or a v2 network_config input. --- cloudinit/sources/DataSourceOracle.py | 36 +++++++++++++++++-------------- cloudinit/sources/tests/test_oracle.py | 39 +++++++++++++++++++++++++++++++--- 2 files changed, 56 insertions(+), 19 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index 6e73f568..1cb0636c 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -51,8 +51,8 @@ def _add_network_config_from_opc_imds(network_config): include the secondary VNICs. :param network_config: - A v1 network config dict with the primary NIC already configured. This - dict will be mutated. + A v1 or v2 network config dict with the primary NIC already configured. + This dict will be mutated. :raises: Exceptions are not handled within this function. Likely exceptions are @@ -88,20 +88,24 @@ def _add_network_config_from_opc_imds(network_config): LOG.debug('Interface with MAC %s not found; skipping', mac_address) continue name = interfaces_by_mac[mac_address] - subnet = { - 'type': 'static', - 'address': vnic_dict['privateIp'], - 'netmask': vnic_dict['subnetCidrBlock'].split('/')[1], - 'gateway': vnic_dict['virtualRouterIp'], - 'control': 'manual', - } - network_config['config'].append({ - 'name': name, - 'type': 'physical', - 'mac_address': mac_address, - 'mtu': MTU, - 'subnets': [subnet], - }) + + if network_config['version'] == 1: + subnet = { + 'type': 'static', + 'address': vnic_dict['privateIp'], + } + network_config['config'].append({ + 'name': name, + 'type': 'physical', + 'mac_address': mac_address, + 'mtu': MTU, + 'subnets': [subnet], + }) + elif network_config['version'] == 2: + network_config['ethernets'][name] = { + 'addresses': [vnic_dict['privateIp']], + 'mtu': MTU, 'dhcp4': False, 'dhcp6': False, + 'match': {'macaddress': mac_address}} class DataSourceOracle(sources.DataSource): diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index 3ddf7dfd..2a70bbc9 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -526,6 +526,18 @@ class TestNetworkConfigFromOpcImds(test_helpers.CiTestCase): 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping', self.logs.getvalue()) + def test_missing_mac_skipped_v2(self): + self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE + self.m_get_interfaces_by_mac.return_value = {} + + network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}} + oracle._add_network_config_from_opc_imds(network_config) + + self.assertEqual(1, len(network_config['ethernets'])) + self.assertIn( + 'Interface with MAC 00:00:17:02:2b:b1
not found; skipping', + self.logs.getvalue()) + def test_secondary_nic(self): self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' @@ -549,8 +561,29 @@ class TestNetworkConfigFromOpcImds(test_helpers.CiTestCase): subnet_cfg = secondary_nic_cfg['subnets'][0] # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE self.assertEqual('10.0.0.231', subnet_cfg['address']) - self.assertEqual('24', subnet_cfg['netmask']) - self.assertEqual('10.0.0.1', subnet_cfg['gateway']) - self.assertEqual('manual', subnet_cfg['control']) + + def test_secondary_nic_v2(self): + self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + + network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}} + oracle._add_network_config_from_opc_imds(network_config) + + # The input is mutated + self.assertEqual(2, len(network_config['ethernets'])) + + secondary_nic_cfg = network_config['ethernets']['ens3'] + self.assertFalse(secondary_nic_cfg['dhcp4']) + self.assertFalse(secondary_nic_cfg['dhcp6']) + self.assertEqual(mac_addr, secondary_nic_cfg['match']['macaddress']) + self.assertEqual(9000, secondary_nic_cfg['mtu']) + + self.assertEqual(1, len(secondary_nic_cfg['addresses'])) + # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE + self.assertEqual('10.0.0.231', secondary_nic_cfg['addresses'][0]) + # vi: ts=4 expandtab -- cgit v1.2.3 From fa47d527a03a00319936323f0a857fbecafceaf7 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Mon, 9 Sep 2019 21:13:01 +0000 Subject: net,Oracle: Add support for netfailover detection Add support for detecting the netfailover[1] device 3-tuple in the networking layer. In the Oracle datasource, ensure that if a network config (either the generated fallback config or a provided config) includes a netfailover master, any MAC address value is removed from it, since matching by MAC can break under the 3-netdev model where the other two devices share the master's MAC. 1. https://www.kernel.org/doc/html/latest/networking/net_failover.html --- cloudinit/net/__init__.py | 133 +++++++++++++- cloudinit/net/tests/test_init.py | 310 +++++++++++++++++++++++++++++++++ cloudinit/sources/DataSourceOracle.py | 62 ++++++- cloudinit/sources/tests/test_oracle.py | 147 ++++++++++++++++ cloudinit/tests/helpers.py | 8 + 5 files changed, 656 insertions(+), 4 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index ea707c09..0eb952fe 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -109,6 +109,123 @@ def is_bond(devname): return os.path.exists(sys_dev_path(devname, "bonding")) +def is_netfailover(devname, driver=None): + """ netfailover driver uses 3 nics, master, primary and standby. + this returns True if the device is either the primary or standby + as these devices are to be ignored. + """ + if driver is None: + driver = device_driver(devname) + if is_netfail_primary(devname, driver) or is_netfail_standby(devname, + driver): + return True + return False + + +def get_dev_features(devname): + """ Returns a str from reading /sys/class/net/<devname>/device/features.""" + features = '' + try: + features = read_sys_net(devname, 'device/features') + except Exception: + pass + return features + + +def has_netfail_standby_feature(devname): + """ Return True if VIRTIO_NET_F_STANDBY bit (62) is set.
+ + https://github.com/torvalds/linux/blob/ \ + 089cf7f6ecb266b6a4164919a2e69bd2f938374a/ \ + include/uapi/linux/virtio_net.h#L60 + """ + features = get_dev_features(devname) + if not features or len(features) < 64: + return False + return features[62] == "1" + + +def is_netfail_master(devname, driver=None): + """ A device is a "netfail master" device if: + + - The device does NOT have the 'master' sysfs attribute + - The device driver is 'virtio_net' + - The device has the standby feature bit set + + Return True if all of the above is True. + """ + if os.path.exists(sys_dev_path(devname, path='master')): + return False + + if driver is None: + driver = device_driver(devname) + + if driver != "virtio_net": + return False + + if not has_netfail_standby_feature(devname): + return False + + return True + + +def is_netfail_primary(devname, driver=None): + """ A device is a "netfail primary" device if: + + - the device has a 'master' sysfs file + - the device driver is not 'virtio_net' + - the 'master' sysfs file points to device with virtio_net driver + - the 'master' device has the 'standby' feature bit set + + Return True if all of the above is True. + """ + # /sys/class/net//master -> ../../ + master_sysfs_path = sys_dev_path(devname, path='master') + if not os.path.exists(master_sysfs_path): + return False + + if driver is None: + driver = device_driver(devname) + + if driver == "virtio_net": + return False + + master_devname = os.path.basename(os.path.realpath(master_sysfs_path)) + master_driver = device_driver(master_devname) + if master_driver != "virtio_net": + return False + + master_has_standby = has_netfail_standby_feature(master_devname) + if not master_has_standby: + return False + + return True + + +def is_netfail_standby(devname, driver=None): + """ A device is a "netfail standby" device if: + + - The device has a 'master' sysfs attribute + - The device driver is 'virtio_net' + - The device has the standby feature bit set + + Return True if all of the above is True. 
+ """ + if not os.path.exists(sys_dev_path(devname, path='master')): + return False + + if driver is None: + driver = device_driver(devname) + + if driver != "virtio_net": + return False + + if not has_netfail_standby_feature(devname): + return False + + return True + + def is_renamed(devname): """ /* interface name assignment types (sysfs name_assign_type attribute) */ @@ -227,6 +344,9 @@ def find_fallback_nic(blacklist_drivers=None): if is_bond(interface): # skip any bonds continue + if is_netfailover(interface): + # ignore netfailover primary/standby interfaces + continue carrier = read_sys_net_int(interface, 'carrier') if carrier: connected.append(interface) @@ -273,9 +393,14 @@ def generate_fallback_config(blacklist_drivers=None, config_driver=None): if not target_name: # can't read any interfaces addresses (or there are none); give up return None - target_mac = read_sys_net_safe(target_name, 'address') - cfg = {'dhcp4': True, 'set-name': target_name, - 'match': {'macaddress': target_mac.lower()}} + + # netfail cannot use mac for matching, they have duplicate macs + if is_netfail_master(target_name): + match = {'name': target_name} + else: + match = { + 'macaddress': read_sys_net_safe(target_name, 'address').lower()} + cfg = {'dhcp4': True, 'set-name': target_name, 'match': match} if config_driver: driver = device_driver(target_name) if driver: @@ -661,6 +786,8 @@ def get_interfaces(): continue if is_bond(name): continue + if is_netfailover(name): + continue mac = get_interface_mac(name) # some devices may not have a mac (tun0) if not mac: diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index d2e38f00..7259dbe3 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -204,6 +204,10 @@ class TestGenerateFallbackConfig(CiTestCase): self.add_patch('cloudinit.net.util.is_container', 'm_is_container', return_value=False) self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle') + self.add_patch('cloudinit.net.is_netfailover', 'm_netfail', + return_value=False) + self.add_patch('cloudinit.net.is_netfail_master', 'm_netfail_master', + return_value=False) def test_generate_fallback_finds_connected_eth_with_mac(self): """generate_fallback_config finds any connected device with a mac.""" @@ -268,6 +272,61 @@ class TestGenerateFallbackConfig(CiTestCase): ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding')) self.assertIsNone(net.generate_fallback_config()) + def test_generate_fallback_config_skips_netfail_devs(self): + """gen_fallback_config ignores netfail primary,sby no mac on master.""" + mac = 'aa:bb:cc:aa:bb:cc' # netfailover devs share the same mac + for iface in ['ens3', 'ens3sby', 'enP0s1f3']: + write_file(os.path.join(self.sysdir, iface, 'carrier'), '1') + write_file( + os.path.join(self.sysdir, iface, 'addr_assign_type'), '0') + write_file( + os.path.join(self.sysdir, iface, 'address'), mac) + + def is_netfail(iface, _driver=None): + # ens3 is the master + if iface == 'ens3': + return False + return True + self.m_netfail.side_effect = is_netfail + + def is_netfail_master(iface, _driver=None): + # ens3 is the master + if iface == 'ens3': + return True + return False + self.m_netfail_master.side_effect = is_netfail_master + expected = { + 'ethernets': { + 'ens3': {'dhcp4': True, 'match': {'name': 'ens3'}, + 'set-name': 'ens3'}}, + 'version': 2} + result = net.generate_fallback_config() + self.assertEqual(expected, result) + + +class TestNetFindFallBackNic(CiTestCase): + + with_logs = True + + def setUp(self): + 
super(TestNetFindFallBackNic, self).setUp() + sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + self.m_sys_path = sys_mock.start() + self.sysdir = self.tmp_dir() + '/' + self.m_sys_path.return_value = self.sysdir + self.addCleanup(sys_mock.stop) + self.add_patch('cloudinit.net.util.is_container', 'm_is_container', + return_value=False) + self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle') + + def test_generate_fallback_finds_first_connected_eth_with_mac(self): + """find_fallback_nic finds any connected device with a mac.""" + write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1') + write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1') + mac = 'aa:bb:cc:aa:bb:cc' + write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac) + self.assertEqual('eth1', net.find_fallback_nic()) + class TestGetDeviceList(CiTestCase): @@ -365,6 +424,26 @@ class TestGetInterfaceMAC(CiTestCase): expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)] self.assertEqual(expected, net.get_interfaces()) + @mock.patch('cloudinit.net.is_netfailover') + def test_get_interfaces_by_mac_skips_netfailover(self, m_netfail): + """Ignore interfaces if netfailover primary or standby.""" + mac = 'aa:bb:cc:aa:bb:cc' # netfailover devs share the same mac + for iface in ['ens3', 'ens3sby', 'enP0s1f3']: + write_file( + os.path.join(self.sysdir, iface, 'addr_assign_type'), '0') + write_file( + os.path.join(self.sysdir, iface, 'address'), mac) + + def is_netfail(iface, _driver=None): + # ens3 is the master + if iface == 'ens3': + return False + else: + return True + m_netfail.side_effect = is_netfail + expected = [('ens3', mac, None, None)] + self.assertEqual(expected, net.get_interfaces()) + class TestInterfaceHasOwnMAC(CiTestCase): @@ -922,3 +1001,234 @@ class TestWaitForPhysdevs(CiTestCase): self.m_get_iface_mac.return_value = {} net.wait_for_physdevs(netcfg, strict=False) self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count) + + +class TestNetFailOver(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestNetFailOver, self).setUp() + self.add_patch('cloudinit.net.util', 'm_util') + self.add_patch('cloudinit.net.read_sys_net', 'm_read_sys_net') + self.add_patch('cloudinit.net.device_driver', 'm_device_driver') + + def test_get_dev_features(self): + devname = self.random_string() + features = self.random_string() + self.m_read_sys_net.return_value = features + + self.assertEqual(features, net.get_dev_features(devname)) + self.assertEqual(1, self.m_read_sys_net.call_count) + self.assertEqual(mock.call(devname, 'device/features'), + self.m_read_sys_net.call_args_list[0]) + + def test_get_dev_features_none_returns_empty_string(self): + devname = self.random_string() + self.m_read_sys_net.side_effect = Exception('error') + self.assertEqual('', net.get_dev_features(devname)) + self.assertEqual(1, self.m_read_sys_net.call_count) + self.assertEqual(mock.call(devname, 'device/features'), + self.m_read_sys_net.call_args_list[0]) + + @mock.patch('cloudinit.net.get_dev_features') + def test_has_netfail_standby_feature(self, m_dev_features): + devname = self.random_string() + standby_features = ('0' * 62) + '1' + '0' + m_dev_features.return_value = standby_features + self.assertTrue(net.has_netfail_standby_feature(devname)) + + @mock.patch('cloudinit.net.get_dev_features') + def test_has_netfail_standby_feature_short_is_false(self, m_dev_features): + devname = self.random_string() + standby_features = self.random_string() + m_dev_features.return_value = standby_features +
self.assertFalse(net.has_netfail_standby_feature(devname)) + + @mock.patch('cloudinit.net.get_dev_features') + def test_has_netfail_standby_feature_not_present_is_false(self, + m_dev_features): + devname = self.random_string() + standby_features = '0' * 64 + m_dev_features.return_value = standby_features + self.assertFalse(net.has_netfail_standby_feature(devname)) + + @mock.patch('cloudinit.net.get_dev_features') + def test_has_netfail_standby_feature_no_features_is_false(self, + m_dev_features): + devname = self.random_string() + standby_features = None + m_dev_features.return_value = standby_features + self.assertFalse(net.has_netfail_standby_feature(devname)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_master(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = False # no master sysfs attr + m_standby.return_value = True # has standby feature flag + self.assertTrue(net.is_netfail_master(devname, driver)) + + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_master_checks_master_attr(self, m_sysdev): + devname = self.random_string() + driver = 'virtio_net' + m_sysdev.return_value = self.random_string() + self.assertFalse(net.is_netfail_master(devname, driver)) + self.assertEqual(1, m_sysdev.call_count) + self.assertEqual(mock.call(devname, path='master'), + m_sysdev.call_args_list[0]) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_master_wrong_driver(self, m_exists, m_standby): + devname = self.random_string() + driver = self.random_string() + self.assertFalse(net.is_netfail_master(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_master_has_master_attr(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = True # has master sysfs attr + self.assertFalse(net.is_netfail_master(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_master_no_standby_feat(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = False # no master sysfs attr + m_standby.return_value = False # no standby feature flag + self.assertFalse(net.is_netfail_master(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary(self, m_sysdev, m_exists, m_standby): + devname = self.random_string() + driver = self.random_string() # device not virtio_net + master_devname = self.random_string() + m_sysdev.return_value = "%s/%s" % (self.random_string(), + master_devname) + m_exists.return_value = True # has master sysfs attr + self.m_device_driver.return_value = 'virtio_net' # master virtio_net + m_standby.return_value = True # has standby feature flag + self.assertTrue(net.is_netfail_primary(devname, driver)) + self.assertEqual(1, self.m_device_driver.call_count) + self.assertEqual(mock.call(master_devname), + self.m_device_driver.call_args_list[0]) + self.assertEqual(1, m_standby.call_count) + self.assertEqual(mock.call(master_devname), + m_standby.call_args_list[0]) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + 
@mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary_wrong_driver(self, m_sysdev, m_exists, + m_standby): + devname = self.random_string() + driver = 'virtio_net' + self.assertFalse(net.is_netfail_primary(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary_no_master(self, m_sysdev, m_exists, m_standby): + devname = self.random_string() + driver = self.random_string() # device not virtio_net + m_exists.return_value = False # no master sysfs attr + self.assertFalse(net.is_netfail_primary(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary_bad_master(self, m_sysdev, m_exists, + m_standby): + devname = self.random_string() + driver = self.random_string() # device not virtio_net + master_devname = self.random_string() + m_sysdev.return_value = "%s/%s" % (self.random_string(), + master_devname) + m_exists.return_value = True # has master sysfs attr + self.m_device_driver.return_value = 'XXXX' # master not virtio_net + self.assertFalse(net.is_netfail_primary(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary_no_standby(self, m_sysdev, m_exists, + m_standby): + devname = self.random_string() + driver = self.random_string() # device not virtio_net + master_devname = self.random_string() + m_sysdev.return_value = "%s/%s" % (self.random_string(), + master_devname) + m_exists.return_value = True # has master sysfs attr + self.m_device_driver.return_value = 'virtio_net' # master virtio_net + m_standby.return_value = False # master has no standby feature flag + self.assertFalse(net.is_netfail_primary(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_standby(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = True # has master sysfs attr + m_standby.return_value = True # has standby feature flag + self.assertTrue(net.is_netfail_standby(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_standby_wrong_driver(self, m_exists, m_standby): + devname = self.random_string() + driver = self.random_string() + self.assertFalse(net.is_netfail_standby(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_standby_no_master(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = False # no master sysfs attr + self.assertFalse(net.is_netfail_standby(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_standby_no_standby_feature(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = True # has master sysfs attr + m_standby.return_value = False # no standby feature flag + self.assertFalse(net.is_netfail_standby(devname, driver)) + + @mock.patch('cloudinit.net.is_netfail_standby') +
@mock.patch('cloudinit.net.is_netfail_primary') + def test_is_netfailover_primary(self, m_primary, m_standby): + devname = self.random_string() + driver = self.random_string() + m_primary.return_value = True + m_standby.return_value = False + self.assertTrue(net.is_netfailover(devname, driver)) + + @mock.patch('cloudinit.net.is_netfail_standby') + @mock.patch('cloudinit.net.is_netfail_primary') + def test_is_netfailover_standby(self, m_primary, m_standby): + devname = self.random_string() + driver = self.random_string() + m_primary.return_value = False + m_standby.return_value = True + self.assertTrue(net.is_netfailover(devname, driver)) + + @mock.patch('cloudinit.net.is_netfail_standby') + @mock.patch('cloudinit.net.is_netfail_primary') + def test_is_netfailover_returns_false(self, m_primary, m_standby): + devname = self.random_string() + driver = self.random_string() + m_primary.return_value = False + m_standby.return_value = False + self.assertFalse(net.is_netfailover(devname, driver)) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index 1cb0636c..eec87403 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -16,7 +16,7 @@ Notes: """ from cloudinit.url_helper import combine_url, readurl, UrlError -from cloudinit.net import dhcp, get_interfaces_by_mac +from cloudinit.net import dhcp, get_interfaces_by_mac, is_netfail_master from cloudinit import net from cloudinit import sources from cloudinit import util @@ -108,6 +108,56 @@ def _add_network_config_from_opc_imds(network_config): 'match': {'macaddress': mac_address}} +def _ensure_netfailover_safe(network_config): + """ + Search network config physical interfaces to see if any of them are + a netfailover master. If found, we prevent matching by MAC as the other + failover devices have the same MAC but need to be ignored. + + Note: we rely on cloudinit.net changes which prevent netfailover devices + from being present in the provided network config. For more details about + netfailover devices, refer to cloudinit.net module. + + :param network_config + A v1 or v2 network config dict with the primary NIC, and possibly + secondary nic configured. This dict will be mutated. 
+ + """ + # ignore anything that's not an actual network-config + if 'version' not in network_config: + return + + if network_config['version'] not in [1, 2]: + LOG.debug('Ignoring unknown network config version: %s', + network_config['version']) + return + + mac_to_name = get_interfaces_by_mac() + if network_config['version'] == 1: + for cfg in [c for c in network_config['config'] if 'type' in c]: + if cfg['type'] == 'physical': + if 'mac_address' in cfg: + mac = cfg['mac_address'] + cur_name = mac_to_name.get(mac) + if not cur_name: + continue + elif is_netfail_master(cur_name): + del cfg['mac_address'] + + elif network_config['version'] == 2: + for _, cfg in network_config.get('ethernets', {}).items(): + if 'match' in cfg: + macaddr = cfg.get('match', {}).get('macaddress') + if macaddr: + cur_name = mac_to_name.get(macaddr) + if not cur_name: + continue + elif is_netfail_master(cur_name): + del cfg['match']['macaddress'] + del cfg['set-name'] + cfg['match']['name'] = cur_name + + class DataSourceOracle(sources.DataSource): dsname = 'Oracle' @@ -208,9 +258,13 @@ class DataSourceOracle(sources.DataSource): We nonetheless return cmdline provided config if present and fallback to generate fallback.""" if self._network_config == sources.UNSET: + # this is v1 self._network_config = cmdline.read_initramfs_config() + if not self._network_config: + # this is now v2 self._network_config = self.distro.generate_fallback_config() + if self.ds_cfg.get('configure_secondary_nics'): try: # Mutate self._network_config to include secondary VNICs @@ -219,6 +273,12 @@ class DataSourceOracle(sources.DataSource): util.logexc( LOG, "Failed to fetch secondary network configuration!") + + # we need to verify that the nic selected is not a netfail over + # device and, if it is a netfail master, then we need to avoid + # emitting any match by mac + _ensure_netfailover_safe(self._network_config) + return self._network_config diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index 2a70bbc9..85b6db97 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -8,6 +8,7 @@ from cloudinit.tests import helpers as test_helpers from textwrap import dedent import argparse +import copy import httpretty import json import mock @@ -586,4 +587,150 @@ class TestNetworkConfigFromOpcImds(test_helpers.CiTestCase): self.assertEqual('10.0.0.231', secondary_nic_cfg['addresses'][0]) +class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase): + + with_logs = True + + def setUp(self): + super(TestNetworkConfigFiltersNetFailover, self).setUp() + self.add_patch(DS_PATH + '.get_interfaces_by_mac', + 'm_get_interfaces_by_mac') + self.add_patch(DS_PATH + '.is_netfail_master', 'm_netfail_master') + + def test_ignore_bogus_network_config(self): + netcfg = {'something': 'here'} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + + def test_ignore_network_config_unknown_versions(self): + netcfg = {'something': 'here', 'version': 3} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + + def test_checks_v1_type_physical_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 1, 'config': [ + {'type': 'physical', 'name': nic_name, 'mac_address': mac_addr, + 'subnets': [{'type': 'dhcp4'}]}]} + 
passed_netcfg = copy.copy(netcfg) + self.m_netfail_master.return_value = False + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual([mock.call(nic_name)], + self.m_netfail_master.call_args_list) + + def test_checks_v1_skips_non_phys_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'bond0' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 1, 'config': [ + {'type': 'bond', 'name': nic_name, 'mac_address': mac_addr, + 'subnets': [{'type': 'dhcp4'}]}]} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual(0, self.m_netfail_master.call_count) + + def test_removes_master_mac_property_v1(self): + nic_master, mac_master = 'ens3', self.random_string() + nic_other, mac_other = 'ens7', self.random_string() + nic_extra, mac_extra = 'enp0s1f2', self.random_string() + self.m_get_interfaces_by_mac.return_value = { + mac_master: nic_master, + mac_other: nic_other, + mac_extra: nic_extra, + } + netcfg = {'version': 1, 'config': [ + {'type': 'physical', 'name': nic_master, + 'mac_address': mac_master}, + {'type': 'physical', 'name': nic_other, 'mac_address': mac_other}, + {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra}, + ]} + + def _is_netfail_master(iface): + if iface == 'ens3': + return True + return False + self.m_netfail_master.side_effect = _is_netfail_master + expected_cfg = {'version': 1, 'config': [ + {'type': 'physical', 'name': nic_master}, + {'type': 'physical', 'name': nic_other, 'mac_address': mac_other}, + {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra}, + ]} + oracle._ensure_netfailover_safe(netcfg) + self.assertEqual(expected_cfg, netcfg) + + def test_checks_v2_type_ethernet_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 2, 'ethernets': { + nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name, + 'match': {'macaddress': mac_addr}}}} + passed_netcfg = copy.copy(netcfg) + self.m_netfail_master.return_value = False + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual([mock.call(nic_name)], + self.m_netfail_master.call_args_list) + + def test_skips_v2_non_ethernet_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'wlps0' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 2, 'wifis': { + nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name, + 'match': {'macaddress': mac_addr}}}} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual(0, self.m_netfail_master.call_count) + + def test_removes_master_mac_property_v2(self): + nic_master, mac_master = 'ens3', self.random_string() + nic_other, mac_other = 'ens7', self.random_string() + nic_extra, mac_extra = 'enp0s1f2', self.random_string() + self.m_get_interfaces_by_mac.return_value = { + mac_master: nic_master, + mac_other: nic_other, + mac_extra: nic_extra, + } + netcfg = {'version': 2, 'ethernets': { + nic_extra: {'dhcp4': True, 'set-name': nic_extra, + 'match': {'macaddress': mac_extra}}, + nic_other: {'dhcp4': True, 'set-name': nic_other, + 'match': {'macaddress': mac_other}}, + nic_master: {'dhcp4': True, 'set-name': nic_master, + 'match': {'macaddress': 
mac_master}}, + }} + + def _is_netfail_master(iface): + if iface == 'ens3': + return True + return False + self.m_netfail_master.side_effect = _is_netfail_master + + expected_cfg = {'version': 2, 'ethernets': { + nic_master: {'dhcp4': True, 'match': {'name': nic_master}}, + nic_extra: {'dhcp4': True, 'set-name': nic_extra, + 'match': {'macaddress': mac_extra}}, + nic_other: {'dhcp4': True, 'set-name': nic_other, + 'match': {'macaddress': mac_other}}, + }} + oracle._ensure_netfailover_safe(netcfg) + self.assertEqual(expected_cfg, netcfg) + + # vi: ts=4 expandtab diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 23fddd07..4dad2afd 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -6,7 +6,9 @@ import functools import httpretty import logging import os +import random import shutil +import string import sys import tempfile import time @@ -243,6 +245,12 @@ class CiTestCase(TestCase): myds.metadata.update(metadata) return cloud.Cloud(myds, self.paths, sys_cfg, mydist, None) + @classmethod + def random_string(cls, length=8): + """Return a random lowercase string with default length of 8""" + return ''.join( + random.choice(string.ascii_lowercase) for _ in range(length)) + class ResourceUsingTestCase(CiTestCase): -- cgit v1.2.3 From 45426d8d38a7224962867ba71f390cce653e0d17 Mon Sep 17 00:00:00 2001 From: Xiaofeng Wang Date: Wed, 11 Sep 2019 18:53:01 +0000 Subject: VMware: add option into VMTools config to enable/disable custom script. VMware customization already has support to run a custom script during the VM customization. Adding this option allows a VM administrator to disable the execution of customization scripts. If the option is set to "false", the script will not execute and the customization status is set to GUESTCUST_ERROR_SCRIPT_DISABLED.
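The gate this change introduces can be sketched in a few lines. This is an illustration only, not the committed code: the constant values and the behaviour of get_tools_config follow the diff below, while the wrapper function and the lambda stand-ins are hypothetical.

    CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg"
    GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts"

    def custom_script_allowed(get_tools_config):
        # get_tools_config mirrors the helper added below: it returns the
        # configured string, or the supplied default when the key is absent
        # or vmware-toolbox-cmd is not installed.
        value = get_tools_config(
            CONFGROUPNAME_GUESTCUSTOMIZATION,
            GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
            "true")
        return value.lower() != "false"

    assert custom_script_allowed(lambda s, k, d: d)            # default: enabled
    assert not custom_script_allowed(lambda s, k, d: "false")  # explicit opt-out

Note that only the literal string "false" (compared case-insensitively) disables custom scripts; any other value, including an unset key, leaves them enabled.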
--- cloudinit/sources/DataSourceOVF.py | 21 ++++++- .../sources/helpers/vmware/imc/guestcust_error.py | 1 + .../sources/helpers/vmware/imc/guestcust_util.py | 37 ++++++++++++ tests/unittests/test_datasource/test_ovf.py | 55 +++++++++++++++--- tests/unittests/test_vmware/test_guestcust_util.py | 65 ++++++++++++++++++++++ 5 files changed, 169 insertions(+), 10 deletions(-) create mode 100644 tests/unittests/test_vmware/test_guestcust_util.py (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index dd941d2e..b1561892 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -40,11 +40,15 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_state \ from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( enable_nics, get_nics_to_enable, - set_customization_status + set_customization_status, + get_tools_config ) LOG = logging.getLogger(__name__) +CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg" +GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts" + class DataSourceOVF(sources.DataSource): @@ -148,6 +152,21 @@ class DataSourceOVF(sources.DataSource): product_marker, os.path.join(self.paths.cloud_dir, 'data')) special_customization = product_marker and not hasmarkerfile customscript = self._vmware_cust_conf.custom_script_name + custScriptConfig = get_tools_config( + CONFGROUPNAME_GUESTCUSTOMIZATION, + GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS, + "true") + if custScriptConfig.lower() == "false": + # Update the customization status if the + # custom script is disabled + if special_customization and customscript: + msg = "Custom script is disabled by VM Administrator" + LOG.debug(msg) + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED) + raise RuntimeError(msg) + ccScriptsDir = os.path.join( self.paths.get_cpath("scripts"), "per-instance") diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py index db5a00dc..65ae7390 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py @@ -10,5 +10,6 @@ class GuestCustErrorEnum(object): """Specifies different errors of Guest Customization engine""" GUESTCUST_ERROR_SUCCESS = 0 + GUESTCUST_ERROR_SCRIPT_DISABLED = 6 # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index a590f323..eb78172e 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -7,6 +7,7 @@ import logging import os +import re import time from cloudinit import util @@ -117,4 +118,40 @@ def enable_nics(nics): logger.warning("Can't connect network interfaces after %d attempts", enableNicsWaitRetries) + +def get_tools_config(section, key, defaultVal): + """ Return the value of [section] key from VMTools configuration. + + @param section: String of section to read from VMTools config + @param key: String of key to read from the given [section] + @param defaultVal: String value to return when the key cannot be read + @returns: String value from key in [section] or defaultVal if + [section] is not present or vmware-toolbox-cmd is + not installed.
+ """ + + if not util.which('vmware-toolbox-cmd'): + logger.debug( + 'vmware-toolbox-cmd not installed, returning default value') + return defaultVal + + retValue = defaultVal + cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key] + + try: + (outText, _) = util.subp(cmd) + m = re.match(r'([a-zA-Z0-9 ]+)=(.*)', outText) + if m: + retValue = m.group(2).strip() + logger.debug("Get tools config: [%s] %s = %s", + section, key, retValue) + else: + logger.debug( + "Tools config: [%s] %s is not found, return default value: %s", + section, key, retValue) + except util.ProcessExecutionError as e: + logger.error("Failed running %s[%s]", cmd, e.exit_code) + logger.exception(e) + + return retValue + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index 349d54cc..a615470a 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -169,19 +169,56 @@ class TestDatasourceOVF(CiTestCase): MARKER-ID = 12345345 """) util.write_file(conf_file, conf_content) - with self.assertRaises(CustomScriptNotFound) as context: - wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'util.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) + with mock.patch(MPATH + 'get_tools_config', return_value='true'): + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'util.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) customscript = self.tmp_path('test-script', self.tdir) self.assertIn('Script %s not found!!' % customscript, str(context.exception)) + def test_get_data_cust_script_disabled(self): + """If custom script is disabled by VMware tools configuration, + raise a RuntimeError. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + """) + util.write_file(conf_file, conf_content) + # Prepare the custom sript + customscript = self.tmp_path('test-script', self.tdir) + util.write_file(customscript, "This is the post cust script") + + with mock.patch(MPATH + 'get_tools_config', return_value='false'): + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(RuntimeError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'util.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + self.assertIn('Custom script is disabled by VM Administrator', + str(context.exception)) + def test_get_data_non_vmware_seed_platform_info(self): """Platform info properly reports when on non-vmware platforms.""" paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py new file mode 100644 index 00000000..b8fa9942 --- /dev/null +++ b/tests/unittests/test_vmware/test_guestcust_util.py @@ -0,0 +1,65 @@ +# Copyright (C) 2019 Canonical Ltd. +# Copyright (C) 2019 VMware INC. 
+# +# Author: Xiaofeng Wang +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import util +from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( + get_tools_config, +) +from cloudinit.tests.helpers import CiTestCase, mock + + +class TestGuestCustUtil(CiTestCase): + def test_get_tools_config_not_installed(self): + """ + This test is designed to verify the behavior if vmware-toolbox-cmd + is not installed. + """ + with mock.patch.object(util, 'which', return_value=None): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), 'defaultVal') + + def test_get_tools_config_internal_exception(self): + """ + This test is designed to verify the behavior if internal exception + is raised. + """ + with mock.patch.object(util, 'which', return_value='/dummy/path'): + with mock.patch.object(util, 'subp', + return_value=('key=value', b''), + side_effect=util.ProcessExecutionError( + "subp failed", exit_code=99)): + # verify return value is 'defaultVal', not 'value'. + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'defaultVal') + + def test_get_tools_config_normal(self): + """ + This test is designed to verify the value could be parsed from + key = value of the given [section] + """ + with mock.patch.object(util, 'which', return_value='/dummy/path'): + # value is not blank + with mock.patch.object(util, 'subp', + return_value=('key = value ', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'value') + # value is blank + with mock.patch.object(util, 'subp', + return_value=('key = ', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + '') + # value contains = + with mock.patch.object(util, 'subp', + return_value=('key=Bar=Wark', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'Bar=Wark') + +# vi: ts=4 expandtab -- cgit v1.2.3 From e7f16e215ef4fd6a31ddd2f1b585bbb6508bff06 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 11 Sep 2019 21:09:34 +0000 Subject: Brightbox: restrict detection to require full domain match .brightbox.com The detection for brightbox in both ds-identify and in identify_brightbox would incorrectly match the domain 'bobrightbox', which is not a brightbox platform. The fix here is to restrict matching to '*.brightbox.com' rather than '*brightbox.com' Also, while here remove a url to bug 1661693 which added the knowledge of brightbox. 
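The off-by-one-dot problem is easy to demonstrate with Python's suffix matching; the two serial values below are the ones exercised by the updated unit tests in the diff that follows:

    serials = ['srv-otuxg.gb1.brightbox.com', 'tricky-host.bobrightbox.com']

    # Old check: matches both serials, including the unrelated host.
    print([s.endswith('brightbox.com') for s in serials])   # [True, True]

    # New check: requires a full label boundary before the domain.
    print([s.endswith('.brightbox.com') for s in serials])  # [True, False]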
--- cloudinit/sources/DataSourceEc2.py | 2 +- tests/unittests/test_ds_identify.py | 16 ++++++++++++++-- tools/ds-identify | 3 +-- 3 files changed, 16 insertions(+), 5 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 5c017bfb..10107456 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -473,7 +473,7 @@ def identify_aws(data): def identify_brightbox(data): - if data['serial'].endswith('brightbox.com'): + if data['serial'].endswith('.brightbox.com'): return CloudNames.BRIGHTBOX diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 587e6993..de87be29 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -195,6 +195,10 @@ class DsIdentifyBase(CiTestCase): return self._check_via_dict( data, RC_FOUND, dslist=[data.get('ds'), DS_NONE]) + def _test_ds_not_found(self, name): + data = copy.deepcopy(VALID_CFG[name]) + return self._check_via_dict(data, RC_NOT_FOUND) + def _check_via_dict(self, data, rc, dslist=None, **kwargs): ret = self._call_via_dict(data, **kwargs) good = False @@ -244,9 +248,13 @@ class TestDsIdentify(DsIdentifyBase): self._test_ds_found('Ec2-xen') def test_brightbox_is_ec2(self): - """EC2: product_serial ends with 'brightbox.com'""" + """EC2: product_serial ends with '.brightbox.com'""" self._test_ds_found('Ec2-brightbox') + def test_bobrightbox_is_not_brightbox(self): + """EC2: bobrightbox.com in product_serial is not brightbox""" + self._test_ds_not_found('Ec2-brightbox-negative') + def test_gce_by_product_name(self): """GCE identifies itself with product_name.""" self._test_ds_found('GCE') @@ -724,7 +732,11 @@ VALID_CFG = { }, 'Ec2-brightbox': { 'ds': 'Ec2', - 'files': {P_PRODUCT_SERIAL: 'facc6e2f.brightbox.com\n'}, + 'files': {P_PRODUCT_SERIAL: 'srv-otuxg.gb1.brightbox.com\n'}, + }, + 'Ec2-brightbox-negative': { + 'ds': 'Ec2', + 'files': {P_PRODUCT_SERIAL: 'tricky-host.bobrightbox.com\n'}, }, 'GCE': { 'ds': 'GCE', diff --git a/tools/ds-identify b/tools/ds-identify index e0d4865c..2447d14f 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -891,9 +891,8 @@ ec2_identify_platform() { local default="$1" local serial="${DI_DMI_PRODUCT_SERIAL}" - # brightbox https://bugs.launchpad.net/cloud-init/+bug/1661693 case "$serial" in - *brightbox.com) _RET="Brightbox"; return 0;; + *.brightbox.com) _RET="Brightbox"; return 0;; esac # AWS http://docs.aws.amazon.com/AWSEC2/ -- cgit v1.2.3 From 571f7c36e89f67f4c2d1cacfd8f9269bf864d560 Mon Sep 17 00:00:00 2001 From: Shixin Ruan Date: Wed, 18 Sep 2019 13:15:25 +0000 Subject: Add datasource for ZStack platform. The ZStack platform provides an AWS EC2-compatible metadata service, and identifies itself to the guest by setting the 'chassis asset tag' to a string that ends with '.zstack.io'.
LP: #1841181 --- cloudinit/apport.py | 1 + cloudinit/sources/DataSourceEc2.py | 16 ++++++++++++- doc/rtd/topics/datasources.rst | 1 + doc/rtd/topics/datasources/zstack.rst | 36 +++++++++++++++++++++++++++++ tests/unittests/test_datasource/test_ec2.py | 28 ++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 9 +++++++- tools/ds-identify | 5 ++++ 7 files changed, 94 insertions(+), 2 deletions(-) create mode 100644 doc/rtd/topics/datasources/zstack.rst (limited to 'cloudinit/sources') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 003ff1ff..fde1f75b 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -37,6 +37,7 @@ KNOWN_CLOUD_NAMES = [ 'Scaleway', 'SmartOS', 'VMware', + 'ZStack', 'Other'] # Potentially clear text collected logs diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 10107456..6c72ace2 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -33,6 +33,7 @@ class CloudNames(object): ALIYUN = "aliyun" AWS = "aws" BRIGHTBOX = "brightbox" + ZSTACK = "zstack" # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', # then an attempt at the Ec2 Metadata service will be made. UNKNOWN = "unknown" @@ -477,10 +478,16 @@ def identify_brightbox(data): return CloudNames.BRIGHTBOX +def identify_zstack(data): + if data['asset_tag'].endswith('.zstack.io'): + return CloudNames.ZSTACK + + def identify_platform(): # identify the platform and return an entry in CloudNames. data = _collect_platform_data() - checks = (identify_aws, identify_brightbox, lambda x: CloudNames.UNKNOWN) + checks = (identify_aws, identify_brightbox, identify_zstack, + lambda x: CloudNames.UNKNOWN) for checker in checks: try: result = checker(data) @@ -498,6 +505,7 @@ def _collect_platform_data(): uuid: system-uuid from dmi or /sys/hypervisor uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' serial: dmi 'system-serial-number' (/sys/.../product_serial) + asset_tag: 'dmidecode -s chassis-asset-tag' On Ec2 instances experimentation is that product_serial is upper case, and product_uuid is lower case. This returns lower case values for both. @@ -520,6 +528,12 @@ def _collect_platform_data(): data['serial'] = serial.lower() + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag is None: + asset_tag = '' + + data['asset_tag'] = asset_tag.lower() + return data diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 8e58be97..a337c08c 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -45,6 +45,7 @@ The following is a list of documents for each supported datasource: datasources/oracle.rst datasources/ovf.rst datasources/smartos.rst + datasources/zstack.rst Creation diff --git a/doc/rtd/topics/datasources/zstack.rst b/doc/rtd/topics/datasources/zstack.rst new file mode 100644 index 00000000..36e60ffb --- /dev/null +++ b/doc/rtd/topics/datasources/zstack.rst @@ -0,0 +1,36 @@ +.. _datasource_zstack: + +ZStack +====== +The ZStack platform provides an AWS EC2-compatible metadata service, but with a +different datasource identity. +More information about ZStack can be found at `ZStack <https://www.zstack.io>`__.
+ +Discovery +--------- +To determine whether a VM is running on the ZStack platform, cloud-init checks DMI +information via 'dmidecode -s chassis-asset-tag'; if the output ends with '.zstack.io', +the VM is running on ZStack. + + +Metadata +^^^^^^^^ +As on EC2, instance metadata can be queried at + +:: + + GET http://169.254.169.254/2009-04-04/meta-data/ + instance-id + local-hostname + +Userdata +^^^^^^^^ +As on EC2, instance userdata can be queried at + +:: + + GET http://169.254.169.254/2009-04-04/user-data/ + meta_data.json + user_data + password + +.. vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 1ec8e009..6fabf258 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -662,4 +662,32 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): expected, ec2.convert_ec2_metadata_network_config(self.network_metadata)) + +class TesIdentifyPlatform(test_helpers.CiTestCase): + + def collmock(self, **kwargs): + """return non-special _collect_platform_data updated with changes.""" + unspecial = { + 'asset_tag': '3857-0037-2746-7462-1818-3997-77', + 'serial': 'H23-C4J3JV-R6', + 'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2', + 'uuid_source': 'dmi', + } + unspecial.update(**kwargs) + return unspecial + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_zstack(self, m_collect): + """zstack should be identified if cassis-asset-tag ends in .zstack.io + """ + m_collect.return_value = self.collmock(asset_tag='123456.zstack.io') + self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform()) + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_zstack_full_domain_only(self, m_collect): + """zstack asset-tag matching should match only on full domain boundary.
+ """ + m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io') + self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index de87be29..7aeeb91c 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -609,6 +609,10 @@ class TestDsIdentify(DsIdentifyBase): self.assertEqual(expected, [p for p in expected if p in toks], "path did not have expected tokens") + def test_zstack_is_ec2(self): + """EC2: chassis asset tag ends with 'zstack.io'""" + self._test_ds_found('Ec2-ZStack') + class TestIsIBMProvisioning(DsIdentifyBase): """Test the is_ibm_provisioning method in ds-identify.""" @@ -971,8 +975,11 @@ VALID_CFG = { {'name': 'blkid', 'ret': 2, 'out': ''}, ], 'files': {ds_smartos.METADATA_SOCKFILE: 'would be a socket\n'}, + }, + 'Ec2-ZStack': { + 'ds': 'Ec2', + 'files': {P_CHASSIS_ASSET_TAG: '123456.zstack.io\n'}, } - } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index 2447d14f..f76f2a6e 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -895,6 +895,11 @@ ec2_identify_platform() { *.brightbox.com) _RET="Brightbox"; return 0;; esac + local asset_tag="${DI_DMI_CHASSIS_ASSET_TAG}" + case "$asset_tag" in + *.zstack.io) _RET="ZStack"; return 0;; + esac + # AWS http://docs.aws.amazon.com/AWSEC2/ # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="${PATH_SYS_HYPERVISOR}/uuid" -- cgit v1.2.3 From 18286fc3f77ddbd05ba6c46f160c54092c63c10c Mon Sep 17 00:00:00 2001 From: Brian Candler Date: Wed, 25 Sep 2019 19:12:50 +0000 Subject: Add missing space in warning: "not avalid seed" --- cloudinit/sources/DataSourceNoCloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 8a9e5dd2..474773d9 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -127,7 +127,7 @@ class DataSourceNoCloud(sources.DataSource): seeded = util.mount_cb(dev, _pp2d_callback, pp2d_kwargs) except ValueError: - LOG.warning("device %s with label=%s not a" + LOG.warning("device %s with label=%s not a " "valid seed.", dev, label) continue -- cgit v1.2.3 From 762f230cb37bdceaed3fb264e9cede48c2749d3f Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Fri, 27 Sep 2019 14:48:52 +0000 Subject: ovf: do not generate random instance-id for IMC customization path Cloud-init will not operate properly if the instance-id value changes on each boot. This is the source of a number of behavioral bugs filed against cloud-init with OVF datasource. Instead, use a static instance-id value, iid-vmware-imc, similar to iid-dsovf. 
--- cloudinit/sources/DataSourceOVF.py | 4 +--- tests/unittests/test_vmware_config_file.py | 6 +++--- 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index b1561892..e7794aab 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -406,9 +406,7 @@ def read_vmware_imc(config): if config.timezone: cfg['timezone'] = config.timezone - # Generate a unique instance-id so that re-customization will - # happen in cloud-init - md['instance-id'] = "iid-vmware-" + util.rand_str(strlen=8) + md['instance-id'] = "iid-vmware-imc" return (md, ud, cfg) diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py index f47335ea..16343ed2 100644 --- a/tests/unittests/test_vmware_config_file.py +++ b/tests/unittests/test_vmware_config_file.py @@ -62,13 +62,13 @@ class TestVmwareConfigFile(CiTestCase): (md1, _, _) = read_vmware_imc(conf) self.assertIn(instance_id_prefix, md1["instance-id"]) - self.assertEqual(len(md1["instance-id"]), len(instance_id_prefix) + 8) + self.assertEqual(md1["instance-id"], 'iid-vmware-imc') (md2, _, _) = read_vmware_imc(conf) self.assertIn(instance_id_prefix, md2["instance-id"]) - self.assertEqual(len(md2["instance-id"]), len(instance_id_prefix) + 8) + self.assertEqual(md2["instance-id"], 'iid-vmware-imc') - self.assertNotEqual(md1["instance-id"], md2["instance-id"]) + self.assertEqual(md2["instance-id"], md1["instance-id"]) def test_configfile_static_2nics(self): """Tests Config class for a configuration with two static NICs.""" -- cgit v1.2.3 From 067516d7bc917e4921b9f1424b7a64e92cae0ad2 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 27 Sep 2019 20:46:00 +0000 Subject: util: json.dumps on python 2.7 will handle UnicodeDecodeError on binary Since python 2.7 doesn't handle UnicodeDecodeErrors with the default handler LP: #1801364 --- cloudinit/sources/tests/test_init.py | 12 +++++------- cloudinit/tests/test_util.py | 20 ++++++++++++++++++++ cloudinit/util.py | 27 +++++++++++++++++++++++++-- 3 files changed, 50 insertions(+), 9 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 6378e98b..9698261b 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -457,19 +457,17 @@ class TestDataSource(CiTestCase): instance_json['ds']['meta_data']) @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8") - def test_non_utf8_encoding_logs_warning(self): - """When non-utf-8 values exist in py2 instance-data is not written.""" + def test_non_utf8_encoding_gets_b64encoded(self): + """When non-utf-8 values exist in py2 instance-data is b64encoded.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( self.sys_cfg, self.distro, Paths({'run_dir': tmp}), custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}}) self.assertTrue(datasource.get_data()) json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - self.assertFalse(os.path.exists(json_file)) - self.assertIn( - "WARNING: Error persisting instance-data.json: 'utf8' codec can't" - " decode byte 0xaa in position 2: invalid start byte", - self.logs.getvalue()) + instance_json = util.load_json(util.load_file(json_file)) + key21_value = instance_json['ds']['meta_data']['key2']['key2.1'] + self.assertEqual('ci-b64:' + util.b64e(b'ab\xaadef'), key21_value) def 
test_get_hostname_subclass_support(self): """Validate get_hostname signature on all subclasses of DataSource.""" diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index e3d2dbaa..f4f95e92 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -2,7 +2,9 @@ """Tests for cloudinit.util""" +import base64 import logging +import json import platform import cloudinit.util as util @@ -528,6 +530,24 @@ class TestGetLinuxDistro(CiTestCase): self.assertEqual(('foo', '1.1', 'aarch64'), dist) +class TestJsonDumps(CiTestCase): + def test_is_str(self): + """json_dumps should return a string.""" + self.assertTrue(isinstance(util.json_dumps({'abc': '123'}), str)) + + def test_utf8(self): + smiley = '\\ud83d\\ude03' + self.assertEqual( + {'smiley': smiley}, + json.loads(util.json_dumps({'smiley': smiley}))) + + def test_non_utf8(self): + blob = b'\xba\x03Qx-#y\xea' + self.assertEqual( + {'blob': 'ci-b64:' + base64.b64encode(blob).decode('utf-8')}, + json.loads(util.json_dumps({'blob': blob}))) + + @mock.patch('os.path.exists') class TestIsLXD(CiTestCase): diff --git a/cloudinit/util.py b/cloudinit/util.py index aa23b3f3..6e8e73b0 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1599,10 +1599,33 @@ def json_serialize_default(_obj): return 'Warning: redacted unserializable type {0}'.format(type(_obj)) +def json_preserialize_binary(data): + """Preserialize any discovered binary values to avoid json.dumps issues. + + Used only on python 2.7 where default type handling is not honored for + failure to encode binary data. LP: #1801364. + TODO(Drop this function when py2.7 support is dropped from cloud-init) + """ + data = obj_copy.deepcopy(data) + for key, value in data.items(): + if isinstance(value, (dict)): + data[key] = json_preserialize_binary(value) + if isinstance(value, bytes): + data[key] = 'ci-b64:{0}'.format(b64e(value)) + return data + + def json_dumps(data): """Return data in nicely formatted json.""" - return json.dumps(data, indent=1, sort_keys=True, - separators=(',', ': '), default=json_serialize_default) + try: + return json.dumps( + data, indent=1, sort_keys=True, separators=(',', ': '), + default=json_serialize_default) + except UnicodeDecodeError: + if sys.version_info[:2] == (2, 7): + data = json_preserialize_binary(data) + return json.dumps(data) + raise def yaml_dumps(obj, explicit_start=True, explicit_end=True, noalias=False): -- cgit v1.2.3 From d3b1c4ae6bd237a04ba5df4306ff38f752f72132 Mon Sep 17 00:00:00 2001 From: Adam Dobrawy Date: Fri, 4 Oct 2019 23:15:10 +0000 Subject: Add RbxCloud datasource --- cloudinit/sources/DataSourceRbxCloud.py | 250 ++++++++++++++++++++++++++++ doc/rtd/topics/datasources/rbxcloud.rst | 25 +++ tests/unittests/test_datasource/test_rbx.py | 208 +++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 17 +- tools/ds-identify | 7 +- 5 files changed, 505 insertions(+), 2 deletions(-) create mode 100644 cloudinit/sources/DataSourceRbxCloud.py create mode 100644 doc/rtd/topics/datasources/rbxcloud.rst create mode 100644 tests/unittests/test_datasource/test_rbx.py (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py new file mode 100644 index 00000000..9a8c3d5c --- /dev/null +++ b/cloudinit/sources/DataSourceRbxCloud.py @@ -0,0 +1,250 @@ +# Copyright (C) 2018 Warsaw Data Center +# +# Author: Malwina Leis +# Author: Grzegorz Brzeski +# Author: Adam Dobrawy +# +# This file is part of cloud-init. 
See LICENSE file for license information. +""" +This file contains code used to gather the user data passed to an +instance on rootbox / hyperone cloud platforms +""" +import errno +import os +import os.path + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util +from cloudinit.event import EventType + +LOG = logging.getLogger(__name__) +ETC_HOSTS = '/etc/hosts' + + +def get_manage_etc_hosts(): + hosts = util.load_file(ETC_HOSTS, quiet=True) + if hosts: + LOG.debug('/etc/hosts exists - setting manage_etc_hosts to False') + return False + LOG.debug('/etc/hosts does not exists - setting manage_etc_hosts to True') + return True + + +def ip2int(addr): + parts = addr.split('.') + return (int(parts[0]) << 24) + (int(parts[1]) << 16) + \ + (int(parts[2]) << 8) + int(parts[3]) + + +def int2ip(addr): + return '.'.join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]]) + + +def _sub_arp(cmd): + """ + Uses the prefered cloud-init subprocess def of util.subp + and runs arping. Breaking this to a separate function + for later use in mocking and unittests + """ + return util.subp(['arping'] + cmd) + + +def gratuitous_arp(items, distro): + source_param = '-S' + if distro.name in ['fedora', 'centos', 'rhel']: + source_param = '-s' + for item in items: + _sub_arp([ + '-c', '2', + source_param, item['source'], + item['destination'] + ]) + + +def get_md(): + rbx_data = None + devices = [ + dev + for dev, bdata in util.blkid().items() + if bdata.get('LABEL', '').upper() == 'CLOUDMD' + ] + for device in devices: + try: + rbx_data = util.mount_cb( + device=device, + callback=read_user_data_callback, + mtype=['vfat', 'fat'] + ) + if rbx_data: + break + except OSError as err: + if err.errno != errno.ENOENT: + raise + except util.MountFailedError: + util.logexc(LOG, "Failed to mount %s when looking for user " + "data", device) + if not rbx_data: + util.logexc(LOG, "Failed to load metadata and userdata") + return False + return rbx_data + + +def generate_network_config(netadps): + """Generate network configuration + + @param netadps: A list of network adapter settings + + @returns: A dict containing network config + """ + return { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'eth{}'.format(str(i)), + 'mac_address': netadp['macaddress'].lower(), + 'subnets': [ + { + 'type': 'static', + 'address': ip['address'], + 'netmask': netadp['network']['netmask'], + 'control': 'auto', + 'gateway': netadp['network']['gateway'], + 'dns_nameservers': netadp['network']['dns'][ + 'nameservers'] + } for ip in netadp['ip'] + ], + } for i, netadp in enumerate(netadps) + ] + } + + +def read_user_data_callback(mount_dir): + """This callback will be applied by util.mount_cb() on the mounted + drive. + + @param mount_dir: String representing path of directory where mounted drive + is available + + @returns: A dict containing userdata, metadata and cfg based on metadata. + """ + meta_data = util.load_json( + text=util.load_file( + fname=os.path.join(mount_dir, 'cloud.json'), + decode=False + ) + ) + user_data = util.load_file( + fname=os.path.join(mount_dir, 'user.data'), + quiet=True + ) + if 'vm' not in meta_data or 'netadp' not in meta_data: + util.logexc(LOG, "Failed to load metadata. 
Invalid format.") + return None + username = meta_data.get('additionalMetadata', {}).get('username') + ssh_keys = meta_data.get('additionalMetadata', {}).get('sshKeys', []) + + hash = None + if meta_data.get('additionalMetadata', {}).get('password'): + hash = meta_data['additionalMetadata']['password']['sha512'] + + network = generate_network_config(meta_data['netadp']) + + data = { + 'userdata': user_data, + 'metadata': { + 'instance-id': meta_data['vm']['_id'], + 'local-hostname': meta_data['vm']['name'], + 'public-keys': [] + }, + 'gratuitous_arp': [ + { + "source": ip["address"], + "destination": target + } + for netadp in meta_data['netadp'] + for ip in netadp['ip'] + for target in [ + netadp['network']["gateway"], + int2ip(ip2int(netadp['network']["gateway"]) + 2), + int2ip(ip2int(netadp['network']["gateway"]) + 3) + ] + ], + 'cfg': { + 'ssh_pwauth': True, + 'disable_root': True, + 'system_info': { + 'default_user': { + 'name': username, + 'gecos': username, + 'sudo': ['ALL=(ALL) NOPASSWD:ALL'], + 'passwd': hash, + 'lock_passwd': False, + 'ssh_authorized_keys': ssh_keys, + 'shell': '/bin/bash' + } + }, + 'network_config': network, + 'manage_etc_hosts': get_manage_etc_hosts(), + }, + } + + LOG.debug('returning DATA object:') + LOG.debug(data) + + return data + + +class DataSourceRbxCloud(sources.DataSource): + update_events = {'network': [ + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT + ]} + + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.seed = None + + def __str__(self): + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) + + def _get_data(self): + """ + Metadata is passed to the launching instance which + is used to perform instance configuration. + """ + rbx_data = get_md() + self.userdata_raw = rbx_data['userdata'] + self.metadata = rbx_data['metadata'] + self.gratuitous_arp = rbx_data['gratuitous_arp'] + self.cfg = rbx_data['cfg'] + return True + + @property + def network_config(self): + return self.cfg['network_config'] + + def get_public_ssh_keys(self): + return self.metadata['public-keys'] + + def get_userdata_raw(self): + return self.userdata_raw + + def get_config_obj(self): + return self.cfg + + def activate(self, cfg, is_new_instance): + gratuitous_arp(self.gratuitous_arp, self.distro) + + +# Used to match classes to dependencies +datasources = [ + (DataSourceRbxCloud, (sources.DEP_FILESYSTEM,)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/doc/rtd/topics/datasources/rbxcloud.rst b/doc/rtd/topics/datasources/rbxcloud.rst new file mode 100644 index 00000000..3d465bed --- /dev/null +++ b/doc/rtd/topics/datasources/rbxcloud.rst @@ -0,0 +1,25 @@ +.. _datasource_config_drive: + +Rbx Cloud +========= + +The Rbx datasource consumes the metadata drive available on platform +`HyperOne`_ and `Rootbox`_ platform. + +Datasource supports, in particular, network configurations, hostname, +user accounts and user metadata. + +Metadata drive +-------------- + +Drive metadata is a `FAT`_-formatted partition with the ```CLOUDMD``` label on +the system disk. Its contents are refreshed each time the virtual machine +is restarted, if the partition exists. For more information see +`HyperOne docs`_. + +.. _HyperOne: http://www.hyperone.com/ +.. _Rootbox: https://rootbox.com/ +.. _HyperOne Virtual Machine docs: http://www.hyperone.com/ +.. 
_vfat: https://en.wikipedia.org/wiki/File_Allocation_Table + +.. vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_rbx.py b/tests/unittests/test_datasource/test_rbx.py new file mode 100644 index 00000000..aabf1f18 --- /dev/null +++ b/tests/unittests/test_datasource/test_rbx.py @@ -0,0 +1,208 @@ +import json + +from cloudinit import helpers +from cloudinit import distros +from cloudinit.sources import DataSourceRbxCloud as ds +from cloudinit.tests.helpers import mock, CiTestCase, populate_dir + +DS_PATH = "cloudinit.sources.DataSourceRbxCloud" + +CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \ + "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \ + "tToyGP41.s1" + +CLOUD_METADATA = { + "vm": { + "memory": 4, + "cpu": 2, + "name": "vm-image-builder", + "_id": "5beab44f680cffd11f0e60fc" + }, + "additionalMetadata": { + "username": "guru", + "sshKeys": ["ssh-rsa ..."], + "password": { + "sha512": CRYPTO_PASS + } + }, + "disk": [ + {"size": 10, "type": "ssd", + "name": "vm-image-builder-os", + "_id": "5beab450680cffd11f0e60fe"}, + {"size": 2, "type": "ssd", + "name": "ubuntu-1804-bionic", + "_id": "5bef002c680cffd11f107590"} + ], + "netadp": [ + { + "ip": [{"address": "62.181.8.174"}], + "network": { + "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, + "routing": [], + "gateway": "62.181.8.1", + "netmask": "255.255.248.0", + "name": "public", + "type": "public", + "_id": "5784e97be2627505227b578c" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:03", + "_id": "5beab450680cffd11f0e6102" + }, + { + "ip": [{"address": "10.209.78.11"}], + "network": { + "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]}, + "routing": [], + "gateway": "10.209.78.1", + "netmask": "255.255.255.0", + "name": "network-determined-bardeen", + "type": "private", + "_id": "5beaec64680cffd11f0e7c31" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:24", + "_id": "5bec18c6680cffd11f0f0d8b" + } + ], + "dvddrive": [{"iso": {}}] +} + + +class TestRbxDataSource(CiTestCase): + parsed_user = None + allowed_subp = ['bash'] + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def setUp(self): + super(TestRbxDataSource, self).setUp() + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp} + ) + + # defaults for few tests + self.ds = ds.DataSourceRbxCloud + self.seed_dir = self.paths.seed_dir + self.sys_cfg = {'datasource': {'RbxCloud': {'dsmode': 'local'}}} + + def test_seed_read_user_data_callback_empty_file(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, {}) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertIsNone(results) + + def test_seed_read_user_data_callback_valid_disk(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertTrue('metadata' in results) + self.assertTrue('cfg' in results) + + def test_seed_read_user_data_callback_userdata(self): + userdata = "#!/bin/sh\nexit 1" + populate_user_metadata(self.seed_dir, userdata) + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertEqual(results['userdata'], userdata) + + def 
test_generate_network_config(self): + expected = { + 'version': 1, + 'config': [ + { + 'subnets': [ + {'control': 'auto', + 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], + 'netmask': '255.255.248.0', + 'address': '62.181.8.174', + 'type': 'static', 'gateway': '62.181.8.1'} + ], + 'type': 'physical', + 'name': 'eth0', + 'mac_address': '00:15:5d:ff:0f:03' + }, + { + 'subnets': [ + {'control': 'auto', + 'dns_nameservers': ['9.9.9.9', '8.8.8.8'], + 'netmask': '255.255.255.0', + 'address': '10.209.78.11', + 'type': 'static', + 'gateway': '10.209.78.1'} + ], + 'type': 'physical', + 'name': 'eth1', + 'mac_address': '00:15:5d:ff:0f:24' + } + ] + } + self.assertTrue( + ds.generate_network_config(CLOUD_METADATA['netadp']), + expected + ) + + @mock.patch(DS_PATH + '.util.subp') + def test_gratuitous_arp_run_standard_arping(self, m_subp): + """Test handle run arping & parameters.""" + items = [ + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104' + }, + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104', + }, + ] + ds.gratuitous_arp(items, self._fetch_distro('ubuntu')) + self.assertEqual([ + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]), + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]) + ], m_subp.call_args_list + ) + + @mock.patch(DS_PATH + '.util.subp') + def test_handle_rhel_like_arping(self, m_subp): + """Test handle on RHEL-like distros.""" + items = [ + { + 'source': '172.16.6.104', + 'destination': '172.17.0.2', + } + ] + ds.gratuitous_arp(items, self._fetch_distro('fedora')) + self.assertEqual([ + mock.call( + ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2'] + )], + m_subp.call_args_list + ) + + +def populate_cloud_metadata(path, data): + populate_dir(path, {'cloud.json': json.dumps(data)}) + + +def populate_user_metadata(path, data): + populate_dir(path, {'user.data': data}) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 7aeeb91c..c5b5c46c 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -267,10 +267,13 @@ class TestDsIdentify(DsIdentifyBase): """ConfigDrive datasource has a disk with LABEL=config-2.""" self._test_ds_found('ConfigDrive') + def test_rbx_cloud(self): + """Rbx datasource has a disk with LABEL=CLOUDMD.""" + self._test_ds_found('RbxCloud') + def test_config_drive_upper(self): """ConfigDrive datasource has a disk with LABEL=CONFIG-2.""" self._test_ds_found('ConfigDriveUpper') - return def test_config_drive_seed(self): """Config Drive seed directory.""" @@ -896,6 +899,18 @@ VALID_CFG = { os.path.join(P_SEED_DIR, 'config_drive', 'openstack', 'latest', 'meta_data.json'): 'md\n'}, }, + 'RbxCloud': { + 'ds': 'RbxCloud', + 'mocks': [ + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CLOUDMD'}] + )}, + ], + }, 'Hetzner': { 'ds': 'Hetzner', 'files': {P_SYS_VENDOR: 'Hetzner\n'}, diff --git a/tools/ds-identify b/tools/ds-identify index f76f2a6e..40fc0604 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -124,7 +124,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. 
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale" +OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -702,6 +702,11 @@ dscheck_OpenNebula() { return ${DS_NOT_FOUND} } +dscheck_RbxCloud() { + has_fs_with_label "CLOUDMD" "cloudmd" && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + ovf_vmware_guest_customization() { # vmware guest customization -- cgit v1.2.3 From 7d5d34f3643a2108d667759f57a5ab63d0affadd Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 11 Oct 2019 14:54:49 +0000 Subject: Add Support for e24cloud to Ec2 datasource. e24cloud provides an EC2 compatible datasource. This just identifies their platform based on dmi 'system-vendor' having 'e24cloud'. https://www.e24cloud.com/en/ . Updated chassis typo in zstack unit test docstring. LP: #1696476 --- cloudinit/apport.py | 1 + cloudinit/sources/DataSourceEc2.py | 12 +++++++++++- doc/rtd/topics/datasources.rst | 3 ++- doc/rtd/topics/datasources/e24cloud.rst | 9 +++++++++ tests/unittests/test_datasource/test_ec2.py | 15 ++++++++++++++- tests/unittests/test_ds_identify.py | 18 +++++++++++++++++- tools/ds-identify | 5 +++++ 7 files changed, 59 insertions(+), 4 deletions(-) create mode 100644 doc/rtd/topics/datasources/e24cloud.rst (limited to 'cloudinit/sources') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index fde1f75b..c6797f12 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -22,6 +22,7 @@ KNOWN_CLOUD_NAMES = [ 'CloudSigma', 'CloudStack', 'DigitalOcean', + 'E24Cloud', 'GCE - Google Compute Engine', 'Exoscale', 'Hetzner Cloud', diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 6c72ace2..1d88c9b1 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -34,6 +34,7 @@ class CloudNames(object): AWS = "aws" BRIGHTBOX = "brightbox" ZSTACK = "zstack" + E24CLOUD = "e24cloud" # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', # then an attempt at the Ec2 Metadata service will be made. UNKNOWN = "unknown" @@ -483,11 +484,16 @@ def identify_zstack(data): return CloudNames.ZSTACK +def identify_e24cloud(data): + if data['vendor'] == 'e24cloud': + return CloudNames.E24CLOUD + + def identify_platform(): # identify the platform and return an entry in CloudNames. data = _collect_platform_data() checks = (identify_aws, identify_brightbox, identify_zstack, - lambda x: CloudNames.UNKNOWN) + identify_e24cloud, lambda x: CloudNames.UNKNOWN) for checker in checks: try: result = checker(data) @@ -506,6 +512,7 @@ def _collect_platform_data(): uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' serial: dmi 'system-serial-number' (/sys/.../product_serial) asset_tag: 'dmidecode -s chassis-asset-tag' + vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor) On Ec2 instances experimentation is that product_serial is upper case, and product_uuid is lower case. This returns lower case values for both. 
@@ -534,6 +541,9 @@ def _collect_platform_data(): data['asset_tag'] = asset_tag.lower() + vendor = util.read_dmi_data('system-manufacturer') + data['vendor'] = (vendor if vendor else '').lower() + return data diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index a337c08c..70fbe07d 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -29,8 +29,9 @@ The following is a list of documents for each supported datasource: datasources/aliyun.rst datasources/altcloud.rst - datasources/ec2.rst datasources/azure.rst + datasources/ec2.rst + datasources/e24cloud.rst datasources/cloudsigma.rst datasources/cloudstack.rst datasources/configdrive.rst diff --git a/doc/rtd/topics/datasources/e24cloud.rst b/doc/rtd/topics/datasources/e24cloud.rst new file mode 100644 index 00000000..de9a4127 --- /dev/null +++ b/doc/rtd/topics/datasources/e24cloud.rst @@ -0,0 +1,9 @@ +.. _datasource_e24cloud: + +E24Cloud +======== +`E24Cloud <https://www.e24cloud.com/en/>`_ platform provides an AWS Ec2 metadata +service clone. It identifies itself to guests using the dmi +system-manufacturer (/sys/class/dmi/id/sys_vendor). + +.. vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 6fabf258..5e1dd777 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -672,13 +672,14 @@ class TesIdentifyPlatform(test_helpers.CiTestCase): 'serial': 'H23-C4J3JV-R6', 'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2', 'uuid_source': 'dmi', + 'vendor': 'tothecloud', } unspecial.update(**kwargs) return unspecial @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') def test_identify_zstack(self, m_collect): - """zstack should be identified if cassis-asset-tag ends in .zstack.io + """zstack should be identified if chassis-asset-tag ends in .zstack.io """ m_collect.return_value = self.collmock(asset_tag='123456.zstack.io') self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform()) @@ -690,4 +691,16 @@ class TesIdentifyPlatform(test_helpers.CiTestCase): m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io') self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_e24cloud(self, m_collect): + """e24cloud identified if vendor is e24cloud""" + m_collect.return_value = self.collmock(vendor='e24cloud') + self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform()) + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_e24cloud_negative(self, m_collect): + """e24cloud not identified if vendor is not exactly 'e24cloud'""" + m_collect.return_value = self.collmock(vendor='e24cloudyday') + self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index c5b5c46c..12c6ae36 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -616,6 +616,14 @@ class TestDsIdentify(DsIdentifyBase): """EC2: chassis asset tag ends with 'zstack.io'""" self._test_ds_found('Ec2-ZStack') + def test_e24cloud_is_ec2(self): + """EC2: e24cloud identified by sys_vendor""" + self._test_ds_found('Ec2-E24Cloud') + + def test_e24cloud_not_active(self): + """EC2: e24cloudyday in sys_vendor is not e24cloud""" + self._test_ds_not_found('Ec2-E24Cloud-negative') + class TestIsIBMProvisioning(DsIdentifyBase): """Test the
is_ibm_provisioning method in ds-identify.""" @@ -994,7 +1002,15 @@ VALID_CFG = { 'Ec2-ZStack': { 'ds': 'Ec2', 'files': {P_CHASSIS_ASSET_TAG: '123456.zstack.io\n'}, - } + }, + 'Ec2-E24Cloud': { + 'ds': 'Ec2', + 'files': {P_SYS_VENDOR: 'e24cloud\n'}, + }, + 'Ec2-E24Cloud-negative': { + 'ds': 'Ec2', + 'files': {P_SYS_VENDOR: 'e24cloudyday\n'}, + } } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index 40fc0604..20a99ee9 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -905,6 +905,11 @@ ec2_identify_platform() { *.zstack.io) _RET="ZStack"; return 0;; esac + local vendor="${DI_DMI_SYS_VENDOR}" + case "$vendor" in + e24cloud) _RET="E24cloud"; return 0;; + esac + # AWS http://docs.aws.amazon.com/AWSEC2/ # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="${PATH_SYS_HYPERVISOR}/uuid" -- cgit v1.2.3 From fac98983187c0984aa79c569c4b76cab90fd6f47 Mon Sep 17 00:00:00 2001 From: Harald Jensås Date: Wed, 16 Oct 2019 15:30:28 +0000 Subject: net: handle openstack dhcpv6-stateless configuration Openstack subnets can be configured to use SLAAC by setting ipv6_address_mode=dhcpv6-stateless. When this is the case the sysconfig interface configuration should use IPV6_AUTOCONF=yes and not set DHCPV6C=yes. This change sets the subnets type property to the full network['type'] from openstack metadata. cloudinit/net/sysconfig.py and cloudinit/net/eni.py are updated to support new subnet types: - 'ipv6_dhcpv6-stateless' => IPV6_AUTOCONF=yes - 'ipv6_dhcpv6-stateful' => DHCPV6C=yes Type 'dhcp6' in sysconfig is kept for backward compatibility with any implementations that set subnet_type == 'dhcp6'. LP: #1847517 --- cloudinit/net/eni.py | 7 +- cloudinit/net/sysconfig.py | 7 +- cloudinit/sources/helpers/openstack.py | 3 +- .../unittests/test_datasource/test_configdrive.py | 39 ++++++++++ tests/unittests/test_net.py | 88 ++++++++++++++++++++++ 5 files changed, 141 insertions(+), 3 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index b129bb62..530922b5 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -411,8 +411,13 @@ class Renderer(renderer.Renderer): else: ipv4_subnet_mtu = subnet.get('mtu') iface['inet'] = subnet_inet - if subnet['type'].startswith('dhcp'): + if (subnet['type'] == 'dhcp4' or subnet['type'] == 'dhcp6' or + subnet['type'] == 'ipv6_dhcpv6-stateful'): + # Configure network settings using DHCP or DHCPv6 iface['mode'] = 'dhcp' + elif subnet['type'] == 'ipv6_dhcpv6-stateless': + # Configure network settings using SLAAC from RAs + iface['mode'] = 'auto' # do not emit multiple 'auto $IFACE' lines as older (precise) # ifupdown complains diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 87b548e5..4e656768 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -343,10 +343,15 @@ class Renderer(renderer.Renderer): for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): mtu_key = 'MTU' subnet_type = subnet.get('type') - if subnet_type == 'dhcp6': + if subnet_type == 'dhcp6' or subnet_type == 'ipv6_dhcpv6-stateful': # TODO need to set BOOTPROTO to dhcp6 on SUSE iface_cfg['IPV6INIT'] = True + # Configure network settings using DHCPv6 iface_cfg['DHCPV6C'] = True + elif subnet_type == 'ipv6_dhcpv6-stateless': + iface_cfg['IPV6INIT'] = True + # Configure network settings using SLAAC from RAs + iface_cfg['IPV6_AUTOCONF'] = True elif subnet_type in ['dhcp4', 'dhcp']: iface_cfg['BOOTPROTO'] = 'dhcp' elif subnet_type == 'static': diff --git 
a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 8f069115..d1c4601a 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -585,7 +585,8 @@ def convert_net_json(network_json=None, known_macs=None): subnet = dict((k, v) for k, v in network.items() if k in valid_keys['subnet']) if 'dhcp' in network['type']: - t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4' + t = (network['type'] if network['type'].startswith('ipv6') + else 'dhcp4') subnet.update({ 'type': t, }) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 520c50fe..8c788c1c 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -499,6 +499,45 @@ class TestNetJson(CiTestCase): known_macs=KNOWN_MACS) self.assertEqual(myds.network_config, network_config) + def test_network_config_conversion_dhcp6(self): + """Test some ipv6 input network json and check the expected + conversions.""" + in_data = { + 'links': [ + {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', + 'ethernet_mac_address': 'fa:16:3e:69:b0:58', + 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, + {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', + 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', + 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, + ], + 'networks': [ + {'link': 'tap2ecc7709-b3', 'type': 'ipv6_dhcpv6-stateless', + 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', + 'id': 'network0'}, + {'link': 'tap2f88d109-5b', 'type': 'ipv6_dhcpv6-stateful', + 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', + 'id': 'network1'}, + ] + } + out_data = { + 'version': 1, + 'config': [ + {'mac_address': 'fa:16:3e:69:b0:58', + 'mtu': None, + 'name': 'enp0s1', + 'subnets': [{'type': 'ipv6_dhcpv6-stateless'}], + 'type': 'physical'}, + {'mac_address': 'fa:16:3e:d4:57:ad', + 'mtu': None, + 'name': 'enp0s2', + 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}], + 'type': 'physical'} + ], + } + conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS) + self.assertEqual(out_data, conv_data) + def test_network_config_conversions(self): """Tests a bunch of input network json and checks the expected conversions.""" diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index b6597412..f5a9cae6 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1070,6 +1070,82 @@ NETWORK_CONFIGS = { """), }, }, + 'dhcpv6_stateless': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateless'} + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + IPV6_AUTOCONF=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'dhcpv6_stateful': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 
1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateful'} + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, 'all': { 'expected_eni': ("""\ auto lo @@ -2781,6 +2857,18 @@ USERCTL=no self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + def test_dhcpv6_stateless_config(self): + entry = NETWORK_CONFIGS['dhcpv6_stateless'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_stateful_config(self): + entry = NETWORK_CONFIGS['dhcpv6_stateful'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + def test_check_ifcfg_rh(self): """ifcfg-rh plugin is added NetworkManager.conf if conf present.""" render_dir = self.tmp_dir() -- cgit v1.2.3 From a86829d32284ea4842f13035442b1a5127997018 Mon Sep 17 00:00:00 2001 From: Dominic Schlegel Date: Thu, 17 Oct 2019 15:00:48 +0000 Subject: fix some more typos in comments --- cloudinit/net/__init__.py | 2 +- cloudinit/sources/DataSourceConfigDrive.py | 2 +- cloudinit/sources/DataSourceNoCloud.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 307da780..bd806378 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -389,7 +389,7 @@ def find_fallback_nic(blacklist_drivers=None): potential_interfaces = possibly_connected # if eth0 exists use it above anything else, otherwise get the interface - # that we can read 'first' (using the sorted defintion of first). + # that we can read 'first' (using the sorted definition of first). 
names = list(sorted(potential_interfaces, key=natural_sort_key)) if DEFAULT_PRIMARY_INTERFACE in names: names.remove(DEFAULT_PRIMARY_INTERFACE) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 571d30dc..c3627152 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -234,7 +234,7 @@ def find_candidate_devs(probe_optical=True, dslist=None): config drive v2: Disk should be: - * either vfat or iso9660 formated + * either vfat or iso9660 formatted * labeled with 'config-2' or 'CONFIG-2' """ if dslist is None: diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 474773d9..ee748b41 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -266,7 +266,7 @@ def load_cmdline_data(fill, cmdline=None): ("ds=nocloud-net", sources.DSMODE_NETWORK)] for idstr, dsmode in pairs: if parse_cmdline_data(idstr, fill, cmdline): - # if dsmode was explicitly in the commanad line, then + # if dsmode was explicitly in the command line, then # prefer it to the dsmode based on the command line id if 'dsmode' not in fill: fill['dsmode'] = dsmode -- cgit v1.2.3 From ecb501b84338f078be18c38c68c3ce87fed3584b Mon Sep 17 00:00:00 2001 From: Xiaofeng Wang Date: Thu, 17 Oct 2019 15:18:44 +0000 Subject: guestcust_util: handle special characters in config file Handle the special characters when reading VM Tools configure file. For example, the key and value may contain _, - and . etc. --- cloudinit/sources/helpers/vmware/imc/guestcust_util.py | 2 +- tests/unittests/test_vmware/test_guestcust_util.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index eb78172e..3d369d04 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -138,7 +138,7 @@ def get_tools_config(section, key, defaultVal): try: (outText, _) = util.subp(cmd) - m = re.match(r'([a-zA-Z0-9 ]+)=(.*)', outText) + m = re.match(r'([^=]+)=(.*)', outText) if m: retValue = m.group(2).strip() logger.debug("Get tools config: [%s] %s = %s", diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py index b8fa9942..b175a998 100644 --- a/tests/unittests/test_vmware/test_guestcust_util.py +++ b/tests/unittests/test_vmware/test_guestcust_util.py @@ -62,4 +62,11 @@ class TestGuestCustUtil(CiTestCase): get_tools_config('section', 'key', 'defaultVal'), 'Bar=Wark') + # value contains specific characters + with mock.patch.object(util, 'subp', + return_value=('[a] b.c_d=e-f', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'e-f') + # vi: ts=4 expandtab -- cgit v1.2.3 From 3e2482e8aa6630ca9bc115dc1f82d44d3fde1681 Mon Sep 17 00:00:00 2001 From: Chris Glass Date: Thu, 24 Oct 2019 17:32:58 +0000 Subject: exoscale: Increase url_max_wait to 120s. The exoscale datasource defines a shorter timeout than the default (10) but did not override url_max_wait, resulting in a single attempt being made to wait for the metadata service. In some rare cases, a race condition means the route to the metadata service is not set within 10 seconds, and more attempts should be made. This sets the url_max_wait for the datasource to 120. 
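As an illustration of what this enables, here is a minimal sketch of the retry behaviour (the wait_for_metadata helper and its check_url callable are hypothetical stand-ins, not cloud-init's url_helper.wait_for_url): url_max_wait bounds the total retry window, while the per-request timeout bounds each individual attempt.

    import time

    def wait_for_metadata(check_url, timeout=10, max_wait=120):
        # check_url: hypothetical callable returning True once the
        # metadata service answers within `timeout` seconds.
        deadline = time.time() + max_wait
        while time.time() < deadline:
            if check_url(timeout=timeout):
                return True
            time.sleep(1)  # brief pause, then retry until the deadline
        return False

With max_wait left at a single request's worth of time, one failed attempt exhausts the window; raising it to 120 gives the route to the metadata service time to appear.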
--- cloudinit/sources/DataSourceExoscale.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py index fdfb4ed3..4616daa7 100644 --- a/cloudinit/sources/DataSourceExoscale.py +++ b/cloudinit/sources/DataSourceExoscale.py @@ -26,6 +26,8 @@ class DataSourceExoscale(sources.DataSource): dsname = 'Exoscale' + url_max_wait = 120 + def __init__(self, sys_cfg, distro, paths): super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths) LOG.debug("Initializing the Exoscale datasource") -- cgit v1.2.3 From e1b4b8c903fed3b69e57ec08c17ce94097d55901 Mon Sep 17 00:00:00 2001 From: Sam Eiderman Date: Tue, 29 Oct 2019 23:00:36 +0000 Subject: azure: Do not lock user on instance id change After initial boot ovf-env.xml is copied to agent dir (/var/lib/waagent/) with REDACTED password. On subsequent boots DataSourceAzure loads with a configuration where the user specified in /var/lib/waagent/ovf-env.xml is locked. If instance id changes, cc_users_groups action will lock the user. Fix this behavior by not locking the user if its password is REDACTED. LP: #1849677 --- cloudinit/sources/DataSourceAzure.py | 5 +++-- tests/unittests/test_datasource/test_azure.py | 16 ++++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 4984fa84..cdf49d36 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1193,9 +1193,10 @@ def read_azure_ovf(contents): defuser = {} if username: defuser['name'] = username - if password and DEF_PASSWD_REDACTION != password: - defuser['passwd'] = encrypt_pass(password) + if password: defuser['lock_passwd'] = False + if DEF_PASSWD_REDACTION != password: + defuser['passwd'] = encrypt_pass(password) if defuser: cfg['system_info'] = {'default_user': defuser} diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 3547dd94..80c6f019 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -769,6 +769,22 @@ scbus-1 on xpt0 bus 0 crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos])) + def test_user_not_locked_if_password_redacted(self): + odata = {'HostName': "myhost", 'UserName': "myuser", + 'UserPassword': dsaz.DEF_PASSWD_REDACTION} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue('default_user' in dsrc.cfg['system_info']) + defuser = dsrc.cfg['system_info']['default_user'] + + # default user should be updated username and should not be locked. + self.assertEqual(defuser['name'], odata['UserName']) + self.assertIn('lock_passwd', defuser) + self.assertFalse(defuser['lock_passwd']) + def test_userdata_plain(self): mydata = "FOOBAR" odata = {'UserData': {'text': mydata, 'encoding': 'plain'}} -- cgit v1.2.3 From a61ee02a50eb21954c114e01d2d042916bb2dc14 Mon Sep 17 00:00:00 2001 From: Xiaofeng Wang Date: Thu, 31 Oct 2019 15:15:51 +0000 Subject: OVF: disable custom script execution by default For security reasons, we disable the custom script by default. If a custom script is provided, stop customization unless the custom script is explicitly enabled by tools config.
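Reduced to a sketch, the gating this commit introduces looks like the following (get_config stands in for the real get_tools_config helper, and section/key stand for the CONFGROUPNAME_GUESTCUSTOMIZATION and GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS constants used in the diff below):

    def custom_scripts_enabled(get_config, section, key):
        # The default flips from "true" to "false"; anything other than
        # an explicit "true" (case-insensitive) keeps scripts disabled.
        return get_config(section, key, "false").lower() == "true"

The effect is fail-closed: an absent, empty, or malformed setting now disables custom scripts instead of enabling them.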
--- cloudinit/sources/DataSourceOVF.py | 4 ++-- tests/unittests/test_datasource/test_ovf.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index e7794aab..896841e3 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -155,8 +155,8 @@ class DataSourceOVF(sources.DataSource): custScriptConfig = get_tools_config( CONFGROUPNAME_GUESTCUSTOMIZATION, GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS, - "true") - if custScriptConfig.lower() == "false": + "false") + if custScriptConfig.lower() != "true": # Update the customization status if there is a # custom script is disabled if special_customization and customscript: diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index a615470a..a19c35c8 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -204,7 +204,7 @@ class TestDatasourceOVF(CiTestCase): customscript = self.tmp_path('test-script', self.tdir) util.write_file(customscript, "This is the post cust script") - with mock.patch(MPATH + 'get_tools_config', return_value='false'): + with mock.patch(MPATH + 'get_tools_config', return_value='invalid'): with mock.patch(MPATH + 'set_customization_status', return_value=('msg', b'')): with self.assertRaises(RuntimeError) as context: -- cgit v1.2.3 From e81389592a67bb54b889512928dcdf65f87ad436 Mon Sep 17 00:00:00 2001 From: Mike Gerdts Date: Thu, 31 Oct 2019 19:45:29 +0000 Subject: DataSourceSmartOS: reconfigure network on each boot In typical cases, SmartOS does not use DHCP for network configuration. As such, if the network configuration changes, that change is reflected in metadata and will be picked up during the next boot. LP: #1765801 Joyent: OS-6902 reconfigure network on each boot --- cloudinit/sources/DataSourceSmartOS.py | 8 +++++++- tests/unittests/test_datasource/test_smartos.py | 9 ++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 32b57cdd..cf676504 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -1,5 +1,5 @@ # Copyright (C) 2013 Canonical Ltd. -# Copyright (c) 2018, Joyent, Inc. +# Copyright 2019 Joyent, Inc. # # Author: Ben Howard # @@ -34,6 +34,7 @@ from cloudinit import log as logging from cloudinit import serial from cloudinit import sources from cloudinit import util +from cloudinit.event import EventType LOG = logging.getLogger(__name__) @@ -178,6 +179,7 @@ class DataSourceSmartOS(sources.DataSource): self.metadata = {} self.network_data = None self._network_config = None + self.update_events['network'].add(EventType.BOOT) self.script_base_d = os.path.join(self.paths.get_cpath("scripts")) @@ -319,6 +321,10 @@ class DataSourceSmartOS(sources.DataSource): @property def network_config(self): + # sources.clear_cached_data() may set _network_config to '_unset'.
+ if self._network_config == sources.UNSET: + self._network_config = None + if self._network_config is None: if self.network_data is not None: self._network_config = ( diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 42ac6971..d5b1c29c 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -1,5 +1,5 @@ # Copyright (C) 2013 Canonical Ltd. -# Copyright (c) 2018, Joyent, Inc. +# Copyright 2019 Joyent, Inc. # # Author: Ben Howard # @@ -31,6 +31,7 @@ from cloudinit.sources.DataSourceSmartOS import ( convert_smartos_network_data as convert_net, SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, identify_file) +from cloudinit.event import EventType import six @@ -653,6 +654,12 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): self.assertEqual(dsrc.device_name_to_device('FOO'), mydscfg['disk_aliases']['FOO']) + def test_reconfig_network_on_boot(self): + # Test to ensure that network is configured from metadata on each boot + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + self.assertSetEqual(set([EventType.BOOT_NEW_INSTANCE, EventType.BOOT]), + dsrc.update_events['network']) + class TestIdentifyFile(CiTestCase): """Test the 'identify_file' utility.""" -- cgit v1.2.3 From 15fa154602f281c9239084d7d20a0999c6b09970 Mon Sep 17 00:00:00 2001 From: David Kindred Date: Mon, 4 Nov 2019 22:00:19 +0000 Subject: configdrive: fix subplatform config-drive for /config-drive source When ConfigDrive discovers the source path /config-drive, subplatform is now reports 'config-drive' LP: #1849731 --- cloudinit/sources/DataSourceConfigDrive.py | 6 +++--- .../unittests/test_datasource/test_configdrive.py | 23 ++++++++++++++++++---- 2 files changed, 22 insertions(+), 7 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index c3627152..f77923c2 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -163,10 +163,10 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): def _get_subplatform(self): """Return the subplatform metadata source details.""" - if self.seed_dir in self.source: - subplatform_type = 'seed-dir' - elif self.source.startswith('/dev'): + if self.source.startswith('/dev'): subplatform_type = 'config-disk' + else: + subplatform_type = 'seed-dir' return '%s (%s)' % (subplatform_type, self.source) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 8c788c1c..cfb3b0a7 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -220,13 +220,15 @@ CFG_DRIVE_FILES_V2 = { 'openstack/2015-10-15/user_data': USER_DATA, 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)} +M_PATH = "cloudinit.sources.DataSourceConfigDrive." 
+ class TestConfigDriveDataSource(CiTestCase): def setUp(self): super(TestConfigDriveDataSource, self).setUp() self.add_patch( - "cloudinit.sources.DataSourceConfigDrive.util.find_devs_with", + M_PATH + "util.find_devs_with", "m_find_devs_with", return_value=[]) self.tmp = self.tmp_dir() @@ -468,7 +470,7 @@ class TestConfigDriveDataSource(CiTestCase): util.find_devs_with = orig_find_devs_with util.is_partition = orig_is_partition - @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + @mock.patch(M_PATH + 'on_first_boot') def test_pubkeys_v2(self, on_first_boot): """Verify that public-keys work in config-drive-v2.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) @@ -478,6 +480,19 @@ class TestConfigDriveDataSource(CiTestCase): self.assertEqual('openstack', myds.platform) self.assertEqual('seed-dir (%s/seed)' % self.tmp, myds.subplatform) + def test_subplatform_config_drive_when_starts_with_dev(self): + """subplatform reports config-drive when source starts with /dev/.""" + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + with mock.patch(M_PATH + 'find_candidate_devs') as m_find_devs: + with mock.patch(M_PATH + 'util.is_FreeBSD', return_value=False): + with mock.patch(M_PATH + 'util.mount_cb'): + with mock.patch(M_PATH + 'on_first_boot'): + m_find_devs.return_value = ['/dev/anything'] + self.assertEqual(True, cfg_ds.get_data()) + self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform) + class TestNetJson(CiTestCase): def setUp(self): @@ -485,13 +500,13 @@ class TestNetJson(CiTestCase): self.tmp = self.tmp_dir() self.maxDiff = None - @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + @mock.patch(M_PATH + 'on_first_boot') def test_network_data_is_found(self, on_first_boot): """Verify that network_data is present in ds in config-drive-v2.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) self.assertIsNotNone(myds.network_json) - @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + @mock.patch(M_PATH + 'on_first_boot') def test_network_config_is_converted(self, on_first_boot): """Verify that network_data is converted and present on ds object.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) -- cgit v1.2.3 From 02f07b666adc62d70c4f1a98c2ae80cb6629fa9a Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 4 Nov 2019 22:11:37 +0000 Subject: azure: support matching dhcp route-metrics for dual-stack ipv4 ipv6 Network v2 configuration for Azure will set both dhcp4 and dhcp6 to False by default. When IPv6 privateIpAddresses are present for an interface in Azure's Instance Metadata Service (IMDS), set dhcp6: True and provide a route-metric value that will match the corresponding dhcp4 route-metric. The route-metric value will increase by 100 for each additional interface present to ensure the primary interface has a route to IMDS. Also fix dhcp route-metric rendering for eni and sysconfig distros. 
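The metric is a simple linear function of the NIC index; a small illustration of the values the change below produces (lower metrics win, so the primary NIC keeps the default route and the route to IMDS):

    def dhcp_route_metric(nic_index):
        # eth0 -> 100, eth1 -> 200, eth2 -> 300, ...
        return (nic_index + 1) * 100

    assert [dhcp_route_metric(i) for i in range(3)] == [100, 200, 300]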
LP: #1850308 --- cloudinit/net/network_state.py | 17 ++++- cloudinit/net/sysconfig.py | 6 +- cloudinit/sources/DataSourceAzure.py | 10 ++- tests/unittests/test_datasource/test_azure.py | 101 ++++++++++++++++++++++++++ tests/unittests/test_net.py | 54 ++++++++++++++ 5 files changed, 178 insertions(+), 10 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index ba85c69e..20b7716b 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -22,8 +22,9 @@ NETWORK_STATE_REQUIRED_KEYS = { 1: ['version', 'config', 'network_state'], } NETWORK_V2_KEY_FILTER = [ - 'addresses', 'dhcp4', 'dhcp6', 'gateway4', 'gateway6', 'interfaces', - 'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan' + 'addresses', 'dhcp4', 'dhcp4-overrides', 'dhcp6', 'dhcp6-overrides', + 'gateway4', 'gateway6', 'interfaces', 'match', 'mtu', 'nameservers', + 'renderer', 'set-name', 'wakeonlan' ] NET_CONFIG_TO_V2 = { @@ -747,12 +748,20 @@ class NetworkStateInterpreter(object): def _v2_to_v1_ipcfg(self, cfg): """Common ipconfig extraction from v2 to v1 subnets array.""" + def _add_dhcp_overrides(overrides, subnet): + if 'route-metric' in overrides: + subnet['metric'] = overrides['route-metric'] + subnets = [] if cfg.get('dhcp4'): - subnets.append({'type': 'dhcp4'}) + subnet = {'type': 'dhcp4'} + _add_dhcp_overrides(cfg.get('dhcp4-overrides', {}), subnet) + subnets.append(subnet) if cfg.get('dhcp6'): + subnet = {'type': 'dhcp6'} self.use_ipv6 = True - subnets.append({'type': 'dhcp6'}) + _add_dhcp_overrides(cfg.get('dhcp6-overrides', {}), subnet) + subnets.append(subnet) gateway4 = None gateway6 = None diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 6717d924..fe0c67ca 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -395,6 +395,9 @@ class Renderer(renderer.Renderer): ipv6_index = -1 for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): subnet_type = subnet.get('type') + # metric may apply to both dhcp and static config + if 'metric' in subnet: + iface_cfg['METRIC'] = subnet['metric'] if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']: if has_default_route and iface_cfg['BOOTPROTO'] != 'none': iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False @@ -426,9 +429,6 @@ class Renderer(renderer.Renderer): else: iface_cfg['GATEWAY'] = subnet['gateway'] - if 'metric' in subnet: - iface_cfg['METRIC'] = subnet['metric'] - if 'dns_search' in subnet: iface_cfg['DOMAIN'] = ' '.join(subnet['dns_search']) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index cdf49d36..44cca210 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1322,7 +1322,8 @@ def parse_network_config(imds_metadata): network_metadata = imds_metadata['network'] for idx, intf in enumerate(network_metadata['interface']): nicname = 'eth{idx}'.format(idx=idx) - dev_config = {} + dev_config = {'dhcp4': False, 'dhcp6': False} + dhcp_override = {'route-metric': (idx + 1) * 100} for addr4 in intf['ipv4']['ipAddress']: privateIpv4 = addr4['privateIpAddress'] if privateIpv4: @@ -1340,12 +1341,15 @@ def parse_network_config(imds_metadata): # non-primary interfaces should have a higher # route-metric (cost) so default routes prefer # primary nic due to lower route-metric value - dev_config['dhcp4-overrides'] = { - 'route-metric': (idx + 1) * 100} + dev_config['dhcp4-overrides'] = dhcp_override for addr6 in intf['ipv6']['ipAddress']: privateIpv6 = 
addr6['privateIpAddress'] if privateIpv6: dev_config['dhcp6'] = True + # non-primary interfaces should have a higher + # route-metric (cost) so default routes prefer + # primary nic due to lower route-metric value + dev_config['dhcp6-overrides'] = dhcp_override break if dev_config: mac = ':'.join(re.findall(r'..', intf['macAddress'])) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 80c6f019..d92d7b2f 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -153,6 +153,102 @@ SECONDARY_INTERFACE = { MOCKPATH = 'cloudinit.sources.DataSourceAzure.' +class TestParseNetworkConfig(CiTestCase): + + maxDiff = None + + def test_single_ipv4_nic_configuration(self): + """parse_network_config emits dhcp on single nic with ipv4""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) + + def test_increases_route_metric_for_non_primary_nics(self): + """parse_network_config increases route-metric for each nic""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + imds_data['network']['interface'].append(third_intf) + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv4_and_ipv6_route_metrics_match_for_nics(self): + """parse_network_config emits matching ipv4 and ipv6 route-metrics.""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp6': False, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + third_intf['ipv6'] = { + "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + } + imds_data['network']['interface'].append(third_intf) + 
self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv4_secondary_ips_will_be_static_addrs(self): + """parse_network_config emits primary ipv4 as dhcp others are static""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + # Secondary ipv6 addresses currently ignored/unconfigured + nic1['ipv6'] = { + "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}] + } + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + class TestGetMetadataFromIMDS(HttprettyTestCase): with_logs = True @@ -641,6 +737,7 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'set-name': 'eth0', 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}}}, 'version': 2} @@ -658,14 +755,17 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'set-name': 'eth0', 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}}, 'eth1': {'set-name': 'eth1', 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200}}, 'eth2': {'set-name': 'eth2', 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} @@ -999,6 +1099,7 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, 'match': {'macaddress': '00:0d:3a:04:75:98'}, 'set-name': 'eth0'}}, 'version': 2} diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 6f83ad73..35ce55d2 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3101,6 +3101,36 @@ USERCTL=no self._compare_files_to_expected( expected, self._render_and_read(network_config=v2data)) + def test_from_v2_route_metric(self): + """verify route-metric gets rendered on nic when source is netplan.""" + overrides = {'route-metric': 100} + v2base = { + 'version': 2, + 'ethernets': { + 'eno1': {'dhcp4': True, + 'match': {'macaddress': '07-1c-c6-75-a4-be'}}}} + expected = { + 'ifcfg-eno1': textwrap.dedent("""\ + BOOTPROTO=dhcp + DEVICE=eno1 + HWADDR=07-1c-c6-75-a4-be + METRIC=100 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + } + for dhcp_ver in ('dhcp4', 'dhcp6'): + v2data = copy.deepcopy(v2base) + if dhcp_ver == 'dhcp6': + expected['ifcfg-eno1'] += "IPV6INIT=yes\nDHCPV6C=yes\n" + v2data['ethernets']['eno1'].update( + {dhcp_ver: True, '{0}-overrides'.format(dhcp_ver): overrides}) + self._compare_files_to_expected( + expected, self._render_and_read(network_config=v2data)) + class TestOpenSuseSysConfigRendering(CiTestCase): @@ -3466,6 +3496,30 @@ iface eth0 inet dhcp self.assertEqual( expected, dir2dict(tmp_dir)['/etc/network/interfaces']) + def test_v2_route_metric_to_eni(self): + """Network v2 route-metric overrides are preserved in eni output""" + tmp_dir = self.tmp_dir() + renderer = eni.Renderer() + expected_tmpl = textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto 
eth0 + iface eth0 inet{suffix} dhcp + metric 100 + """) + for dhcp_ver in ('dhcp4', 'dhcp6'): + suffix = '6' if dhcp_ver == 'dhcp6' else '' + dhcp_cfg = { + dhcp_ver: True, + '{ver}-overrides'.format(ver=dhcp_ver): {'route-metric': 100}} + v2_input = {'version': 2, 'ethernets': {'eth0': dhcp_cfg}} + ns = network_state.parse_net_config_data(v2_input) + renderer.render_network_state(ns, target=tmp_dir) + self.assertEqual( + expected_tmpl.format(suffix=suffix), + dir2dict(tmp_dir)['/etc/network/interfaces']) + class TestNetplanNetRendering(CiTestCase): -- cgit v1.2.3 From 9662a8791e53bac40a3a37aa3f5bee3076f7d447 Mon Sep 17 00:00:00 2001 From: Joshua Hügli Date: Tue, 12 Nov 2019 13:42:41 -0700 Subject: add data-server dns entry as new metadata server detection --- cloudinit/sources/DataSourceCloudStack.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index f185dc71..e333cb50 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -13,7 +13,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import os -from socket import inet_ntoa +from socket import inet_ntoa, getaddrinfo, gaierror from struct import pack import time @@ -156,6 +156,17 @@ class DataSourceCloudStack(sources.DataSource): return self.metadata['availability-zone'] +def get_data_server(): + # Returns the metadataserver from dns + try: + addrinfo = getaddrinfo("data-server.", 80) + except gaierror: + LOG.debug("DNS Entry data-server not found") + return None + else: + return addrinfo[0][4][0] # return IP + + def get_default_gateway(): # Returns the default gateway ip address in the dotted format. lines = util.load_file("/proc/net/route").splitlines() @@ -218,7 +229,14 @@ def get_vr_address(): # If no virtual router is detected, fallback on default gateway. # See http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/4.8/virtual_machines/user-data.html # noqa - # Try networkd first... + # Try data-server DNS entry first + latest_address = get_data_server() + if latest_address: + LOG.debug("Found metadata server '%s' via data-server DNS entry", + latest_address) + return latest_address + + # Try networkd second... latest_address = dhcp.networkd_get_option_from_leases('SERVER_ADDRESS') if latest_address: LOG.debug("Found SERVER_ADDRESS '%s' via networkd_leases", -- cgit v1.2.3 From 0469f70c8fde69cfbcbc7181a69964aafba50bbc Mon Sep 17 00:00:00 2001 From: Mark Goddard Date: Wed, 13 Nov 2019 18:21:29 +0000 Subject: Fix metadata check when local-hostname is null (#32) Fix traceback when running with a config drive containing a metadata file which has local-hostname set to null. Cloud-init ignores absent local-hostname or None values. LP: #1852100 --- cloudinit/sources/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index a319322b..e6baf8f4 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -568,7 +568,7 @@ class DataSource(object): defhost = "localhost" domain = defdomain - if not self.metadata or 'local-hostname' not in self.metadata: + if not self.metadata or not self.metadata.get('local-hostname'): if metadata_only: return None # this is somewhat questionable really. 
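The one-line fix works because dict.get() collapses the missing-key and JSON-null cases into a single falsy result, for example:

    metadata = {'local-hostname': None}  # key present, value null

    # Old check: the key exists, so a null hostname slipped through.
    assert ('local-hostname' not in metadata) is False

    # New check: None is treated the same as an absent key.
    assert not metadata.get('local-hostname')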
-- cgit v1.2.3 From 9478f0f2fa6935d685092f344b23f34b883149a5 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 13 Nov 2019 13:00:12 -0700 Subject: azure: support secondary ipv6 addresses (#33) Azure's Instance Metadata Service (IMDS) reports multiple IPv6 addresses, via the http://169.254.169.254/metadata/instance/network route. Any additional values after the first in 'ipAddresses' under the 'ipv6' interface key are extracted and configured as static IPs on the interface. --- cloudinit/sources/DataSourceAzure.py | 49 +++++++++++++-------------- tests/unittests/test_datasource/test_azure.py | 34 +++++++++++++++++-- 2 files changed, 56 insertions(+), 27 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 44cca210..87a848ce 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1321,36 +1321,35 @@ def parse_network_config(imds_metadata): LOG.debug('Azure: generating network configuration from IMDS') network_metadata = imds_metadata['network'] for idx, intf in enumerate(network_metadata['interface']): + # First IPv4 and/or IPv6 address will be obtained via DHCP. + # Any additional IPs of each type will be set as static + # addresses. nicname = 'eth{idx}'.format(idx=idx) - dev_config = {'dhcp4': False, 'dhcp6': False} dhcp_override = {'route-metric': (idx + 1) * 100} - for addr4 in intf['ipv4']['ipAddress']: - privateIpv4 = addr4['privateIpAddress'] - if privateIpv4: - if dev_config.get('dhcp4', False): - # Append static address config for ip > 1 - netPrefix = intf['ipv4']['subnet'][0].get( - 'prefix', '24') - if not dev_config.get('addresses'): - dev_config['addresses'] = [] - dev_config['addresses'].append( - '{ip}/{prefix}'.format( - ip=privateIpv4, prefix=netPrefix)) - else: - dev_config['dhcp4'] = True + dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override, + 'dhcp6': False} + for addr_type in ('ipv4', 'ipv6'): + addresses = intf.get(addr_type, {}).get('ipAddress', []) + if addr_type == 'ipv4': + default_prefix = '24' + else: + default_prefix = '128' + if addresses: + dev_config['dhcp6'] = True # non-primary interfaces should have a higher # route-metric (cost) so default routes prefer # primary nic due to lower route-metric value - dev_config['dhcp4-overrides'] = dhcp_override - for addr6 in intf['ipv6']['ipAddress']: - privateIpv6 = addr6['privateIpAddress'] - if privateIpv6: - dev_config['dhcp6'] = True - # non-primary interfaces should have a higher - # route-metric (cost) so default routes prefer - # primary nic due to lower route-metric value - dev_config['dhcp6-overrides'] = dhcp_override - break + dev_config['dhcp6-overrides'] = dhcp_override + for addr in addresses[1:]: + # Append static address config for ip > 1 + netPrefix = intf[addr_type]['subnet'][0].get( + 'prefix', default_prefix) + privateIp = addr['privateIpAddress'] + if not dev_config.get('addresses'): + dev_config['addresses'] = [] + dev_config['addresses'].append( + '{ip}/{prefix}'.format( + ip=privateIp, prefix=netPrefix)) if dev_config: mac = ':'.join(re.findall(r'..', intf['macAddress'])) dev_config.update( diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index d92d7b2f..59e351de 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -197,9 +197,11 @@ class TestParseNetworkConfig(CiTestCase): def test_ipv4_and_ipv6_route_metrics_match_for_nics(self): 
"""parse_network_config emits matching ipv4 and ipv6 route-metrics.""" expected = {'ethernets': { - 'eth0': {'dhcp4': True, + 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/128'], + 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, 'match': {'macaddress': '00:0d:3a:04:75:98'}, 'set-name': 'eth0'}, 'eth1': {'set-name': 'eth1', @@ -214,6 +216,14 @@ class TestParseNetworkConfig(CiTestCase): 'dhcp6': True, 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2} imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + nic1['ipv6'] = { + "subnet": [{"address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}] + } imds_data['network']['interface'].append(SECONDARY_INTERFACE) third_intf = copy.deepcopy(SECONDARY_INTERFACE) third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') @@ -240,6 +250,26 @@ class TestParseNetworkConfig(CiTestCase): nic1 = imds_data['network']['interface'][0] nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + nic1['ipv6'] = { + "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + } + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv6_secondary_ips_will_be_static_cidrs(self): + """parse_network_config emits primary ipv6 as dhcp others are static""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/10'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + # Secondary ipv6 addresses currently ignored/unconfigured nic1['ipv6'] = { "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], -- cgit v1.2.3 From 62bbc262c3c7f633eac1d09ec78c055eef05166a Mon Sep 17 00:00:00 2001 From: Harald Date: Wed, 20 Nov 2019 18:55:27 +0100 Subject: net: IPv6, accept_ra, slaac, stateless (#51) Router advertisements are required for the default route to be set up, thus accept_ra should be enabled for dhcpv6-stateful. sysconf: IPV6_FORCE_ACCEPT_RA controls accept_ra sysctl. eni: mode static and mode dhcp 'accept_ra' controls sysctl. Add 'accept-ra: true|false' parameter to config v1 and v2. When True: accept_ra is set to '1'. When False: accept_ra is set to '0'. When not defined in config the value is left to the operating system default. This change also extend the IPv6 support to distinguish between slaac and dhcpv6-stateless. SLAAC is autoconfig without any options from DHCP, while stateless auto-configures the address and the uses DHCP for other options. 
LP: #1806014 LP: #1808647 --- cloudinit/net/eni.py | 15 ++ cloudinit/net/netplan.py | 9 +- cloudinit/net/network_state.py | 21 +- cloudinit/net/sysconfig.py | 34 ++- cloudinit/sources/helpers/openstack.py | 21 +- .../unittests/test_datasource/test_configdrive.py | 3 +- tests/unittests/test_net.py | 238 ++++++++++++++++++++- 7 files changed, 320 insertions(+), 21 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index a9a80c95..70771060 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -399,6 +399,7 @@ class Renderer(renderer.Renderer): def _render_iface(self, iface, render_hwaddress=False): sections = [] subnets = iface.get('subnets', {}) + accept_ra = iface.pop('accept-ra', None) if subnets: for index, subnet in enumerate(subnets): ipv4_subnet_mtu = None @@ -415,9 +416,23 @@ class Renderer(renderer.Renderer): subnet['type'] == 'ipv6_dhcpv6-stateful'): # Configure network settings using DHCP or DHCPv6 iface['mode'] = 'dhcp' + if accept_ra is not None: + # Accept router advertisements (0=off, 1=on) + iface['accept_ra'] = '1' if accept_ra else '0' elif subnet['type'] == 'ipv6_dhcpv6-stateless': # Configure network settings using SLAAC from RAs iface['mode'] = 'auto' + # Use stateless DHCPv6 (0=off, 1=on) + iface['dhcp'] = '1' + elif subnet['type'] == 'ipv6_slaac': + # Configure network settings using SLAAC from RAs + iface['mode'] = 'auto' + # Use stateless DHCPv6 (0=off, 1=on) + iface['dhcp'] = '0' + elif subnet_is_ipv6(subnet) and subnet['type'] == 'static': + if accept_ra is not None: + # Accept router advertisements (0=off, 1=on) + iface['accept_ra'] = '1' if accept_ra else '0' # do not emit multiple 'auto $IFACE' lines as older (precise) # ifupdown complains diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 749d46f8..14d3999f 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -4,7 +4,7 @@ import copy import os from . 
import renderer -from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2 +from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2, IPV6_DYNAMIC_TYPES from cloudinit import log as logging from cloudinit import util @@ -52,7 +52,8 @@ def _extract_addresses(config, entry, ifname, features=None): 'mtu': 1480, 'netmask': 64, 'type': 'static'}], - 'type: physical' + 'type: physical', + 'accept-ra': 'true' } An entry dictionary looks like: @@ -95,6 +96,8 @@ def _extract_addresses(config, entry, ifname, features=None): if sn_type == 'dhcp': sn_type += '4' entry.update({sn_type: True}) + elif sn_type in IPV6_DYNAMIC_TYPES: + entry.update({'dhcp6': True}) elif sn_type in ['static']: addr = "%s" % subnet.get('address') if 'prefix' in subnet: @@ -147,6 +150,8 @@ def _extract_addresses(config, entry, ifname, features=None): ns = entry.get('nameservers', {}) ns.update({'search': searchdomains}) entry.update({'nameservers': ns}) + if 'accept-ra' in config and config['accept-ra'] is not None: + entry.update({'accept-ra': util.is_true(config.get('accept-ra'))}) def _extract_bond_slaves_by_name(interfaces, entry, bond_master): diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 20b7716b..7d206a1a 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -18,13 +18,17 @@ from cloudinit import util LOG = logging.getLogger(__name__) NETWORK_STATE_VERSION = 1 +IPV6_DYNAMIC_TYPES = ['dhcp6', + 'ipv6_slaac', + 'ipv6_dhcpv6-stateless', + 'ipv6_dhcpv6-stateful'] NETWORK_STATE_REQUIRED_KEYS = { 1: ['version', 'config', 'network_state'], } NETWORK_V2_KEY_FILTER = [ 'addresses', 'dhcp4', 'dhcp4-overrides', 'dhcp6', 'dhcp6-overrides', 'gateway4', 'gateway6', 'interfaces', 'match', 'mtu', 'nameservers', - 'renderer', 'set-name', 'wakeonlan' + 'renderer', 'set-name', 'wakeonlan', 'accept-ra' ] NET_CONFIG_TO_V2 = { @@ -342,7 +346,8 @@ class NetworkStateInterpreter(object): 'name': 'eth0', 'subnets': [ {'type': 'dhcp4'} - ] + ], + 'accept-ra': 'true' } ''' @@ -362,6 +367,9 @@ class NetworkStateInterpreter(object): self.use_ipv6 = True break + accept_ra = command.get('accept-ra', None) + if accept_ra is not None: + accept_ra = util.is_true(accept_ra) iface.update({ 'name': command.get('name'), 'type': command.get('type'), @@ -372,6 +380,7 @@ class NetworkStateInterpreter(object): 'address': None, 'gateway': None, 'subnets': subnets, + 'accept-ra': accept_ra }) self._network_state['interfaces'].update({command.get('name'): iface}) self.dump_network_state() @@ -615,6 +624,7 @@ class NetworkStateInterpreter(object): driver: ixgbe set-name: lom1 dhcp6: true + accept-ra: true switchports: match: name: enp2* @@ -643,7 +653,7 @@ class NetworkStateInterpreter(object): driver = match.get('driver', None) if driver: phy_cmd['params'] = {'driver': driver} - for key in ['mtu', 'match', 'wakeonlan']: + for key in ['mtu', 'match', 'wakeonlan', 'accept-ra']: if key in cfg: phy_cmd[key] = cfg[key] @@ -928,8 +938,9 @@ def is_ipv6_addr(address): def subnet_is_ipv6(subnet): """Common helper for checking network_state subnets for ipv6.""" - # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful' or 'ipv6_dhcpv6-stateless' - if subnet['type'].endswith('6') or subnet['type'].startswith('ipv6'): + # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or + # 'ipv6_slaac' + if subnet['type'].endswith('6') or subnet['type'] in IPV6_DYNAMIC_TYPES: # This is a request for DHCPv6. 
return True elif subnet['type'] == 'static' and is_ipv6_addr(subnet.get('address')): diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index fe0c67ca..310cdf01 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -14,7 +14,7 @@ from configobj import ConfigObj from . import renderer from .network_state import ( - is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6) + is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES) LOG = logging.getLogger(__name__) NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" @@ -335,6 +335,9 @@ class Renderer(renderer.Renderer): continue iface_cfg[new_key] = old_value + if iface['accept-ra'] is not None: + iface_cfg['IPV6_FORCE_ACCEPT_RA'] = iface['accept-ra'] + @classmethod def _render_subnets(cls, iface_cfg, subnets, has_default_route): # setting base values @@ -350,6 +353,15 @@ class Renderer(renderer.Renderer): # Configure network settings using DHCPv6 iface_cfg['DHCPV6C'] = True elif subnet_type == 'ipv6_dhcpv6-stateless': + iface_cfg['IPV6INIT'] = True + # Configure network settings using SLAAC from RAs and optional + # info from dhcp server using DHCPv6 + iface_cfg['IPV6_AUTOCONF'] = True + iface_cfg['DHCPV6C'] = True + # Use Information-request to get only stateless configuration + # parameters (i.e., without address). + iface_cfg['DHCPV6C_OPTIONS'] = '-S' + elif subnet_type == 'ipv6_slaac': iface_cfg['IPV6INIT'] = True # Configure network settings using SLAAC from RAs iface_cfg['IPV6_AUTOCONF'] = True @@ -398,10 +410,15 @@ class Renderer(renderer.Renderer): # metric may apply to both dhcp and static config if 'metric' in subnet: iface_cfg['METRIC'] = subnet['metric'] + # TODO(hjensas): Including dhcp6 here is likely incorrect. DHCPv6 + # does not ever provide a default gateway, the default gateway + # come from RA's. (https://github.com/openSUSE/wicked/issues/570) if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']: if has_default_route and iface_cfg['BOOTPROTO'] != 'none': iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False continue + elif subnet_type in IPV6_DYNAMIC_TYPES: + continue elif subnet_type == 'static': if subnet_is_ipv6(subnet): ipv6_index = ipv6_index + 1 @@ -444,10 +461,14 @@ class Renderer(renderer.Renderer): @classmethod def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets): for _, subnet in enumerate(subnets, start=len(iface_cfg.children)): + subnet_type = subnet.get('type') for route in subnet.get('routes', []): is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway']) - if _is_default_route(route): + # Any dynamic configuration method, slaac, dhcpv6-stateful/ + # stateless should get router information from router RA's. + if (_is_default_route(route) and subnet_type not in + IPV6_DYNAMIC_TYPES): if ( (subnet.get('ipv4') and route_cfg.has_set_default_ipv4) or @@ -466,10 +487,17 @@ class Renderer(renderer.Renderer): # TODO(harlowja): add validation that no other iface has # also provided the default route? iface_cfg['DEFROUTE'] = True + # TODO(hjensas): Including dhcp6 here is likely incorrect. + # DHCPv6 does not ever provide a default gateway, the + # default gateway come from RA's. + # (https://github.com/openSUSE/wicked/issues/570) if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4', 'dhcp6'): + # NOTE(hjensas): DHCLIENT_SET_DEFAULT_ROUTE is SuSE + # only. RHEL, CentOS, Fedora does not implement this + # option. 
iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True if 'gateway' in route: - if is_ipv6 or is_ipv6_addr(route['gateway']): + if is_ipv6: iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] route_cfg.has_set_default_ipv6 = True else: diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index d1c4601a..0778f45a 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -584,17 +584,24 @@ def convert_net_json(network_json=None, known_macs=None): if n['link'] == link['id']]: subnet = dict((k, v) for k, v in network.items() if k in valid_keys['subnet']) - if 'dhcp' in network['type']: - t = (network['type'] if network['type'].startswith('ipv6') - else 'dhcp4') - subnet.update({ - 'type': t, - }) - else: + + if network['type'] == 'ipv4_dhcp': + subnet.update({'type': 'dhcp4'}) + elif network['type'] == 'ipv6_dhcp': + subnet.update({'type': 'dhcp6'}) + elif network['type'] in ['ipv6_slaac', 'ipv6_dhcpv6-stateless', + 'ipv6_dhcpv6-stateful']: + subnet.update({'type': network['type']}) + elif network['type'] in ['ipv4', 'ipv6']: subnet.update({ 'type': 'static', 'address': network.get('ip_address'), }) + + # Enable accept_ra for stateful and legacy ipv6_dhcp types + if network['type'] in ['ipv6_dhcpv6-stateful', 'ipv6_dhcp']: + cfg.update({'accept-ra': True}) + if network['type'] == 'ipv4': subnet['ipv4'] = True if network['type'] == 'ipv6': diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index cfb3b0a7..6f830cc6 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -547,7 +547,8 @@ class TestNetJson(CiTestCase): 'mtu': None, 'name': 'enp0s2', 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}], - 'type': 'physical'} + 'type': 'physical', + 'accept-ra': True} ], } conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 35ce55d2..0f45dc38 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1070,6 +1070,143 @@ NETWORK_CONFIGS = { """), }, }, + 'dhcpv6_accept_ra': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept_ra 1 + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + accept-ra: true + dhcp6: true + """).rstrip(' '), + 'yaml_v1': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + accept-ra: true + """).rstrip(' '), + 'yaml_v2': textwrap.dedent("""\ + version: 2 + ethernets: + iface0: + dhcp6: true + accept-ra: true + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'dhcpv6_reject_ra': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept_ra 0 + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + accept-ra: false + dhcp6: true + """).rstrip(' '), + 'yaml_v1': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + accept-ra: false + """).rstrip(' '), + 'yaml_v2': 
textwrap.dedent("""\ + version: 2 + ethernets: + iface0: + dhcp6: true + accept-ra: false + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=no + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'ipv6_slaac': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + dhcp 0 + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_slaac'} + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + IPV6_AUTOCONF=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, 'dhcpv6_stateless': { 'expected_eni': textwrap.dedent("""\ auto lo @@ -1077,6 +1214,7 @@ NETWORK_CONFIGS = { auto iface0 iface iface0 inet6 auto + dhcp 1 """).rstrip(' '), 'expected_netplan': textwrap.dedent(""" network: @@ -1097,6 +1235,8 @@ NETWORK_CONFIGS = { 'ifcfg-iface0': textwrap.dedent("""\ BOOTPROTO=none DEVICE=iface0 + DHCPV6C=yes + DHCPV6C_OPTIONS=-S IPV6_AUTOCONF=yes IPV6INIT=yes DEVICE=iface0 @@ -1121,6 +1261,7 @@ NETWORK_CONFIGS = { version: 2 ethernets: iface0: + accept-ra: true dhcp6: true """).rstrip(' '), 'yaml': textwrap.dedent("""\ @@ -1130,6 +1271,7 @@ NETWORK_CONFIGS = { name: 'iface0' subnets: - {'type': 'ipv6_dhcpv6-stateful'} + accept-ra: true """).rstrip(' '), 'expected_sysconfig': { 'ifcfg-iface0': textwrap.dedent("""\ @@ -1137,6 +1279,7 @@ NETWORK_CONFIGS = { DEVICE=iface0 DHCPV6C=yes IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=yes DEVICE=iface0 NM_CONTROLLED=no ONBOOT=yes @@ -2884,6 +3027,34 @@ USERCTL=no self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + def test_dhcpv6_accept_ra_config_v1(self): + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_accept_ra_config_v2(self): + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v2'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_reject_ra_config_v1(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_reject_ra_config_v2(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v2'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + def test_dhcpv6_stateless_config(self): entry = NETWORK_CONFIGS['dhcpv6_stateless'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) @@ -4022,6 +4193,46 @@ class TestNetplanRoundTrip(CiTestCase): entry['expected_netplan'].splitlines(), files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + def testsimple_render_dhcpv6_accept_ra(self): + entry = 
NETWORK_CONFIGS['dhcpv6_accept_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_dhcpv6_reject_ra(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_ipv6_slaac(self): + entry = NETWORK_CONFIGS['ipv6_slaac'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_dhcpv6_stateless(self): + entry = NETWORK_CONFIGS['dhcpv6_stateless'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_dhcpv6_stateful(self): + entry = NETWORK_CONFIGS['dhcpv6_stateful'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + def testsimple_render_all(self): entry = NETWORK_CONFIGS['all'] files = self._render_and_read(network_config=yaml.load(entry['yaml'])) @@ -4154,16 +4365,37 @@ class TestEniRoundTrip(CiTestCase): def testsimple_render_dhcpv6_stateless(self): entry = NETWORK_CONFIGS['dhcpv6_stateless'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml'])) + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + + def testsimple_render_ipv6_slaac(self): + entry = NETWORK_CONFIGS['ipv6_slaac'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) self.assertEqual( entry['expected_eni'].splitlines(), files['/etc/network/interfaces'].splitlines()) def testsimple_render_dhcpv6_stateful(self): entry = NETWORK_CONFIGS['dhcpv6_stateless'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + + def testsimple_render_dhcpv6_accept_ra(self): + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] files = self._render_and_read(network_config=yaml.load( - entry['yaml'])) + entry['yaml_v1'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + + def testsimple_render_dhcpv6_reject_ra(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) self.assertEqual( entry['expected_eni'].splitlines(), files['/etc/network/interfaces'].splitlines()) -- cgit v1.2.3 From 4bc399e0cd0b7e9177f948aecd49f6b8323ff30b Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Fri, 22 Nov 2019 21:05:44 -0600 Subject: ec2: Add support for AWS IMDS v2 (session-oriented) (#55) * ec2: Add support for AWS IMDS v2 (session-oriented) AWS now supports a new version of fetching Instance Metadata[1]. Update cloud-init's ec2 utility functions and update ec2 derived datasources accordingly. 
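The session-oriented exchange itself, per AWS's documentation [1], is a PUT for a token followed by token-bearing GETs. A standalone illustration using urllib (cloud-init's implementation goes through url_helper as shown in the diff below, and this snippet can only succeed from inside an EC2 instance):

    import urllib.request

    IMDS = 'http://169.254.169.254'

    # Step 1: PUT to the token endpoint, requesting the maximum TTL.
    req = urllib.request.Request(
        IMDS + '/latest/api/token', method='PUT',
        headers={'X-aws-ec2-metadata-token-ttl-seconds': '21600'})
    token = urllib.request.urlopen(req, timeout=5).read().decode()

    # Step 2: present the token on every subsequent IMDS read.
    req = urllib.request.Request(
        IMDS + '/latest/meta-data/instance-id',
        headers={'X-aws-ec2-metadata-token': token})
    instance_id = urllib.request.urlopen(req, timeout=5).read().decode()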
For DataSourceEc2 (versus ec2-look-alikes) cloud-init will issue the PUT request to obtain an API token for the maximum lifetime and then all subsequent interactions with the IMDS will include the token in the header. If the API token endpoint is unreachable on the Ec2 platform, log a warning and fall back to using IMDSv1, which does not use session tokens when communicating with the Instance Metadata Service. We handle read errors, typically seen if the IMDS is beyond one network hop (IMDSv2 responses have a ttl=1), by setting the API token to a disabled value and then using IMDSv1 paths. To support token-based headers, ec2_utils functions were updated to support custom headers_cb and exception_cb callback functions so Ec2 could store or refresh API tokens in the event of a token becoming stale. [1] https://docs.aws.amazon.com/AWSEC2/latest/ \ UserGuide/ec2-instance-metadata.html \ #instance-metadata-v2-how-it-works --- cloudinit/ec2_utils.py | 37 +++-- cloudinit/sources/DataSourceCloudStack.py | 2 +- cloudinit/sources/DataSourceEc2.py | 166 ++++++++++++++++++--- cloudinit/sources/DataSourceExoscale.py | 2 +- cloudinit/sources/DataSourceMAAS.py | 2 +- cloudinit/sources/DataSourceOpenStack.py | 2 +- cloudinit/url_helper.py | 15 +- tests/unittests/test_datasource/test_cloudstack.py | 21 ++- tests/unittests/test_datasource/test_ec2.py | 6 +- 9 files changed, 201 insertions(+), 52 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 3b7b17f1..57708c14 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -134,25 +134,28 @@ class MetadataMaterializer(object): return joined -def _skip_retry_on_codes(status_codes, _request_args, cause): +def skip_retry_on_codes(status_codes, _request_args, cause): """Returns False if cause.code is in status_codes.""" return cause.code not in status_codes def get_instance_userdata(api_version='latest', metadata_address='http://169.254.169.254', - ssl_details=None, timeout=5, retries=5): + ssl_details=None, timeout=5, retries=5, + headers_cb=None, exception_cb=None): ud_url = url_helper.combine_url(metadata_address, api_version) ud_url = url_helper.combine_url(ud_url, 'user-data') user_data = '' try: - # It is ok for userdata to not exist (thats why we are stopping if - # NOT_FOUND occurs) and just in that case returning an empty string. - exception_cb = functools.partial(_skip_retry_on_codes, - SKIP_USERDATA_CODES) + if not exception_cb: + # It is ok for userdata to not exist (thats why we are stopping if + # NOT_FOUND occurs) and just in that case returning an empty + # string. 
+ exception_cb = functools.partial(skip_retry_on_codes, + SKIP_USERDATA_CODES) response = url_helper.read_file_or_url( ud_url, ssl_details=ssl_details, timeout=timeout, - retries=retries, exception_cb=exception_cb) + retries=retries, exception_cb=exception_cb, headers_cb=headers_cb) user_data = response.contents except url_helper.UrlError as e: if e.code not in SKIP_USERDATA_CODES: @@ -165,11 +168,13 @@ def get_instance_userdata(api_version='latest', def _get_instance_metadata(tree, api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, - leaf_decoder=None): + leaf_decoder=None, headers_cb=None, + exception_cb=None): md_url = url_helper.combine_url(metadata_address, api_version, tree) caller = functools.partial( url_helper.read_file_or_url, ssl_details=ssl_details, - timeout=timeout, retries=retries) + timeout=timeout, retries=retries, headers_cb=headers_cb, + exception_cb=exception_cb) def mcaller(url): return caller(url).contents @@ -191,22 +196,28 @@ def _get_instance_metadata(tree, api_version='latest', def get_instance_metadata(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, - leaf_decoder=None): + leaf_decoder=None, headers_cb=None, + exception_cb=None): # Note, 'meta-data' explicitly has trailing /. # this is required for CloudStack (LP: #1356855) return _get_instance_metadata(tree='meta-data/', api_version=api_version, metadata_address=metadata_address, ssl_details=ssl_details, timeout=timeout, - retries=retries, leaf_decoder=leaf_decoder) + retries=retries, leaf_decoder=leaf_decoder, + headers_cb=headers_cb, + exception_cb=exception_cb) def get_instance_identity(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, - leaf_decoder=None): + leaf_decoder=None, headers_cb=None, + exception_cb=None): return _get_instance_metadata(tree='dynamic/instance-identity', api_version=api_version, metadata_address=metadata_address, ssl_details=ssl_details, timeout=timeout, - retries=retries, leaf_decoder=leaf_decoder) + retries=retries, leaf_decoder=leaf_decoder, + headers_cb=headers_cb, + exception_cb=exception_cb) # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index e333cb50..2013bed7 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -93,7 +93,7 @@ class DataSourceCloudStack(sources.DataSource): urls = [uhelp.combine_url(self.metadata_address, 'latest/meta-data/instance-id')] start_time = time.time() - url = uhelp.wait_for_url( + url, _response = uhelp.wait_for_url( urls=urls, max_wait=url_params.max_wait_seconds, timeout=url_params.timeout_seconds, status_cb=LOG.warning) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 1d88c9b1..b9f346a6 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -28,6 +28,10 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") STRICT_ID_DEFAULT = "warn" +API_TOKEN_ROUTE = 'latest/api/token' +API_TOKEN_DISABLED = '_ec2_disable_api_token' +AWS_TOKEN_TTL_SECONDS = '21600' + class CloudNames(object): ALIYUN = "aliyun" @@ -62,6 +66,7 @@ class DataSourceEc2(sources.DataSource): url_max_wait = 120 url_timeout = 50 + _api_token = None # API token for accessing the metadata service _network_config = sources.UNSET # Used to cache calculated network cfg v1 # 
Whether we want to get network configuration from the metadata service. @@ -148,11 +153,12 @@ class DataSourceEc2(sources.DataSource): min_metadata_version. """ # Assumes metadata service is already up + url_tmpl = '{0}/{1}/meta-data/instance-id' + headers = self._get_headers() for api_ver in self.extended_metadata_versions: - url = '{0}/{1}/meta-data/instance-id'.format( - self.metadata_address, api_ver) + url = url_tmpl.format(self.metadata_address, api_ver) try: - resp = uhelp.readurl(url=url) + resp = uhelp.readurl(url=url, headers=headers) except uhelp.UrlError as e: LOG.debug('url %s raised exception %s', url, e) else: @@ -172,12 +178,39 @@ class DataSourceEc2(sources.DataSource): # setup self.identity. So we need to do that now. api_version = self.get_metadata_api_version() self.identity = ec2.get_instance_identity( - api_version, self.metadata_address).get('document', {}) + api_version, self.metadata_address, + headers_cb=self._get_headers, + exception_cb=self._refresh_stale_aws_token_cb).get( + 'document', {}) return self.identity.get( 'instanceId', self.metadata['instance-id']) else: return self.metadata['instance-id'] + def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None): + if self.cloud_name != CloudNames.AWS: + return + + urls = [] + url2base = {} + url_path = API_TOKEN_ROUTE + request_method = 'PUT' + for url in mdurls: + cur = '{0}/{1}'.format(url, url_path) + urls.append(cur) + url2base[cur] = url + + # use the self._status_cb to check for Read errors, which means + # we can't reach the API token URL, so we should disable IMDSv2 + LOG.debug('Fetching Ec2 IMDSv2 API Token') + url, response = uhelp.wait_for_url( + urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb, + headers_cb=self._get_headers, request_method=request_method) + + if url and response: + self._api_token = response + return url2base[url] + def wait_for_metadata_service(self): mcfg = self.ds_cfg @@ -199,27 +232,39 @@ class DataSourceEc2(sources.DataSource): LOG.warning("Empty metadata url list! 
using default list") mdurls = self.metadata_urls - urls = [] - url2base = {} - for url in mdurls: - cur = '{0}/{1}/meta-data/instance-id'.format( - url, self.min_metadata_version) - urls.append(cur) - url2base[cur] = url - - start_time = time.time() - url = uhelp.wait_for_url( - urls=urls, max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, status_cb=LOG.warning) - - if url: - self.metadata_address = url2base[url] + # try the api token path first + metadata_address = self._maybe_fetch_api_token(mdurls) + if not metadata_address: + if self._api_token == API_TOKEN_DISABLED: + LOG.warning('Retrying with IMDSv1') + # if we can't get a token, use instance-id path + urls = [] + url2base = {} + url_path = '{ver}/meta-data/instance-id'.format( + ver=self.min_metadata_version) + request_method = 'GET' + for url in mdurls: + cur = '{0}/{1}'.format(url, url_path) + urls.append(cur) + url2base[cur] = url + + start_time = time.time() + url, _ = uhelp.wait_for_url( + urls=urls, max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds, status_cb=LOG.warning, + headers_cb=self._get_headers, request_method=request_method) + + if url: + metadata_address = url2base[url] + + if metadata_address: + self.metadata_address = metadata_address LOG.debug("Using metadata source: '%s'", self.metadata_address) else: LOG.critical("Giving up on md from %s after %s seconds", urls, int(time.time() - start_time)) - return bool(url) + return bool(metadata_address) def device_name_to_device(self, name): # Consult metadata service, that has @@ -376,14 +421,22 @@ class DataSourceEc2(sources.DataSource): return {} api_version = self.get_metadata_api_version() crawled_metadata = {} + if self.cloud_name == CloudNames.AWS: + exc_cb = self._refresh_stale_aws_token_cb + exc_cb_ud = self._skip_or_refresh_stale_aws_token_cb + else: + exc_cb = exc_cb_ud = None try: crawled_metadata['user-data'] = ec2.get_instance_userdata( - api_version, self.metadata_address) + api_version, self.metadata_address, + headers_cb=self._get_headers, exception_cb=exc_cb_ud) crawled_metadata['meta-data'] = ec2.get_instance_metadata( - api_version, self.metadata_address) + api_version, self.metadata_address, + headers_cb=self._get_headers, exception_cb=exc_cb) if self.cloud_name == CloudNames.AWS: identity = ec2.get_instance_identity( - api_version, self.metadata_address) + api_version, self.metadata_address, + headers_cb=self._get_headers, exception_cb=exc_cb) crawled_metadata['dynamic'] = {'instance-identity': identity} except Exception: util.logexc( @@ -393,6 +446,73 @@ class DataSourceEc2(sources.DataSource): crawled_metadata['_metadata_api_version'] = api_version return crawled_metadata + def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS): + """Request new metadata API token. + @param seconds: The lifetime of the token in seconds + + @return: The API token or None if unavailable. 
+ """ + if self.cloud_name != CloudNames.AWS: + return None + LOG.debug("Refreshing Ec2 metadata API token") + request_header = {'X-aws-ec2-metadata-token-ttl-seconds': seconds} + token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE) + try: + response = uhelp.readurl( + token_url, headers=request_header, request_method="PUT") + except uhelp.UrlError as e: + LOG.warning( + 'Unable to get API token: %s raised exception %s', + token_url, e) + return None + return response.contents + + def _skip_or_refresh_stale_aws_token_cb(self, msg, exception): + """Callback will not retry on SKIP_USERDATA_CODES or if no token + is available.""" + retry = ec2.skip_retry_on_codes( + ec2.SKIP_USERDATA_CODES, msg, exception) + if not retry: + return False # False raises exception + return self._refresh_stale_aws_token_cb(msg, exception) + + def _refresh_stale_aws_token_cb(self, msg, exception): + """Exception handler for Ec2 to refresh token if token is stale.""" + if isinstance(exception, uhelp.UrlError) and exception.code == 401: + # With _api_token as None, _get_headers will _refresh_api_token. + LOG.debug("Clearing cached Ec2 API token due to expiry") + self._api_token = None + return True # always retry + + def _status_cb(self, msg, exc=None): + LOG.warning(msg) + if 'Read timed out' in msg: + LOG.warning('Cannot use Ec2 IMDSv2 API tokens, using IMDSv1') + self._api_token = API_TOKEN_DISABLED + + def _get_headers(self, url=''): + """Return a dict of headers for accessing a url. + + If _api_token is unset on AWS, attempt to refresh the token via a PUT + and then return the updated token header. + """ + if self.cloud_name != CloudNames.AWS or (self._api_token == + API_TOKEN_DISABLED): + return {} + # Request a 6 hour token if URL is API_TOKEN_ROUTE + request_token_header = { + 'X-aws-ec2-metadata-token-ttl-seconds': AWS_TOKEN_TTL_SECONDS} + if API_TOKEN_ROUTE in url: + return request_token_header + if not self._api_token: + # If we don't yet have an API token, get one via a PUT against + # API_TOKEN_ROUTE. This _api_token may get unset by a 403 due + # to an invalid or expired token + self._api_token = self._refresh_api_token() + if not self._api_token: + return {} + return {'X-aws-ec2-metadata-token': self._api_token} + class DataSourceEc2Local(DataSourceEc2): """Datasource run at init-local which sets up network to query metadata. 
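The hunks below adapt the remaining callers to the new wait_for_url signature, which now returns a (url, response_contents) tuple instead of a bare url. A sketch of the updated caller pattern (a hypothetical helper mirroring the Ec2 change above, not code from this patch):

    from cloudinit import url_helper

    def wait_for_metadata(base_urls, path='latest/meta-data/instance-id'):
        # Map each candidate check-url back to its base address.
        url2base = {'{0}/{1}'.format(b, path): b for b in base_urls}
        # wait_for_url returns (url, contents); on failure it returns
        # (False, None), so truth-testing `url` keeps working unchanged.
        url, _contents = url_helper.wait_for_url(
            urls=list(url2base), max_wait=120, timeout=50)
        return url2base[url] if url else None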
diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py index 4616daa7..d59aefd1 100644 --- a/cloudinit/sources/DataSourceExoscale.py +++ b/cloudinit/sources/DataSourceExoscale.py @@ -61,7 +61,7 @@ class DataSourceExoscale(sources.DataSource): metadata_url = "{}/{}/meta-data/instance-id".format( self.metadata_url, self.api_version) - url = url_helper.wait_for_url( + url, _response = url_helper.wait_for_url( urls=[metadata_url], max_wait=self.url_max_wait, timeout=self.url_timeout, diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 61aa6d7e..517913aa 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -136,7 +136,7 @@ class DataSourceMAAS(sources.DataSource): url = url[:-1] check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION) urls = [check_url] - url = self.oauth_helper.wait_for_url( + url, _response = self.oauth_helper.wait_for_url( urls=urls, max_wait=max_wait, timeout=timeout) if url: diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 4a015240..7a5e71b6 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -76,7 +76,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): url_params = self.get_url_params() start_time = time.time() - avail_url = url_helper.wait_for_url( + avail_url, _response = url_helper.wait_for_url( urls=md_urls, max_wait=url_params.max_wait_seconds, timeout=url_params.timeout_seconds) if avail_url: diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 0f4c36f7..48ddae45 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -101,7 +101,7 @@ def read_file_or_url(url, timeout=5, retries=10, raise UrlError(cause=e, code=code, headers=None, url=url) return FileResponse(file_path, contents=contents) else: - return readurl(url, timeout=timeout, retries=retries, headers=headers, + return readurl(url, timeout=timeout, retries=retries, headers_cb=headers_cb, data=data, sec_between=sec_between, ssl_details=ssl_details, exception_cb=exception_cb) @@ -310,7 +310,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None, headers_cb=None, sleep_time=1, - exception_cb=None, sleep_time_cb=None): + exception_cb=None, sleep_time_cb=None, request_method=None): """ urls: a list of urls to try max_wait: roughly the maximum time to wait before giving up @@ -325,6 +325,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, 'exception', the exception that occurred. sleep_time_cb: call method with 2 arguments (response, loop_n) that generates the next sleep time. + request_method: indicate the type of HTTP request, GET, PUT, or POST + returns: tuple of (url, response contents), on failure, (False, None) the idea of this routine is to wait for the EC2 metadata service to come up. 
On both Eucalyptus and EC2 we have seen the case where @@ -381,8 +383,9 @@ def wait_for_url(urls, max_wait=None, timeout=None, else: headers = {} - response = readurl(url, headers=headers, timeout=timeout, - check_status=False) + response = readurl( + url, headers=headers, timeout=timeout, + check_status=False, request_method=request_method) if not response.contents: reason = "empty response [%s]" % (response.code) url_exc = UrlError(ValueError(reason), code=response.code, @@ -392,7 +395,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, url_exc = UrlError(ValueError(reason), code=response.code, headers=response.headers, url=url) else: - return url + return url, response.contents except UrlError as e: reason = "request error [%s]" % e url_exc = e @@ -421,7 +424,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, sleep_time) time.sleep(sleep_time) - return False + return False, None class OauthUrlHelper(object): diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py index d6d2d6b2..83c2f753 100644 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ b/tests/unittests/test_datasource/test_cloudstack.py @@ -10,6 +10,9 @@ from cloudinit.tests.helpers import CiTestCase, ExitStack, mock import os import time +MOD_PATH = 'cloudinit.sources.DataSourceCloudStack' +DS_PATH = MOD_PATH + '.DataSourceCloudStack' + class TestCloudStackPasswordFetching(CiTestCase): @@ -17,7 +20,7 @@ class TestCloudStackPasswordFetching(CiTestCase): super(TestCloudStackPasswordFetching, self).setUp() self.patches = ExitStack() self.addCleanup(self.patches.close) - mod_name = 'cloudinit.sources.DataSourceCloudStack' + mod_name = MOD_PATH self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) default_gw = "192.201.20.0" @@ -56,7 +59,9 @@ class TestCloudStackPasswordFetching(CiTestCase): ds.get_data() self.assertEqual({}, ds.get_config_obj()) - def test_password_sets_password(self): + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_password_sets_password(self, m_wait): + m_wait.return_value = True password = 'SekritSquirrel' self._set_password_server_response(password) ds = DataSourceCloudStack( @@ -64,7 +69,9 @@ class TestCloudStackPasswordFetching(CiTestCase): ds.get_data() self.assertEqual(password, ds.get_config_obj()['password']) - def test_bad_request_doesnt_stop_ds_from_working(self): + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_bad_request_doesnt_stop_ds_from_working(self, m_wait): + m_wait.return_value = True self._set_password_server_response('bad_request') ds = DataSourceCloudStack( {}, None, helpers.Paths({'run_dir': self.tmp})) @@ -79,7 +86,9 @@ class TestCloudStackPasswordFetching(CiTestCase): request_types.append(arg.split()[1]) self.assertEqual(expected_request_types, request_types) - def test_valid_response_means_password_marked_as_saved(self): + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_valid_response_means_password_marked_as_saved(self, m_wait): + m_wait.return_value = True password = 'SekritSquirrel' subp = self._set_password_server_response(password) ds = DataSourceCloudStack( @@ -92,7 +101,9 @@ class TestCloudStackPasswordFetching(CiTestCase): subp = self._set_password_server_response(response_string) ds = DataSourceCloudStack( {}, None, helpers.Paths({'run_dir': self.tmp})) - ds.get_data() + with mock.patch(DS_PATH + '.wait_for_metadata_service') as m_wait: + m_wait.return_value = True + 
ds.get_data() self.assertRequestTypesSent(subp, ['send_my_password']) def test_password_not_saved_if_empty(self): diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 5e1dd777..34a089f2 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -191,7 +191,9 @@ def register_mock_metaserver(base_url, data): register(base_url, 'not found', status=404) def myreg(*argc, **kwargs): - return httpretty.register_uri(httpretty.GET, *argc, **kwargs) + url = argc[0] + method = httpretty.PUT if ec2.API_TOKEN_ROUTE in url else httpretty.GET + return httpretty.register_uri(method, *argc, **kwargs) register_helper(myreg, base_url, data) @@ -237,6 +239,8 @@ class TestEc2(test_helpers.HttprettyTestCase): if md: all_versions = ( [ds.min_metadata_version] + ds.extended_metadata_versions) + token_url = self.data_url('latest', data_item='api/token') + register_mock_metaserver(token_url, 'API-TOKEN') for version in all_versions: metadata_url = self.data_url(version) + '/' if version == md_version: -- cgit v1.2.3 From f69d33a723b805fec3ee70c3a6127c8cadcb02d8 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 2 Dec 2019 16:24:18 -0700 Subject: url_helper: read_file_or_url should pass headers param into readurl (#66) Headers param was accidentally omitted and no longer passed through to readurl due to a previous commit. To avoid this omission of params in the future, drop positional param definitions from read_file_or_url and pass all kwargs through to readurl when we are not operating on a file. In util:read_seeded, correct the case where invalid positional param file_retries was being passed into read_file_or_url. Also drop duplicated file:// prefix addition from read_seeded because read_file_or_url does that work anyway. 
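To make the hazard concrete: with the old positional signature read_file_or_url(url, timeout=5, retries=10, headers=None, ...), the read_seeded call read_file_or_url(md_url, timeout, retries, file_retries) silently bound file_retries to the headers slot. With the kwargs-only wrapper every parameter must be named (a small usage sketch, with an illustrative URL):

    from cloudinit import url_helper

    resp = url_helper.read_file_or_url(
        'http://169.254.169.254/latest/meta-data/', timeout=5, retries=10)
    if resp.ok():
        print(resp.contents)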
LP: #1854084 --- cloudinit/sources/helpers/azure.py | 6 ++- cloudinit/tests/test_url_helper.py | 52 ++++++++++++++++++++++ cloudinit/url_helper.py | 47 +++++++++++++++---- cloudinit/user_data.py | 2 +- cloudinit/util.py | 15 ++----- .../unittests/test_datasource/test_azure_helper.py | 18 +++++--- 6 files changed, 112 insertions(+), 28 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index f1fba175..f5cdb3fd 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -183,14 +183,16 @@ class AzureEndpointHttpClient(object): if secure: headers = self.headers.copy() headers.update(self.extra_secure_headers) - return url_helper.read_file_or_url(url, headers=headers) + return url_helper.read_file_or_url(url, headers=headers, timeout=5, + retries=10) def post(self, url, data=None, extra_headers=None): headers = self.headers if extra_headers is not None: headers = self.headers.copy() headers.update(extra_headers) - return url_helper.read_file_or_url(url, data=data, headers=headers) + return url_helper.read_file_or_url(url, data=data, headers=headers, + timeout=5, retries=10) class GoalState(object): diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py index aa9f3ec1..e883ddc2 100644 --- a/cloudinit/tests/test_url_helper.py +++ b/cloudinit/tests/test_url_helper.py @@ -4,6 +4,7 @@ from cloudinit.url_helper import ( NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc) from cloudinit.tests.helpers import CiTestCase, mock, skipIf from cloudinit import util +from cloudinit import version import httpretty import requests @@ -17,6 +18,9 @@ except ImportError: _missing_oauthlib_dep = True +M_PATH = 'cloudinit.url_helper.' 
+ + + class TestOAuthHeaders(CiTestCase): def test_oauth_headers_raises_not_implemented_when_oathlib_missing(self): @@ -67,6 +71,54 @@ class TestReadFileOrUrl(CiTestCase): self.assertEqual(result.contents, data) self.assertEqual(str(result), data.decode('utf-8')) + @mock.patch(M_PATH + 'readurl') + def test_read_file_or_url_passes_params_to_readurl(self, m_readurl): + """read_file_or_url passes all params through to readurl.""" + url = 'http://hostname/path' + response = 'This is my url content\n' + m_readurl.return_value = response + params = {'url': url, 'timeout': 1, 'retries': 2, + 'headers': {'somehdr': 'val'}, + 'data': 'data', 'sec_between': 1, + 'ssl_details': {'cert_file': '/path/cert.pem'}, + 'headers_cb': 'headers_cb', 'exception_cb': 'exception_cb'} + self.assertEqual(response, read_file_or_url(**params)) + params.pop('url')  # url is passed in as a positional arg + self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list) + + def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self): + """Readurl param defaults used when unspecified by read_file_or_url + + Param defaults tested are as follows: + retries: 0, additional headers None beyond default, method: GET, + data: None, check_status: True and allow_redirects: True + """ + url = 'http://hostname/path' + + m_response = mock.MagicMock() + + class FakeSession(requests.Session): + def request(cls, **kwargs): + self.assertEqual( + {'url': url, 'allow_redirects': True, 'method': 'GET', + 'headers': { + 'User-Agent': 'Cloud-Init/%s' % ( + version.version_string())}}, + kwargs) + return m_response + + with mock.patch(M_PATH + 'requests.Session') as m_session: + error = requests.exceptions.HTTPError('broke') + m_session.side_effect = [error, FakeSession()] + # assert no retries and check_status == True + with self.assertRaises(UrlError) as context_manager: + response = read_file_or_url(url) + self.assertEqual('broke', str(context_manager.exception)) + # assert default headers, method, url and allow_redirects True + # Success on 2nd call with FakeSession + response = read_file_or_url(url) + self.assertEqual(m_response, response._response) + class TestRetryOnUrlExc(CiTestCase): diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 48ddae45..1496a471 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -81,14 +81,19 @@ def combine_url(base, *add_ons): return url -def read_file_or_url(url, timeout=5, retries=10, - headers=None, data=None, sec_between=1, ssl_details=None, - headers_cb=None, exception_cb=None): +def read_file_or_url(url, **kwargs): + """Wrapper function around readurl to allow passing a file path as url. + + When url is not a local file path, pass through any kwargs to readurl. + + In the case of parameter passthrough to readurl, default values apply for + some parameters. See: call-signature of readurl in this module for param docs. 
+ """ + url = url.lstrip() + if url.startswith("/"): + url = "file://%s" % url + if url.lower().startswith("file://"): - if data: + if kwargs.get("data"): LOG.warning("Unable to post data to file resource %s", url) file_path = url[len("file://"):] try: @@ -101,10 +106,7 @@ def read_file_or_url(url, timeout=5, retries=10, raise UrlError(cause=e, code=code, headers=None, url=url) return FileResponse(file_path, contents=contents) else: - return readurl(url, timeout=timeout, retries=retries, - headers_cb=headers_cb, data=data, - sec_between=sec_between, ssl_details=ssl_details, - exception_cb=exception_cb) + return readurl(url, **kwargs) # Made to have same accessors as UrlResponse so that the @@ -201,6 +203,35 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, check_status=True, allow_redirects=True, exception_cb=None, session=None, infinite=False, log_req_resp=True, request_method=None): + """Wrapper around requests.Session to read the url and retry if necessary + + :param url: Mandatory url to request. + :param data: Optional form data to post to the URL. Will set request_method + to 'POST' if present. + :param timeout: Timeout in seconds to wait for a response + :param retries: Number of times to retry on exception if exception_cb is + None or exception_cb returns True for the exception caught. Default is + to fail with 0 retries on exception. + :param sec_between: Default 1: amount of seconds passed to time.sleep + between retries. None or -1 means don't sleep. + :param headers: Optional dict of headers to send during request + :param headers_cb: Optional callable returning a dict of values to send as + headers during request + :param ssl_details: Optional dict providing key_file, ca_certs, and + cert_file keys for use in SSL connections. + :param check_status: Optional boolean set True to raise when HTTPError + occurs. Default: True. + :param allow_redirects: Optional boolean passed straight to Session.request + as 'allow_redirects'. Default: True. + :param exception_cb: Optional callable which accepts the params + msg and exception and returns a boolean True if retries are permitted. + :param session: Optional existing requests.Session instance to reuse. + :param infinite: Bool, set True to retry indefinitely. Default: False. + :param log_req_resp: Set False to turn off verbose debug messages. + :param request_method: String passed as 'method' to Session.request. + Typically GET, or POST. Default: POST if data is provided, GET + otherwise. + """ url = _cleanurl(url) req_args = { 'url': url, diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index ed83d2d8..15af1daf 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -224,7 +224,7 @@ class UserDataProcessor(object): content = util.load_file(include_once_fn) else: try: - resp = read_file_or_url(include_url, + resp = read_file_or_url(include_url, timeout=5, retries=10, ssl_details=self.ssl_details) if include_once_on and resp.ok(): util.write_file(include_once_fn, resp.contents, diff --git a/cloudinit/util.py b/cloudinit/util.py index 78b6a2d0..9d9d5c72 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -986,13 +986,6 @@ def load_yaml(blob, default=None, allowed=(dict,)): def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): - if base.startswith("/"): - base = "file://%s" % base - - # default retries for file is 0. 
for network is 10 - if base.startswith("file://"): - retries = file_retries - if base.find("%s") >= 0: ud_url = base % ("user-data" + ext) md_url = base % ("meta-data" + ext) @@ -1000,14 +993,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): ud_url = "%s%s%s" % (base, "user-data", ext) md_url = "%s%s%s" % (base, "meta-data", ext) - md_resp = url_helper.read_file_or_url(md_url, timeout, retries, - file_retries) + md_resp = url_helper.read_file_or_url(md_url, timeout=timeout, + retries=retries) md = None if md_resp.ok(): md = load_yaml(decode_binary(md_resp.contents), default={}) - ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries, - file_retries) + ud_resp = url_helper.read_file_or_url(ud_url, timeout=timeout, + retries=retries) ud = None if ud_resp.ok(): ud = ud_resp.contents diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index bd006aba..bd17f636 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -212,8 +212,10 @@ class TestAzureEndpointHttpClient(CiTestCase): response = client.get(url, secure=False) self.assertEqual(1, self.read_file_or_url.call_count) self.assertEqual(self.read_file_or_url.return_value, response) - self.assertEqual(mock.call(url, headers=self.regular_headers), - self.read_file_or_url.call_args) + self.assertEqual( + mock.call(url, headers=self.regular_headers, retries=10, + timeout=5), + self.read_file_or_url.call_args) def test_secure_get(self): url = 'MyTestUrl' @@ -227,8 +229,10 @@ class TestAzureEndpointHttpClient(CiTestCase): response = client.get(url, secure=True) self.assertEqual(1, self.read_file_or_url.call_count) self.assertEqual(self.read_file_or_url.return_value, response) - self.assertEqual(mock.call(url, headers=expected_headers), - self.read_file_or_url.call_args) + self.assertEqual( + mock.call(url, headers=expected_headers, retries=10, + timeout=5), + self.read_file_or_url.call_args) def test_post(self): data = mock.MagicMock() @@ -238,7 +242,8 @@ class TestAzureEndpointHttpClient(CiTestCase): self.assertEqual(1, self.read_file_or_url.call_count) self.assertEqual(self.read_file_or_url.return_value, response) self.assertEqual( - mock.call(url, data=data, headers=self.regular_headers), + mock.call(url, data=data, headers=self.regular_headers, retries=10, + timeout=5), self.read_file_or_url.call_args) def test_post_with_extra_headers(self): @@ -250,7 +255,8 @@ class TestAzureEndpointHttpClient(CiTestCase): expected_headers = self.regular_headers.copy() expected_headers.update(extra_headers) self.assertEqual( - mock.call(mock.ANY, data=mock.ANY, headers=expected_headers), + mock.call(mock.ANY, data=mock.ANY, headers=expected_headers, + retries=10, timeout=5), self.read_file_or_url.call_args) -- cgit v1.2.3 From 129b1c4ea250619bd7caed7aaffacc796b0139f2 Mon Sep 17 00:00:00 2001 From: AOhassan <37305877+AOhassan@users.noreply.github.com> Date: Thu, 12 Dec 2019 13:51:42 -0800 Subject: azure: avoid re-running cloud-init when instance-id is byte-swapped (#84) Azure stores the instance ID with an incorrect byte ordering for the first three hyphen delimited parts. This results in invalid is_new_instance checks forcing Azure datasource to recrawl the metadata service. When persisting instance-id from the metadata service, swap the instance-id string byte order such that it is consistent with that returned by dmi information. 
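For illustration, the byte-order transformation involved (this mirrors the textwrap-based is_byte_swapped helper added below; the sample IDs are the ones used in the new unit tests):

    import textwrap

    def byte_swap_uuid(uuid_str):
        def swap(group):
            # Split a group into bytes (2 hex chars) and reverse them.
            return ''.join(reversed(textwrap.wrap(group, 2)))
        parts = uuid_str.split('-')
        # Only the first three hyphen-delimited groups are byte-swapped.
        return '-'.join(
            [swap(parts[0]), swap(parts[1]), swap(parts[2])] + parts[3:])

    assert (byte_swap_uuid('D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8')
            == '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')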
Check whether the instance-id string is a byte-swapped match when determining correctly whether the Azure platform instance-id has actually changed. --- cloudinit/sources/DataSourceAzure.py | 16 ++++++++++--- cloudinit/sources/helpers/azure.py | 27 ++++++++++++++++++++++ tests/unittests/test_datasource/test_azure.py | 24 ++++++++++++++++--- .../unittests/test_datasource/test_azure_helper.py | 19 +++++++++++++++ 4 files changed, 80 insertions(+), 6 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 87a848ce..24f448c5 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -33,7 +33,8 @@ from cloudinit.sources.helpers.azure import ( get_boot_telemetry, get_system_info, report_diagnostic_event, - EphemeralDHCPv4WithReporting) + EphemeralDHCPv4WithReporting, + is_byte_swapped) LOG = logging.getLogger(__name__) @@ -471,8 +472,7 @@ class DataSourceAzure(sources.DataSource): seed = _get_random_seed() if seed: crawled_data['metadata']['random_seed'] = seed - crawled_data['metadata']['instance-id'] = util.read_dmi_data( - 'system-uuid') + crawled_data['metadata']['instance-id'] = self._iid() if perform_reprovision: LOG.info("Reporting ready to Azure after getting ReprovisionData") @@ -558,6 +558,16 @@ class DataSourceAzure(sources.DataSource): # quickly (local check only) if self.instance_id is still valid return sources.instance_id_matches_system_uuid(self.get_instance_id()) + def _iid(self, previous=None): + prev_iid_path = os.path.join( + self.paths.get_cpath('data'), 'instance-id') + iid = util.read_dmi_data('system-uuid') + if os.path.exists(prev_iid_path): + previous = util.load_file(prev_iid_path).strip() + if is_byte_swapped(previous, iid): + return previous + return iid + @azure_ds_telemetry_reporter def setup(self, is_new_instance): if self._negotiated is False: diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index f5cdb3fd..fc760581 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -7,6 +7,7 @@ import re import socket import struct import time +import textwrap from cloudinit.net import dhcp from cloudinit import stages @@ -48,6 +49,32 @@ def azure_ds_telemetry_reporter(func): return impl +def is_byte_swapped(previous_id, current_id): + """ + Azure stores the instance ID with an incorrect byte ordering for the + first parts. This corrects the byte order such that it is consistent with + that returned by the metadata service. 
+ """ + if previous_id == current_id: + return False + + def swap_bytestring(s, width=2): + dd = [byte for byte in textwrap.wrap(s, 2)] + dd.reverse() + return ''.join(dd) + + parts = current_id.split('-') + swapped_id = '-'.join([ + swap_bytestring(parts[0]), + swap_bytestring(parts[1]), + swap_bytestring(parts[2]), + parts[3], + parts[4] + ]) + + return previous_id == swapped_id + + @azure_ds_telemetry_reporter def get_boot_telemetry(): """Report timestamps related to kernel initialization and systemd diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 59e351de..a809fd87 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -477,7 +477,7 @@ scbus-1 on xpt0 bus 0 'public-keys': [], }) - self.instance_id = 'test-instance-id' + self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' def _dmi_mocks(key): if key == 'system-uuid': @@ -645,7 +645,7 @@ scbus-1 on xpt0 bus 0 'azure_data': { 'configurationsettype': 'LinuxProvisioningConfiguration'}, 'imds': NETWORK_METADATA, - 'instance-id': 'test-instance-id', + 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', 'local-hostname': u'myhost', 'random_seed': 'wild'} @@ -1091,6 +1091,24 @@ scbus-1 on xpt0 bus 0 self.assertTrue(ret) self.assertEqual('value', dsrc.metadata['test']) + def test_instance_id_endianness(self): + """Return the previous iid when dmi uuid is the byteswapped iid.""" + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + # byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual( + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + # not byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual( + 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + def test_instance_id_from_dmidecode_used(self): ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) ds.get_data() @@ -1292,7 +1310,7 @@ class TestAzureBounce(CiTestCase): def _dmi_mocks(key): if key == 'system-uuid': - return 'test-instance-id' + return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' elif key == 'chassis-asset-tag': return '7783-7084-3265-9085-8269-3286-77' raise RuntimeError('should not get here') diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index bd17f636..007df09f 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -170,6 +170,25 @@ class TestGoalStateParsing(CiTestCase): goal_state = self._get_goal_state(instance_id=instance_id) self.assertEqual(instance_id, goal_state.instance_id) + def test_instance_id_byte_swap(self): + """Return true when previous_iid is byteswapped current_iid""" + previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8" + self.assertTrue( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + + def test_instance_id_no_byte_swap_same_instance_id(self): + previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + self.assertFalse( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + + def test_instance_id_no_byte_swap_diff_instance_id(self): + previous_iid = 
"D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + self.assertFalse( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + def test_certificates_xml_parsed_and_fetched_correctly(self): http_client = mock.MagicMock() certificates_url = 'TestCertificatesUrl' -- cgit v1.2.3 From 1bf41cdda3805b053f198ab94717dba247edd969 Mon Sep 17 00:00:00 2001 From: Adam Dobrawy Date: Tue, 17 Dec 2019 17:13:00 +0100 Subject: rbxcloud: fix dsname in RbxCloud LP: #1855196 --- cloudinit/sources/DataSourceRbxCloud.py | 1 + 1 file changed, 1 insertion(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py index 9a8c3d5c..c3cd5c79 100644 --- a/cloudinit/sources/DataSourceRbxCloud.py +++ b/cloudinit/sources/DataSourceRbxCloud.py @@ -197,6 +197,7 @@ def read_user_data_callback(mount_dir): class DataSourceRbxCloud(sources.DataSource): + dsname = "RbxCloud" update_events = {'network': [ EventType.BOOT_NEW_INSTANCE, EventType.BOOT -- cgit v1.2.3 From 8116493950e7c47af0ce66fc1bb5d799ce5e477a Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 18 Dec 2019 16:22:02 -0500 Subject: cloud-init: fix capitalisation of SSH (#126) * cc_ssh: fix capitalisation of SSH * doc: fix capitalisation of SSH * cc_keys_to_console: fix capitalisation of SSH * ssh_util: fix capitalisation of SSH * DataSourceIBMCloud: fix capitalisation of SSH * DataSourceAzure: fix capitalisation of SSH * cs_utils: fix capitalisation of SSH * distros/__init__: fix capitalisation of SSH * cc_set_passwords: fix capitalisation of SSH * cc_ssh_import_id: fix capitalisation of SSH * cc_users_groups: fix capitalisation of SSH * cc_ssh_authkey_fingerprints: fix capitalisation of SSH --- cloudinit/config/cc_keys_to_console.py | 6 +++--- cloudinit/config/cc_set_passwords.py | 6 +++--- cloudinit/config/cc_ssh.py | 12 ++++++------ cloudinit/config/cc_ssh_authkey_fingerprints.py | 6 +++--- cloudinit/config/cc_ssh_import_id.py | 8 ++++---- cloudinit/config/cc_users_groups.py | 6 +++--- cloudinit/config/tests/test_set_passwords.py | 4 ++-- cloudinit/cs_utils.py | 2 +- cloudinit/distros/__init__.py | 4 ++-- cloudinit/sources/DataSourceAzure.py | 6 +++--- cloudinit/sources/DataSourceIBMCloud.py | 2 +- cloudinit/ssh_util.py | 8 ++++---- doc/examples/cloud-config-ssh-keys.txt | 4 +--- doc/rtd/topics/datasources.rst | 2 +- doc/rtd/topics/datasources/cloudstack.rst | 2 +- doc/rtd/topics/examples.rst | 2 +- doc/rtd/topics/format.rst | 2 +- doc/rtd/topics/instancedata.rst | 2 +- tests/unittests/test_distros/test_create_users.py | 2 +- 19 files changed, 42 insertions(+), 44 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index 8f8735ce..3d2ded3d 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -9,10 +9,10 @@ """ Keys to Console --------------- -**Summary:** control which ssh keys may be written to console +**Summary:** control which SSH keys may be written to console -For security reasons it may be desirable not to write ssh fingerprints and keys -to the console. To avoid the fingerprint of types of ssh keys being written to +For security reasons it may be desirable not to write SSH fingerprints and keys +to the console. To avoid the fingerprint of types of SSH keys being written to console the ``ssh_fp_console_blacklist`` config key can be used. 
By default all types of keys will have their fingerprints written to console. To avoid keys of a key type being written to console the ``ssh_key_console_blacklist`` config diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index c3c5b0ff..e3b39d8b 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -112,7 +112,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): elif util.is_false(pw_auth): cfg_val = 'no' else: - bmsg = "Leaving ssh config '%s' unchanged." % cfg_name + bmsg = "Leaving SSH config '%s' unchanged." % cfg_name if pw_auth is None or pw_auth.lower() == 'unchanged': LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth) else: @@ -121,7 +121,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): updated = update_ssh_config({cfg_name: cfg_val}) if not updated: - LOG.debug("No need to restart ssh service, %s not updated.", cfg_name) + LOG.debug("No need to restart SSH service, %s not updated.", cfg_name) return if 'systemctl' in service_cmd: @@ -129,7 +129,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): else: cmd = list(service_cmd) + [service_name, "restart"] util.subp(cmd) - LOG.debug("Restarted the ssh daemon.") + LOG.debug("Restarted the SSH daemon.") def handle(_name, cfg, cloud, log, args): diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index bb26fb2b..163cce99 100755 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -9,9 +9,9 @@ """ SSH --- -**Summary:** configure ssh and ssh keys (host and authorized) +**Summary:** configure SSH and SSH keys (host and authorized) -This module handles most configuration for ssh and both host and authorized ssh +This module handles most configuration for SSH and both host and authorized SSH keys. Authorized Keys @@ -24,7 +24,7 @@ account's home directory. Authorized keys for the default user defined in should be specified as a list of public keys. .. note:: - see the ``cc_set_passwords`` module documentation to enable/disable ssh + see the ``cc_set_passwords`` module documentation to enable/disable SSH password authentication Root login can be enabled/disabled using the ``disable_root`` config key. Root @@ -39,7 +39,7 @@ Host Keys ^^^^^^^^^ Host keys are for authenticating a specific instance. Many images have default -host ssh keys, which can be removed using ``ssh_deletekeys``. This prevents +host SSH keys, which can be removed using ``ssh_deletekeys``. This prevents re-use of a private host key from an image on multiple machines. Since removing default host keys is usually the desired behavior this option is enabled by default. 
@@ -225,7 +225,7 @@ def handle(_name, cfg, cloud, log, _args): if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True): keys = cloud.get_public_ssh_keys() or [] else: - log.debug('Skipping import of publish ssh keys per ' + log.debug('Skipping import of publish SSH keys per ' 'config setting: allow_public_ssh_keys=False') if "ssh_authorized_keys" in cfg: @@ -234,7 +234,7 @@ def handle(_name, cfg, cloud, log, _args): apply_credentials(keys, user, disable_root, disable_root_opts) except Exception: - util.logexc(log, "Applying ssh credentials failed!") + util.logexc(log, "Applying SSH credentials failed!") def apply_credentials(keys, user, disable_root, disable_root_opts): diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 98b0e665..dcf86fdc 100755 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -7,7 +7,7 @@ """ SSH Authkey Fingerprints ------------------------ -**Summary:** log fingerprints of user ssh keys +**Summary:** log fingerprints of user SSH keys Write fingerprints of authorized keys for each user to log. This is enabled by default, but can be disabled using ``no_ssh_fingerprints``. The hash type for @@ -68,7 +68,7 @@ def _is_printable_key(entry): def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', prefix='ci-info: '): if not key_entries: - message = ("%sno authorized ssh keys fingerprints found for user %s.\n" + message = ("%sno authorized SSH keys fingerprints found for user %s.\n" % (prefix, user)) util.multi_log(message) return @@ -98,7 +98,7 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', def handle(name, cfg, cloud, log, _args): if util.is_true(cfg.get('no_ssh_fingerprints', False)): log.debug(("Skipping module named %s, " - "logging of ssh fingerprints disabled"), name) + "logging of SSH fingerprints disabled"), name) return hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py index 6b46dafe..63f87298 100755 --- a/cloudinit/config/cc_ssh_import_id.py +++ b/cloudinit/config/cc_ssh_import_id.py @@ -9,9 +9,9 @@ """ SSH Import Id ------------- -**Summary:** import ssh id +**Summary:** import SSH id -This module imports ssh keys from either a public keyserver, usually launchpad +This module imports SSH keys from either a public keyserver, usually launchpad or github using ``ssh-import-id``. Keys are referenced by the username they are associated with on the keyserver. The keyserver can be specified by prepending either ``lp:`` for launchpad or ``gh:`` for github to the username. @@ -98,12 +98,12 @@ def import_ssh_ids(ids, user, log): raise exc cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids - log.debug("Importing ssh ids for user %s.", user) + log.debug("Importing SSH ids for user %s.", user) try: util.subp(cmd, capture=False) except util.ProcessExecutionError as exc: - util.logexc(log, "Failed to run command to import %s ssh ids", user) + util.logexc(log, "Failed to run command to import %s SSH ids", user) raise exc # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index c32a743a..13764e60 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -51,14 +51,14 @@ config keys for an entry in ``users`` are as follows: a Snappy user through ``snap create-user``. 
If an Ubuntu SSO account is associated with the address, username and SSH keys will be requested from there. Default: none - - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's + - ``ssh_authorized_keys``: Optional. List of SSH keys to add to user's authkeys file. Default: none. This key can not be combined with ``ssh_redirect_user``. - ``ssh_import_id``: Optional. SSH id to import for user. Default: none. This key can not be combined with ``ssh_redirect_user``. - ``ssh_redirect_user``: Optional. Boolean set to true to disable SSH - logins for this user. When specified, all cloud meta-data public ssh - keys will be set up in a disabled state for this username. Any ssh login + logins for this user. When specified, all cloud meta-data public SSH + keys will be set up in a disabled state for this username. Any SSH login as this username will timeout and prompt with a message to login instead as the configured for this instance. Default: false. This key can not be combined with ``ssh_import_id`` or diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py index 639fb9ea..85e2f1fe 100644 --- a/cloudinit/config/tests/test_set_passwords.py +++ b/cloudinit/config/tests/test_set_passwords.py @@ -45,7 +45,7 @@ class TestHandleSshPwauth(CiTestCase): """If config is not updated, then no system restart should be done.""" setpass.handle_ssh_pwauth(True) m_subp.assert_not_called() - self.assertIn("No need to restart ssh", self.logs.getvalue()) + self.assertIn("No need to restart SSH", self.logs.getvalue()) @mock.patch(MODPATH + "update_ssh_config", return_value=True) @mock.patch(MODPATH + "util.subp") @@ -80,7 +80,7 @@ class TestSetPasswordsHandle(CiTestCase): setpass.handle( 'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[]) self.assertEqual( - "DEBUG: Leaving ssh config 'PasswordAuthentication' unchanged. " + "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. " 'ssh_pwauth=None\n', self.logs.getvalue()) diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py index 51c09582..8bac9c44 100644 --- a/cloudinit/cs_utils.py +++ b/cloudinit/cs_utils.py @@ -14,7 +14,7 @@ Having the server definition accessible by the VM can ve useful in various ways. For example it is possible to easily determine from within the VM, which network interfaces are connected to public and which to private network. Another use is to pass some data to initial VM setup scripts, like setting the -hostname to the VM name or passing ssh public keys through server meta. +hostname to the VM name or passing SSH public keys through server meta. For more information take a look at the Server Context section of CloudSigma API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 2b559fe6..6d69e6ca 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -385,7 +385,7 @@ class Distro(object): Add a user to the system using standard GNU tools """ # XXX need to make add_user idempotent somehow as we - # still want to add groups or modify ssh keys on pre-existing + # still want to add groups or modify SSH keys on pre-existing # users in the image. 
if util.is_user(name): LOG.info("User %s already exists, skipping.", name) @@ -561,7 +561,7 @@ class Distro(object): cloud_keys = kwargs.get('cloud_public_ssh_keys', []) if not cloud_keys: LOG.warning( - 'Unable to disable ssh logins for %s given' + 'Unable to disable SSH logins for %s given' ' ssh_redirect_user: %s. No cloud public-keys present.', name, kwargs['ssh_redirect_user']) else: diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 24f448c5..61ec522a 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -355,16 +355,16 @@ class DataSourceAzure(sources.DataSource): for pk in self.cfg.get('_pubkeys', []): if pk.get('value', None): key_value = pk['value'] - LOG.debug("ssh authentication: using value from fabric") + LOG.debug("SSH authentication: using value from fabric") else: bname = str(pk['fingerprint'] + ".crt") fp_files += [os.path.join(ddir, bname)] - LOG.debug("ssh authentication: " + LOG.debug("SSH authentication: " "using fingerprint from fabric") with events.ReportEventStack( name="waiting-for-ssh-public-key", - description="wait for agents to retrieve ssh keys", + description="wait for agents to retrieve SSH keys", parent=azure_ds_reporter): # wait very long for public SSH keys to arrive # https://bugs.launchpad.net/cloud-init/+bug/1717611 diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py index 21e6ae6b..e0c714e8 100644 --- a/cloudinit/sources/DataSourceIBMCloud.py +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -83,7 +83,7 @@ creates 6 boot scenarios. There is no information available to identify this scenario. - The user will be able to ssh in as as root with their public keys that + The user will be able to SSH in as root with their public keys that have been installed into /root/ssh/.authorized_keys during the provisioning stage. diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index bcb23a5a..c3a9b5b7 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -17,7 +17,7 @@ LOG = logging.getLogger(__name__) # See: man sshd_config DEF_SSHD_CFG = "/etc/ssh/sshd_config" -# taken from openssh source openssh-7.3p1/sshkey.c: +# taken from OpenSSH source openssh-7.3p1/sshkey.c: # static const struct keytype keytypes[] = { ... } VALID_KEY_TYPES = ( "dsa", @@ -207,7 +207,7 @@ def update_authorized_keys(old_entries, keys): def users_ssh_info(username): pw_ent = pwd.getpwnam(username) if not pw_ent or not pw_ent.pw_dir: - raise RuntimeError("Unable to get ssh info for user %r" % (username)) + raise RuntimeError("Unable to get SSH info for user %r" % (username)) return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent) @@ -245,7 +245,7 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): except (IOError, OSError): # Give up and use a default key filename auth_key_fns[0] = default_authorizedkeys_file - util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in ssh " + util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in SSH " "config from %r, using 'AuthorizedKeysFile' file " "%r instead", DEF_SSHD_CFG, auth_key_fns[0]) @@ -349,7 +349,7 @@ def update_ssh_config(updates, fname=DEF_SSHD_CFG): def update_ssh_config_lines(lines, updates): - """Update the ssh config lines per updates. + """Update the SSH config lines per updates. @param lines: array of SshdConfigLine. This array is updated in place. 
@param updates: dictionary of desired values {Option: value} diff --git a/doc/examples/cloud-config-ssh-keys.txt b/doc/examples/cloud-config-ssh-keys.txt index 235a114f..aad8b683 100644 --- a/doc/examples/cloud-config-ssh-keys.txt +++ b/doc/examples/cloud-config-ssh-keys.txt @@ -6,7 +6,7 @@ ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies -# Send pre-generated ssh private keys to the server +# Send pre-generated SSH private keys to the server # If these are present, they will be written to /etc/ssh and # new random keys will not be generated # in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported @@ -42,5 +42,3 @@ ssh_keys: -----END DSA PRIVATE KEY----- dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost - - diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 0b3a385e..3d026143 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -140,7 +140,7 @@ The current interface that a datasource object must provide is the following: # because cloud-config content would be handled elsewhere def get_config_obj(self) - #returns a list of public ssh keys + # returns a list of public SSH keys def get_public_ssh_keys(self) # translates a device 'short' name into the actual physical device diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst index 95b95874..da183226 100644 --- a/doc/rtd/topics/datasources/cloudstack.rst +++ b/doc/rtd/topics/datasources/cloudstack.rst @@ -4,7 +4,7 @@ CloudStack ========== `Apache CloudStack`_ expose user-data, meta-data, user password and account -sshkey thru the Virtual-Router. The datasource obtains the VR address via +SSH key thru the Virtual-Router. The datasource obtains the VR address via dhcp lease information given to the instance. For more details on meta-data and user-data, refer the `CloudStack Administrator Guide`_. diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst index 94e6ed18..81860f85 100644 --- a/doc/rtd/topics/examples.rst +++ b/doc/rtd/topics/examples.rst @@ -128,7 +128,7 @@ Reboot/poweroff when finished :language: yaml :linenos: -Configure instances ssh-keys +Configure instances SSH keys ============================ .. 
literalinclude:: ../../examples/cloud-config-ssh-keys.txt diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst index a6e9b44e..2b60bdd3 100644 --- a/doc/rtd/topics/format.rst +++ b/doc/rtd/topics/format.rst @@ -113,7 +113,7 @@ These things include: - apt upgrade should be run on first boot - a different apt mirror should be used - additional apt sources should be added -- certain ssh keys should be imported +- certain SSH keys should be imported - *and many more...* .. note:: diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index c17d0a0e..e7dd0d62 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst @@ -165,7 +165,7 @@ Examples output: v1.public_ssh_keys ------------------ -A list of ssh keys provided to the instance by the datasource metadata. +A list of SSH keys provided to the instance by the datasource metadata. Examples output: diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index 40624952..ef11784d 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -206,7 +206,7 @@ class TestCreateUser(CiTestCase): user = 'foouser' self.dist.create_user(user, ssh_redirect_user='someuser') self.assertIn( - 'WARNING: Unable to disable ssh logins for foouser given ' + 'WARNING: Unable to disable SSH logins for foouser given ' 'ssh_redirect_user: someuser. No cloud public-keys present.\n', self.logs.getvalue()) m_setup_user_keys.assert_not_called() -- cgit v1.2.3 From 8c4fd886931abcf2cc8627a47463907d655b35c3 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 21 Jan 2020 17:15:30 -0500 Subject: Start removing dependency on six (#178) * url_helper: drop six * url_helper: sort imports * log: drop six * log: sort imports * handlers/__init__: drop six * handlers/__init__: sort imports * user_data: drop six * user_data: sort imports * sources/__init__: drop six * sources/__init__: sort imports * DataSourceOVF: drop six * DataSourceOVF: sort imports * sources/helpers/openstack: drop six * sources/helpers/openstack: sort imports * mergers/m_str: drop six This also allowed simplification of the logic, as we will never encounter a non-string text type. 
* type_utils: drop six * mergers/m_dict: drop six * mergers/m_list: drop six * cmd/query: drop six * mergers/__init__: drop six * net/cmdline: drop six * reporting/handlers: drop six * reporting/handlers: sort imports --- cloudinit/cmd/query.py | 3 +-- cloudinit/handlers/__init__.py | 9 +++------ cloudinit/log.py | 14 +++++--------- cloudinit/mergers/__init__.py | 4 +--- cloudinit/mergers/m_dict.py | 4 +--- cloudinit/mergers/m_list.py | 4 +--- cloudinit/mergers/m_str.py | 9 ++------- cloudinit/net/cmdline.py | 5 +---- cloudinit/reporting/handlers.py | 19 ++++++------------- cloudinit/sources/DataSourceOVF.py | 8 ++------ cloudinit/sources/__init__.py | 21 +++++++++------------ cloudinit/sources/helpers/openstack.py | 8 ++------ cloudinit/type_utils.py | 25 +++++++------------------ cloudinit/url_helper.py | 20 +++++--------------- cloudinit/user_data.py | 11 ++++------- 15 files changed, 50 insertions(+), 114 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py index 1d888b9d..e3db8679 100644 --- a/cloudinit/cmd/query.py +++ b/cloudinit/cmd/query.py @@ -5,7 +5,6 @@ import argparse from errno import EACCES import os -import six import sys from cloudinit.handlers.jinja_template import ( @@ -149,7 +148,7 @@ def handle_args(name, args): response = '\n'.join(sorted(response.keys())) elif args.list_keys: response = '\n'.join(sorted(response.keys())) - if not isinstance(response, six.string_types): + if not isinstance(response, str): response = util.json_dumps(response) print(response) return 0 diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 0db75af9..a409ff8a 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -10,14 +10,12 @@ import abc import os -import six - -from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES) from cloudinit import importer from cloudinit import log as logging from cloudinit import type_utils from cloudinit import util +from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES) LOG = logging.getLogger(__name__) @@ -60,8 +58,7 @@ INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()), key=(lambda e: 0 - len(e))) -@six.add_metaclass(abc.ABCMeta) -class Handler(object): +class Handler(metaclass=abc.ABCMeta): def __init__(self, frequency, version=2): self.handler_version = version @@ -159,7 +156,7 @@ def _extract_first_or_bytes(blob, size): # Extract the first line or upto X symbols for text objects # Extract first X bytes for binary objects try: - if isinstance(blob, six.string_types): + if isinstance(blob, str): start = blob.split("\n", 1)[0] else: # We want to avoid decoding the whole blob (it might be huge) diff --git a/cloudinit/log.py b/cloudinit/log.py index 5ae312ba..827db12b 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -8,17 +8,13 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
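# A minimal stand-alone sketch of the two substitutions this series applies
# throughout (coerce_to_lines is a hypothetical helper, not cloud-init code):
# on Python 3, six.string_types is just (str,) and six.StringIO is io.StringIO,
# so both indirections can be dropped.
import io

def coerce_to_lines(value):
    # What isinstance(x, six.string_types) guarded on py2/py3 now needs
    # only the built-in str check.
    if isinstance(value, str):
        return value.splitlines()
    return list(value)

buf = io.StringIO()          # was: six.StringIO / six.moves StringIO
buf.write("logcfg line\n")
assert coerce_to_lines("a\nb") == ["a", "b"]
assert buf.getvalue() == "logcfg line\n"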
+import collections +import io import logging import logging.config import logging.handlers - -import collections import os import sys - -import six -from six import StringIO - import time # Logging levels for easy access @@ -74,13 +70,13 @@ def setupLogging(cfg=None): log_cfgs = [] log_cfg = cfg.get('logcfg') - if log_cfg and isinstance(log_cfg, six.string_types): + if log_cfg and isinstance(log_cfg, str): # If there is a 'logcfg' entry in the config, # respect it, it is the old keyname log_cfgs.append(str(log_cfg)) elif "log_cfgs" in cfg: for a_cfg in cfg['log_cfgs']: - if isinstance(a_cfg, six.string_types): + if isinstance(a_cfg, str): log_cfgs.append(a_cfg) elif isinstance(a_cfg, (collections.Iterable)): cfg_str = [str(c) for c in a_cfg] @@ -100,7 +96,7 @@ def setupLogging(cfg=None): # is acting as a file) pass else: - log_cfg = StringIO(log_cfg) + log_cfg = io.StringIO(log_cfg) # Attempt to load its config logging.config.fileConfig(log_cfg) # The first one to work wins! diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index 7fbc25ff..668e3cd6 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -6,8 +6,6 @@ import re -import six - from cloudinit import importer from cloudinit import log as logging from cloudinit import type_utils @@ -85,7 +83,7 @@ def dict_extract_mergers(config): raw_mergers = config.pop('merge_type', None) if raw_mergers is None: return parsed_mergers - if isinstance(raw_mergers, six.string_types): + if isinstance(raw_mergers, str): return string_extract_mergers(raw_mergers) for m in raw_mergers: if isinstance(m, (dict)): diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py index 6c5fddc2..93472f13 100644 --- a/cloudinit/mergers/m_dict.py +++ b/cloudinit/mergers/m_dict.py @@ -4,8 +4,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six - DEF_MERGE_TYPE = 'no_replace' MERGE_TYPES = ('replace', DEF_MERGE_TYPE,) @@ -47,7 +45,7 @@ class Merger(object): return new_v if isinstance(new_v, (list, tuple)) and self._recurse_array: return self._merger.merge(old_v, new_v) - if isinstance(new_v, six.string_types) and self._recurse_str: + if isinstance(new_v, str) and self._recurse_str: return self._merger.merge(old_v, new_v) if isinstance(new_v, (dict)) and self._recurse_dict: return self._merger.merge(old_v, new_v) diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py index daa0469a..19f32771 100644 --- a/cloudinit/mergers/m_list.py +++ b/cloudinit/mergers/m_list.py @@ -4,8 +4,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six - DEF_MERGE_TYPE = 'replace' MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace') @@ -63,7 +61,7 @@ class Merger(object): return old_v if isinstance(new_v, (list, tuple)) and self._recurse_array: return self._merger.merge(old_v, new_v) - if isinstance(new_v, six.string_types) and self._recurse_str: + if isinstance(new_v, str) and self._recurse_str: return self._merger.merge(old_v, new_v) if isinstance(new_v, (dict)) and self._recurse_dict: return self._merger.merge(old_v, new_v) diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py index 629df58e..539e3e29 100644 --- a/cloudinit/mergers/m_str.py +++ b/cloudinit/mergers/m_str.py @@ -4,8 +4,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-import six - class Merger(object): def __init__(self, _merger, opts): @@ -23,13 +21,10 @@ class Merger(object): # perform the following action, if appending we will # merge them together, otherwise we will just return value. def _on_str(self, value, merge_with): - if not isinstance(value, six.string_types): + if not isinstance(value, str): return merge_with if not self._append: return merge_with - if isinstance(value, six.text_type): - return value + six.text_type(merge_with) - else: - return value + six.binary_type(merge_with) + return value + merge_with # vi: ts=4 expandtab diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 55166ea8..bfb40aae 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -12,8 +12,6 @@ import gzip import io import os -import six - from cloudinit import util from . import get_devicelist @@ -22,8 +20,7 @@ from . import read_sys_net_safe _OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface" -@six.add_metaclass(abc.ABCMeta) -class InitramfsNetworkConfigSource(object): +class InitramfsNetworkConfigSource(metaclass=abc.ABCMeta): """ABC for net config sources that read config written by initramfses""" @abc.abstractmethod diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py index 6605e795..946df7e0 100755 --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -1,25 +1,19 @@ # This file is part of cloud-init. See LICENSE file for license information. import abc -import uuid import fcntl import json -import six import os +import queue import struct import threading import time +import uuid +from datetime import datetime from cloudinit import log as logging from cloudinit.registry import DictRegistry from cloudinit import (url_helper, util) -from datetime import datetime -from six.moves.queue import Empty as QueueEmptyError - -if six.PY2: - from multiprocessing.queues import JoinableQueue as JQueue -else: - from queue import Queue as JQueue LOG = logging.getLogger(__name__) @@ -28,8 +22,7 @@ class ReportException(Exception): pass -@six.add_metaclass(abc.ABCMeta) -class ReportingHandler(object): +class ReportingHandler(metaclass=abc.ABCMeta): """Base class for report handlers. Implement :meth:`~publish_event` for controlling what @@ -141,7 +134,7 @@ class HyperVKvpReportingHandler(ReportingHandler): self._kvp_file_path) self._event_types = event_types - self.q = JQueue() + self.q = queue.Queue() self.incarnation_no = self._get_incarnation_no() self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX, self.incarnation_no) @@ -303,7 +296,7 @@ class HyperVKvpReportingHandler(ReportingHandler): # get all the rest of the events in the queue event = self.q.get(block=False) items_from_queue += 1 - except QueueEmptyError: + except queue.Empty: event = None try: self._append_kvp_item(encoded_data) diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 896841e3..9f6e6b6c 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -8,19 +8,15 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
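# A minimal sketch of the other recurring conversion in this series: the
# class-keyword metaclass syntax replacing six.add_metaclass, and the stdlib
# queue module replacing the six.moves/py2 JoinableQueue branches. Worker is
# a hypothetical class, not cloud-init code.
import abc
import queue

class Worker(metaclass=abc.ABCMeta):   # was: @six.add_metaclass(abc.ABCMeta)
    @abc.abstractmethod
    def publish(self, event):
        """Subclasses must implement publish()."""

q = queue.Queue()                      # was: JQueue (JoinableQueue on py2)
q.put("event")
try:
    q.get(block=False)
    q.get(block=False)                 # queue now empty, raises
except queue.Empty:                    # was: six.moves.queue Empty
    pass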
-from xml.dom import minidom - import base64 import os import re import time - -import six +from xml.dom import minidom from cloudinit import log as logging from cloudinit import sources from cloudinit import util - from cloudinit.sources.helpers.vmware.imc.config \ import Config from cloudinit.sources.helpers.vmware.imc.config_custom_script \ @@ -458,7 +454,7 @@ def maybe_cdrom_device(devname): """ if not devname: return False - elif not isinstance(devname, six.string_types): + elif not isinstance(devname, str): raise ValueError("Unexpected input for devname: %s" % devname) # resolve '..' and multi '/' elements diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index e6baf8f4..dd93cfd8 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -9,21 +9,19 @@ # This file is part of cloud-init. See LICENSE file for license information. import abc -from collections import namedtuple import copy import json import os -import six +from collections import namedtuple -from cloudinit.atomic_helper import write_json from cloudinit import importer from cloudinit import log as logging from cloudinit import net -from cloudinit.event import EventType from cloudinit import type_utils from cloudinit import user_data as ud from cloudinit import util - +from cloudinit.atomic_helper import write_json +from cloudinit.event import EventType from cloudinit.filters import launch_index from cloudinit.reporting import events @@ -136,8 +134,7 @@ URLParams = namedtuple( 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries']) -@six.add_metaclass(abc.ABCMeta) -class DataSource(object): +class DataSource(metaclass=abc.ABCMeta): dsmode = DSMODE_NETWORK default_locale = 'en_US.UTF-8' @@ -436,7 +433,7 @@ class DataSource(object): return self._cloud_name if self.metadata and self.metadata.get(METADATA_CLOUD_NAME_KEY): cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY) - if isinstance(cloud_name, six.string_types): + if isinstance(cloud_name, str): self._cloud_name = cloud_name.lower() else: self._cloud_name = self._get_cloud_name().lower() @@ -718,8 +715,8 @@ def normalize_pubkey_data(pubkey_data): if not pubkey_data: return keys - if isinstance(pubkey_data, six.string_types): - return str(pubkey_data).splitlines() + if isinstance(pubkey_data, str): + return pubkey_data.splitlines() if isinstance(pubkey_data, (list, set)): return list(pubkey_data) @@ -729,7 +726,7 @@ def normalize_pubkey_data(pubkey_data): # lp:506332 uec metadata service responds with # data that makes boto populate a string for 'klist' rather # than a list. 
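# A compact re-statement of the normalization at this step (normalize_keys is
# a hypothetical helper mirroring the hunk's logic for the str, list/set and
# dict shapes, including the boto workaround noted above):
def normalize_keys(pubkey_data):
    if not pubkey_data:
        return []
    if isinstance(pubkey_data, str):
        return pubkey_data.splitlines()
    if isinstance(pubkey_data, (list, set)):
        return list(pubkey_data)
    keys = []
    if isinstance(pubkey_data, dict):
        for klist in pubkey_data.values():
            # boto may hand back a bare string where a list was expected
            if isinstance(klist, str):
                klist = [klist]
            if isinstance(klist, (list, set)):
                keys.extend(k for k in klist if k)
    return keys

assert normalize_keys("k1\nk2") == ["k1", "k2"]
assert normalize_keys({"a": "k1"}) == ["k1"]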
- if isinstance(klist, six.string_types): + if isinstance(klist, str): klist = [klist] if isinstance(klist, (list, set)): for pkey in klist: @@ -837,7 +834,7 @@ def convert_vendordata(data, recurse=True): """ if not data: return None - if isinstance(data, six.string_types): + if isinstance(data, str): return data if isinstance(data, list): return copy.deepcopy(data) diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 0778f45a..441db506 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -12,15 +12,12 @@ import copy import functools import os -import six - from cloudinit import ec2_utils from cloudinit import log as logging from cloudinit import net from cloudinit import sources from cloudinit import url_helper from cloudinit import util - from cloudinit.sources import BrokenMetadata # See https://docs.openstack.org/user-guide/cli-config-drive.html @@ -163,8 +160,7 @@ class SourceMixin(object): return device -@six.add_metaclass(abc.ABCMeta) -class BaseReader(object): +class BaseReader(metaclass=abc.ABCMeta): def __init__(self, base_path): self.base_path = base_path @@ -227,7 +223,7 @@ class BaseReader(object): """ load_json_anytype = functools.partial( - util.load_json, root_types=(dict, list) + six.string_types) + util.load_json, root_types=(dict, list, str)) def datafiles(version): files = {} diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py index 6132654b..2c1ae368 100644 --- a/cloudinit/type_utils.py +++ b/cloudinit/type_utils.py @@ -10,29 +10,18 @@ import types -import six - -if six.PY3: - _NAME_TYPES = ( - types.ModuleType, - types.FunctionType, - types.LambdaType, - type, - ) -else: - _NAME_TYPES = ( - types.TypeType, - types.ModuleType, - types.FunctionType, - types.LambdaType, - types.ClassType, - ) +_NAME_TYPES = ( + types.ModuleType, + types.FunctionType, + types.LambdaType, + type, +) def obj_name(obj): if isinstance(obj, _NAME_TYPES): - return six.text_type(obj.__name__) + return str(obj.__name__) else: if not hasattr(obj, '__class__'): return repr(obj) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 1496a471..f6d68436 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -10,32 +10,22 @@ import json import os -import requests -import six import time - from email.utils import parsedate from errno import ENOENT from functools import partial +from http.client import NOT_FOUND from itertools import count -from requests import exceptions +from urllib.parse import urlparse, urlunparse, quote -from six.moves.urllib.parse import ( - urlparse, urlunparse, - quote as urlquote) +import requests +from requests import exceptions from cloudinit import log as logging from cloudinit import version LOG = logging.getLogger(__name__) -if six.PY2: - import httplib - NOT_FOUND = httplib.NOT_FOUND -else: - import http.client - NOT_FOUND = http.client.NOT_FOUND - # Check if requests has ssl support (added in requests >= 0.8.8) SSL_ENABLED = False @@ -71,7 +61,7 @@ def combine_url(base, *add_ons): path = url_parsed[2] if path and not path.endswith("/"): path += "/" - path += urlquote(str(add_on), safe="/:") + path += quote(str(add_on), safe="/:") url_parsed[2] = path return urlunparse(url_parsed) diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index 15af1daf..6f41b03a 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -9,14 +9,11 @@ # This file is part of cloud-init. See LICENSE file for license information. 
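# A minimal sketch of the url_helper import modernization above, assuming only
# the stdlib: urllib.parse and http.client replace the six.moves aliases and
# the PY2/PY3 httplib branch with one unconditional import block.
from http.client import NOT_FOUND            # was: httplib vs http.client
from urllib.parse import quote, urlparse, urlunparse

parts = list(urlparse("http://169.254.169.254"))
path = parts[2] if parts[2].endswith("/") else parts[2] + "/"
parts[2] = path + quote("meta-data", safe="/:")   # was: urlquote alias
assert urlunparse(parts) == "http://169.254.169.254/meta-data"
assert NOT_FOUND == 404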
import os - from email.mime.base import MIMEBase from email.mime.multipart import MIMEMultipart from email.mime.nonmultipart import MIMENonMultipart from email.mime.text import MIMEText -import six - from cloudinit import handlers from cloudinit import log as logging from cloudinit.url_helper import read_file_or_url, UrlError @@ -259,7 +256,7 @@ class UserDataProcessor(object): # filename and type not be present # or # scalar(payload) - if isinstance(ent, six.string_types): + if isinstance(ent, str): ent = {'content': ent} if not isinstance(ent, (dict)): # TODO(harlowja) raise? @@ -269,13 +266,13 @@ class UserDataProcessor(object): mtype = ent.get('type') if not mtype: default = ARCHIVE_UNDEF_TYPE - if isinstance(content, six.binary_type): + if isinstance(content, bytes): default = ARCHIVE_UNDEF_BINARY_TYPE mtype = handlers.type_from_starts_with(content, default) maintype, subtype = mtype.split('/', 1) if maintype == "text": - if isinstance(content, six.binary_type): + if isinstance(content, bytes): content = content.decode() msg = MIMEText(content, _subtype=subtype) else: @@ -348,7 +345,7 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE): msg.set_payload(data) return msg - if isinstance(raw_data, six.text_type): + if isinstance(raw_data, str): bdata = raw_data.encode('utf-8') else: bdata = raw_data -- cgit v1.2.3 From bb71a9d08d25193836eda91c328760305285574e Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 21 Jan 2020 18:02:42 -0500 Subject: Drop most of the remaining use of six (#179) --- cloudinit/config/cc_chef.py | 4 +-- cloudinit/config/cc_mcollective.py | 10 +++---- cloudinit/config/cc_ntp.py | 20 ++++++------- cloudinit/config/cc_power_state_change.py | 9 +++--- cloudinit/config/cc_rsyslog.py | 7 ++--- cloudinit/config/cc_ubuntu_advantage.py | 4 +-- cloudinit/config/cc_write_files.py | 3 +- cloudinit/config/cc_yum_add_repo.py | 12 +++----- cloudinit/distros/__init__.py | 13 ++++----- cloudinit/distros/freebsd.py | 7 ++--- cloudinit/distros/parsers/sys_conf.py | 6 ++-- cloudinit/distros/ug_util.py | 22 +++++++-------- cloudinit/net/network_state.py | 11 +++----- cloudinit/net/renderer.py | 4 +-- cloudinit/net/sysconfig.py | 15 +++++----- cloudinit/sources/tests/test_init.py | 33 +--------------------- cloudinit/sources/tests/test_oracle.py | 3 +- cloudinit/stages.py | 6 ++-- cloudinit/tests/helpers.py | 15 +++++----- tests/unittests/test_cli.py | 16 +++++------ tests/unittests/test_datasource/test_smartos.py | 4 +-- tests/unittests/test_handler/test_handler_chef.py | 3 +- .../test_handler/test_handler_write_files.py | 15 +++++----- tests/unittests/test_log.py | 11 ++++---- tests/unittests/test_merging.py | 6 ++-- tests/unittests/test_util.py | 17 ++++++----- 26 files changed, 104 insertions(+), 172 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 0ad6b7f1..01d61fa1 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -79,8 +79,6 @@ from cloudinit import templater from cloudinit import url_helper from cloudinit import util -import six - RUBY_VERSION_DEFAULT = "1.8" CHEF_DIRS = tuple([ @@ -273,7 +271,7 @@ def run_chef(chef_cfg, log): cmd_args = chef_cfg['exec_arguments'] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) - elif isinstance(cmd_args, six.string_types): + elif isinstance(cmd_args, str): cmd.append(cmd_args) else: log.warning("Unknown type %s provided for chef" diff --git a/cloudinit/config/cc_mcollective.py 
b/cloudinit/config/cc_mcollective.py index d5f63f5f..351183f1 100644 --- a/cloudinit/config/cc_mcollective.py +++ b/cloudinit/config/cc_mcollective.py @@ -49,9 +49,7 @@ private certificates for mcollective. Their values will be written to """ import errno - -import six -from six import BytesIO +import io # Used since this can maintain comments # and doesn't need a top level section @@ -73,7 +71,7 @@ def configure(config, server_cfg=SERVER_CFG, # original file in order to be able to mix the rest up. try: old_contents = util.load_file(server_cfg, quiet=False, decode=False) - mcollective_config = ConfigObj(BytesIO(old_contents)) + mcollective_config = ConfigObj(io.BytesIO(old_contents)) except IOError as e: if e.errno != errno.ENOENT: raise @@ -93,7 +91,7 @@ def configure(config, server_cfg=SERVER_CFG, 'plugin.ssl_server_private'] = pricert_file mcollective_config['securityprovider'] = 'ssl' else: - if isinstance(cfg, six.string_types): + if isinstance(cfg, str): # Just set it in the 'main' section mcollective_config[cfg_name] = cfg elif isinstance(cfg, (dict)): @@ -119,7 +117,7 @@ def configure(config, server_cfg=SERVER_CFG, raise # Now we got the whole (new) file, write to disk... - contents = BytesIO() + contents = io.BytesIO() mcollective_config.write(contents) util.write_file(server_cfg, contents.getvalue(), mode=0o644) diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 9e074bda..5498bbaa 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -6,19 +6,17 @@ """NTP: enable and configure ntp""" -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +import copy +import os +from textwrap import dedent + from cloudinit import log as logging -from cloudinit.settings import PER_INSTANCE from cloudinit import temp_utils from cloudinit import templater from cloudinit import type_utils from cloudinit import util - -import copy -import os -import six -from textwrap import dedent +from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -460,7 +458,7 @@ def supplemental_schema_validation(ntp_config): for key, value in sorted(ntp_config.items()): keypath = 'ntp:config:' + key if key == 'confpath': - if not all([value, isinstance(value, six.string_types)]): + if not all([value, isinstance(value, str)]): errors.append( 'Expected a config file path {keypath}.' ' Found ({value})'.format(keypath=keypath, value=value)) @@ -472,11 +470,11 @@ def supplemental_schema_validation(ntp_config): elif key in ('template', 'template_name'): if value is None: # Either template or template_name can be none continue - if not isinstance(value, six.string_types): + if not isinstance(value, str): errors.append( 'Expected a string type for {keypath}.' ' Found ({value})'.format(keypath=keypath, value=value)) - elif not isinstance(value, six.string_types): + elif not isinstance(value, str): errors.append( 'Expected a string type for {keypath}.' ' Found ({value})'.format(keypath=keypath, value=value)) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 43a479cf..3e81a3c7 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -49,16 +49,15 @@ key returns 0. 
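# A minimal sketch of the cc_mcollective round trip above, assuming the
# third-party configobj package is installed: ConfigObj parses the old bytes,
# values are updated in place, and io.BytesIO replaces six's BytesIO for the
# serialized result.
import io
from configobj import ConfigObj

old_contents = b"securityprovider = psk\n"
cfg = ConfigObj(io.BytesIO(old_contents))     # was: six BytesIO
cfg["securityprovider"] = "ssl"
out = io.BytesIO()
cfg.write(out)
assert b"securityprovider = ssl" in out.getvalue()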
condition: """ -from cloudinit.settings import PER_INSTANCE -from cloudinit import util - import errno import os import re -import six import subprocess import time +from cloudinit.settings import PER_INSTANCE +from cloudinit import util + frequency = PER_INSTANCE EXIT_FAIL = 254 @@ -183,7 +182,7 @@ def load_power_state(cfg): pstate['timeout']) condition = pstate.get("condition", True) - if not isinstance(condition, six.string_types + (list, bool)): + if not isinstance(condition, (str, list, bool)): raise TypeError("condition type %s invalid. must be list, bool, str") return (args, timeout, condition) diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index ff211f65..5df0137d 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -180,7 +180,6 @@ config entries. Legacy to new mappings are as follows: import os import re -import six from cloudinit import log as logging from cloudinit import util @@ -233,9 +232,9 @@ def load_config(cfg): fillup = ( (KEYNAME_CONFIGS, [], list), - (KEYNAME_DIR, DEF_DIR, six.string_types), - (KEYNAME_FILENAME, DEF_FILENAME, six.string_types), - (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)), + (KEYNAME_DIR, DEF_DIR, str), + (KEYNAME_FILENAME, DEF_FILENAME, str), + (KEYNAME_RELOAD, DEF_RELOAD, (str, list)), (KEYNAME_REMOTES, DEF_REMOTES, dict)) for key, default, vtypes in fillup: diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index f846e9a5..8b6d2a1a 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -4,8 +4,6 @@ from textwrap import dedent -import six - from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) from cloudinit import log as logging @@ -98,7 +96,7 @@ def configure_ua(token=None, enable=None): if enable is None: enable = [] - elif isinstance(enable, six.string_types): + elif isinstance(enable, str): LOG.warning('ubuntu_advantage: enable should be a list, not' ' a string; treating as a single enable') enable = [enable] diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index 0b6546e2..bd87e9e5 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -57,7 +57,6 @@ binary gzip data can be specified and will be decoded before being written. import base64 import os -import six from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE @@ -126,7 +125,7 @@ def decode_perms(perm, default): if perm is None: return default try: - if isinstance(perm, six.integer_types + (float,)): + if isinstance(perm, (int, float)): # Just 'downcast' it (if a float) return int(perm) else: diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 3b354a7d..3673166a 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -30,13 +30,9 @@ entry, the config entry will be skipped. 
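# A minimal sketch of the decode_perms change above: the py2 tuple
# six.integer_types + (float,) collapses to (int, float) on py3. The helper
# below is a simplified stand-in for cc_write_files.decode_perms, not the
# exact implementation.
def decode_perms_sketch(perm, default=0o644):
    if perm is None:
        return default
    if isinstance(perm, (int, float)):   # was: six.integer_types + (float,)
        return int(perm)                 # just 'downcast' floats like 420.0
    return int(perm, 8)                  # strings are octal, e.g. "0644"

assert decode_perms_sketch(None) == 0o644
assert decode_perms_sketch(420.0) == 420
assert decode_perms_sketch("0644") == 0o644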
# any repository configuration options (see man yum.conf) """ +import io import os - -try: - from configparser import ConfigParser -except ImportError: - from ConfigParser import ConfigParser -import six +from configparser import ConfigParser from cloudinit import util @@ -57,7 +53,7 @@ def _format_repo_value(val): # Can handle 'lists' in certain cases # See: https://linux.die.net/man/5/yum.conf return "\n".join([_format_repo_value(v) for v in val]) - if not isinstance(val, six.string_types): + if not isinstance(val, str): return str(val) return val @@ -72,7 +68,7 @@ def _format_repository_config(repo_id, repo_config): # For now assume that people using this know # the format of yum and don't verify keys/values further to_be.set(repo_id, k, _format_repo_value(v)) - to_be_stream = six.StringIO() + to_be_stream = io.StringIO() to_be.write(to_be_stream) to_be_stream.seek(0) lines = to_be_stream.readlines() diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index cdce26f2..92598a2d 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -9,13 +9,11 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six -from six import StringIO - import abc import os import re import stat +from io import StringIO from cloudinit import importer from cloudinit import log as logging @@ -53,8 +51,7 @@ _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$') PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate'] -@six.add_metaclass(abc.ABCMeta) -class Distro(object): +class Distro(metaclass=abc.ABCMeta): usr_lib_exec = "/usr/lib" hosts_fn = "/etc/hosts" @@ -429,7 +426,7 @@ class Distro(object): # support kwargs having groups=[list] or groups="g1,g2" groups = kwargs.get('groups') if groups: - if isinstance(groups, six.string_types): + if isinstance(groups, str): groups = groups.split(",") # remove any white spaces in group names, most likely @@ -544,7 +541,7 @@ class Distro(object): if 'ssh_authorized_keys' in kwargs: # Try to handle this in a smart manner. keys = kwargs['ssh_authorized_keys'] - if isinstance(keys, six.string_types): + if isinstance(keys, str): keys = [keys] elif isinstance(keys, dict): keys = list(keys.values()) @@ -668,7 +665,7 @@ class Distro(object): if isinstance(rules, (list, tuple)): for rule in rules: lines.append("%s %s" % (user, rule)) - elif isinstance(rules, six.string_types): + elif isinstance(rules, str): lines.append("%s %s" % (user, rules)) else: msg = "Can not create sudoers rule addition with type %r" diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 40e435e7..026d1142 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -5,10 +5,8 @@ # This file is part of cloud-init. See LICENSE file for license information. 
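# A minimal sketch of the groups handling shown in Distro.create_user above:
# after the six removal, a comma-separated string is the only non-list shape
# left to split. The helper name is illustrative.
def normalize_groups(groups):
    if isinstance(groups, str):          # was: six.string_types
        groups = groups.split(",")
    # strip stray whitespace, most likely from "g1, g2" style input
    return [g.strip() for g in groups]

assert normalize_groups("adm, sudo") == ["adm", "sudo"]
assert normalize_groups(["adm"]) == ["adm"]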
import os -import six -from six import StringIO - import re +from io import StringIO from cloudinit import distros from cloudinit import helpers @@ -108,8 +106,7 @@ class Distro(distros.Distro): } for key, val in kwargs.items(): - if (key in pw_useradd_opts and val and - isinstance(val, six.string_types)): + if key in pw_useradd_opts and val and isinstance(val, str): pw_useradd_cmd.extend([pw_useradd_opts[key], val]) elif key in pw_useradd_flags and val: diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py index 44df17de..dee4c551 100644 --- a/cloudinit/distros/parsers/sys_conf.py +++ b/cloudinit/distros/parsers/sys_conf.py @@ -4,11 +4,9 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six -from six import StringIO - import pipes import re +from io import StringIO # This library is used to parse/write # out the various sysconfig files edited (best attempt effort) @@ -65,7 +63,7 @@ class SysConf(configobj.ConfigObj): return out_contents.getvalue() def _quote(self, value, multiline=False): - if not isinstance(value, six.string_types): + if not isinstance(value, str): raise ValueError('Value "%s" is not a string' % (value)) if len(value) == 0: return '' diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py index 9378dd78..08446a95 100755 --- a/cloudinit/distros/ug_util.py +++ b/cloudinit/distros/ug_util.py @@ -9,8 +9,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six - from cloudinit import log as logging from cloudinit import type_utils from cloudinit import util @@ -29,7 +27,7 @@ LOG = logging.getLogger(__name__) # is the standard form used in the rest # of cloud-init def _normalize_groups(grp_cfg): - if isinstance(grp_cfg, six.string_types): + if isinstance(grp_cfg, str): grp_cfg = grp_cfg.strip().split(",") if isinstance(grp_cfg, list): c_grp_cfg = {} @@ -39,7 +37,7 @@ def _normalize_groups(grp_cfg): if k not in c_grp_cfg: if isinstance(v, list): c_grp_cfg[k] = list(v) - elif isinstance(v, six.string_types): + elif isinstance(v, str): c_grp_cfg[k] = [v] else: raise TypeError("Bad group member type %s" % @@ -47,12 +45,12 @@ def _normalize_groups(grp_cfg): else: if isinstance(v, list): c_grp_cfg[k].extend(v) - elif isinstance(v, six.string_types): + elif isinstance(v, str): c_grp_cfg[k].append(v) else: raise TypeError("Bad group member type %s" % type_utils.obj_name(v)) - elif isinstance(i, six.string_types): + elif isinstance(i, str): if i not in c_grp_cfg: c_grp_cfg[i] = [] else: @@ -89,7 +87,7 @@ def _normalize_users(u_cfg, def_user_cfg=None): if isinstance(u_cfg, dict): ad_ucfg = [] for (k, v) in u_cfg.items(): - if isinstance(v, (bool, int, float) + six.string_types): + if isinstance(v, (bool, int, float, str)): if util.is_true(v): ad_ucfg.append(str(k)) elif isinstance(v, dict): @@ -99,12 +97,12 @@ def _normalize_users(u_cfg, def_user_cfg=None): raise TypeError(("Unmappable user value type %s" " for key %s") % (type_utils.obj_name(v), k)) u_cfg = ad_ucfg - elif isinstance(u_cfg, six.string_types): + elif isinstance(u_cfg, str): u_cfg = util.uniq_merge_sorted(u_cfg) users = {} for user_config in u_cfg: - if isinstance(user_config, (list,) + six.string_types): + if isinstance(user_config, (list, str)): for u in util.uniq_merge(user_config): if u and u not in users: users[u] = {} @@ -209,7 +207,7 @@ def normalize_users_groups(cfg, distro): old_user = cfg['user'] # Translate it into the format that is more useful # going forward - if 
isinstance(old_user, six.string_types): + if isinstance(old_user, str): old_user = { 'name': old_user, } @@ -238,7 +236,7 @@ def normalize_users_groups(cfg, distro): default_user_config = util.mergemanydict([old_user, distro_user_config]) base_users = cfg.get('users', []) - if not isinstance(base_users, (list, dict) + six.string_types): + if not isinstance(base_users, (list, dict, str)): LOG.warning(("Format for 'users' key must be a comma separated string" " or a dictionary or a list and not %s"), type_utils.obj_name(base_users)) @@ -252,7 +250,7 @@ def normalize_users_groups(cfg, distro): base_users.append({'name': 'default'}) elif isinstance(base_users, dict): base_users['default'] = dict(base_users).get('default', True) - elif isinstance(base_users, six.string_types): + elif isinstance(base_users, str): # Just append it on to be re-parsed later base_users += ",default" diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 9b126100..63d6e291 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -10,8 +10,6 @@ import logging import socket import struct -import six - from cloudinit import safeyaml from cloudinit import util @@ -186,7 +184,7 @@ class NetworkState(object): def iter_interfaces(self, filter_func=None): ifaces = self._network_state.get('interfaces', {}) - for iface in six.itervalues(ifaces): + for iface in ifaces.values(): if filter_func is None: yield iface else: @@ -220,8 +218,7 @@ class NetworkState(object): ) -@six.add_metaclass(CommandHandlerMeta) -class NetworkStateInterpreter(object): +class NetworkStateInterpreter(metaclass=CommandHandlerMeta): initial_network_state = { 'interfaces': {}, @@ -970,7 +967,7 @@ def ipv4_mask_to_net_prefix(mask): """ if isinstance(mask, int): return mask - if isinstance(mask, six.string_types): + if isinstance(mask, str): try: return int(mask) except ValueError: @@ -997,7 +994,7 @@ def ipv6_mask_to_net_prefix(mask): if isinstance(mask, int): return mask - if isinstance(mask, six.string_types): + if isinstance(mask, str): try: return int(mask) except ValueError: diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py index 5f32e90f..2a61a7a8 100644 --- a/cloudinit/net/renderer.py +++ b/cloudinit/net/renderer.py @@ -6,7 +6,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import abc -import six +import io from .network_state import parse_net_config_data from .udev import generate_udev_rule @@ -34,7 +34,7 @@ class Renderer(object): """Given state, emit udev rules to map mac to ifname.""" # TODO(harlowja): this seems shared between eni renderer and # this, so move it to a shared location. - content = six.StringIO() + content = io.StringIO() for iface in network_state.iter_interfaces(filter_by_physical): # for physical interfaces write out a persist net udev rule if 'name' in iface and iface.get('mac_address'): diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 3e06af01..07668d3e 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -1,16 +1,15 @@ # This file is part of cloud-init. See LICENSE file for license information. 
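# A minimal sketch of what network_state.ipv4_mask_to_net_prefix accepts after
# the change: an int prefix, a str prefix, or a dotted-quad netmask (the
# dotted-quad branch below is a simplified re-statement, not the exact code).
import socket
import struct

def mask_to_prefix(mask):
    if isinstance(mask, int):
        return mask
    if isinstance(mask, str):            # was: six.string_types
        try:
            return int(mask)
        except ValueError:
            pass
    # count the set bits of the packed dotted-quad form
    packed = struct.unpack(">I", socket.inet_aton(mask))[0]
    return bin(packed).count("1")

assert mask_to_prefix(24) == 24
assert mask_to_prefix("24") == 24
assert mask_to_prefix("255.255.255.0") == 24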
+import io import os import re -import six +from configobj import ConfigObj -from cloudinit.distros.parsers import networkmanager_conf -from cloudinit.distros.parsers import resolv_conf from cloudinit import log as logging from cloudinit import util - -from configobj import ConfigObj +from cloudinit.distros.parsers import networkmanager_conf +from cloudinit.distros.parsers import resolv_conf from . import renderer from .network_state import ( @@ -96,7 +95,7 @@ class ConfigMap(object): return len(self._conf) def to_string(self): - buf = six.StringIO() + buf = io.StringIO() buf.write(_make_header()) if self._conf: buf.write("\n") @@ -104,7 +103,7 @@ class ConfigMap(object): value = self._conf[key] if isinstance(value, bool): value = self._bool_map[value] - if not isinstance(value, six.string_types): + if not isinstance(value, str): value = str(value) buf.write("%s=%s\n" % (key, _quote_value(value))) return buf.getvalue() @@ -150,7 +149,7 @@ class Route(ConfigMap): # only accept ipv4 and ipv6 if proto not in ['ipv4', 'ipv6']: raise ValueError("Unknown protocol '%s'" % (str(proto))) - buf = six.StringIO() + buf = io.StringIO() buf.write(_make_header()) if self._conf: buf.write("\n") diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 9698261b..f73b37ed 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -3,7 +3,6 @@ import copy import inspect import os -import six import stat from cloudinit.event import EventType @@ -13,7 +12,7 @@ from cloudinit.sources import ( EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource, canonical_cloud_id, redact_sensitive_keys) -from cloudinit.tests.helpers import CiTestCase, skipIf, mock +from cloudinit.tests.helpers import CiTestCase, mock from cloudinit.user_data import UserDataProcessor from cloudinit import util @@ -422,7 +421,6 @@ class TestDataSource(CiTestCase): {'network_json': 'is good'}, instance_data['ds']['network_json']) - @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes") def test_get_data_base64encodes_unserializable_bytes(self): """On py3, get_data base64encodes any unserializable content.""" tmp = self.tmp_dir() @@ -440,35 +438,6 @@ class TestDataSource(CiTestCase): {'key1': 'val1', 'key2': {'key2.1': 'EjM='}}, instance_json['ds']['meta_data']) - @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes") - def test_get_data_handles_bytes_values(self): - """On py2 get_data handles bytes values without having to b64encode.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) - self.assertTrue(datasource.get_data()) - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - content = util.load_file(json_file) - instance_json = util.load_json(content) - self.assertEqual([], instance_json['base64_encoded_keys']) - self.assertEqual( - {'key1': 'val1', 'key2': {'key2.1': '\x123'}}, - instance_json['ds']['meta_data']) - - @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8") - def test_non_utf8_encoding_gets_b64encoded(self): - """When non-utf-8 values exist in py2 instance-data is b64encoded.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}}) - 
self.assertTrue(datasource.get_data()) - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - instance_json = util.load_json(util.load_file(json_file)) - key21_value = instance_json['ds']['meta_data']['key2']['key2.1'] - self.assertEqual('ci-b64:' + util.b64e(b'ab\xaadef'), key21_value) - def test_get_hostname_subclass_support(self): """Validate get_hostname signature on all subclasses of DataSource.""" # Use inspect.getfullargspec when we drop py2.6 and py2.7 diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index 85b6db97..6c551fcb 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -13,7 +13,6 @@ import httpretty import json import mock import os -import six import uuid DS_PATH = "cloudinit.sources.DataSourceOracle" @@ -334,7 +333,7 @@ class TestReadMetaData(test_helpers.HttprettyTestCase): for k, v in data.items(): httpretty.register_uri( httpretty.GET, self.mdurl + MD_VER + "/" + k, - v if not isinstance(v, six.text_type) else v.encode('utf-8')) + v if not isinstance(v, str) else v.encode('utf-8')) def test_broken_no_sys_uuid(self, m_read_system_uuid): """Datasource requires ability to read system_uuid and true return.""" diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 71f3a49e..db8ba64c 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -6,11 +6,9 @@ import copy import os +import pickle import sys -import six -from six.moves import cPickle as pickle - from cloudinit.settings import ( FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG) @@ -758,7 +756,7 @@ class Modules(object): for item in cfg_mods: if not item: continue - if isinstance(item, six.string_types): + if isinstance(item, str): module_list.append({ 'mod': item.strip(), }) diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 4dad2afd..0220648d 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -4,6 +4,7 @@ from __future__ import print_function import functools import httpretty +import io import logging import os import random @@ -14,7 +15,6 @@ import tempfile import time import mock -import six import unittest2 from unittest2.util import strclass @@ -72,7 +72,7 @@ def retarget_many_wrapper(new_base, am, old_func): # Python 3 some of these now accept file-descriptors (integers). # That breaks rebase_path() so in lieu of a better solution, just # don't rebase if we get a fd. - if isinstance(path, six.string_types): + if isinstance(path, str): n_args[i] = rebase_path(path, new_base) return old_func(*n_args, **kwds) return wrapper @@ -149,7 +149,7 @@ class CiTestCase(TestCase): if self.with_logs: # Create a log handler so unit tests can search expected logs. 
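# A stand-alone sketch of the log-capture pattern configured at this step,
# using io.StringIO in place of six.StringIO (the logger name and message
# below are illustrative):
import io
import logging

logs = io.StringIO()
handler = logging.StreamHandler(logs)
handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
logging.getLogger().addHandler(handler)
logging.getLogger().warning("boom")
assert "WARNING: boom" in logs.getvalue()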
self.logger = logging.getLogger() - self.logs = six.StringIO() + self.logs = io.StringIO() formatter = logging.Formatter('%(levelname)s: %(message)s') handler = logging.StreamHandler(self.logs) handler.setFormatter(formatter) @@ -166,7 +166,7 @@ class CiTestCase(TestCase): else: cmd = args[0] - if not isinstance(cmd, six.string_types): + if not isinstance(cmd, str): cmd = cmd[0] pass_through = False if not isinstance(self.allowed_subp, (list, bool)): @@ -346,8 +346,9 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def patchOpen(self, new_root): trap_func = retarget_many_wrapper(new_root, 1, open) - name = 'builtins.open' if six.PY3 else '__builtin__.open' - self.patched_funcs.enter_context(mock.patch(name, trap_func)) + self.patched_funcs.enter_context( + mock.patch('builtins.open', trap_func) + ) def patchStdoutAndStderr(self, stdout=None, stderr=None): if stdout is not None: @@ -420,7 +421,7 @@ def populate_dir(path, files): p = os.path.sep.join([path, name]) util.ensure_dir(os.path.dirname(p)) with open(p, "wb") as fp: - if isinstance(content, six.binary_type): + if isinstance(content, bytes): fp.write(content) else: fp.write(content.encode('utf-8')) diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index d283f136..e57c15d1 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -1,8 +1,8 @@ # This file is part of cloud-init. See LICENSE file for license information. -from collections import namedtuple import os -import six +import io +from collections import namedtuple from cloudinit.cmd import main as cli from cloudinit.tests import helpers as test_helpers @@ -18,7 +18,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def setUp(self): super(TestCLI, self).setUp() - self.stderr = six.StringIO() + self.stderr = io.StringIO() self.patchStdoutAndStderr(stderr=self.stderr) def _call_main(self, sysv_args=None): @@ -147,7 +147,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_conditional_subcommands_from_entry_point_sys_argv(self): """Subcommands from entry-point are properly parsed from sys.argv.""" - stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) expected_errors = [ @@ -178,7 +178,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_collect_logs_subcommand_parser(self): """The subcommand cloud-init collect-logs calls the subparser.""" # Provide -h param to collect-logs to avoid having to mock behavior. - stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) self._call_main(['cloud-init', 'collect-logs', '-h']) self.assertIn('usage: cloud-init collect-log', stdout.getvalue()) @@ -186,7 +186,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_clean_subcommand_parser(self): """The subcommand cloud-init clean calls the subparser.""" # Provide -h param to clean to avoid having to mock behavior. - stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) self._call_main(['cloud-init', 'clean', '-h']) self.assertIn('usage: cloud-init clean', stdout.getvalue()) @@ -194,7 +194,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_status_subcommand_parser(self): """The subcommand cloud-init status calls the subparser.""" # Provide -h param to clean to avoid having to mock behavior. 
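# A minimal sketch of the patchOpen simplification above: with py2 gone, only
# the 'builtins.open' target remains to patch. mock_open is stdlib; the path
# below is an arbitrary example.
from unittest import mock

with mock.patch("builtins.open", mock.mock_open(read_data="data")) as m:
    with open("/etc/fake", "r") as fp:   # any path; open() is intercepted
        assert fp.read() == "data"
m.assert_called_once_with("/etc/fake", "r")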
- stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) self._call_main(['cloud-init', 'status', '-h']) self.assertIn('usage: cloud-init status', stdout.getvalue()) @@ -219,7 +219,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_wb_devel_schema_subcommand_doc_content(self): """Validate that doc content is sane from known examples.""" - stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) self._call_main(['cloud-init', 'devel', 'schema', '--doc']) expected_doc_sections = [ diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index d5b1c29c..62084de5 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -33,8 +33,6 @@ from cloudinit.sources.DataSourceSmartOS import ( identify_file) from cloudinit.event import EventType -import six - from cloudinit import helpers as c_helpers from cloudinit.util import ( b64e, subp, ProcessExecutionError, which, write_file) @@ -798,7 +796,7 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): return self.serial.write.call_args[0][0] def test_get_metadata_writes_bytes(self): - self.assertIsInstance(self._get_written_line(), six.binary_type) + self.assertIsInstance(self._get_written_line(), bytes) def test_get_metadata_line_starts_with_v2(self): foo = self._get_written_line() diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py index f4311268..2dab3a54 100644 --- a/tests/unittests/test_handler/test_handler_chef.py +++ b/tests/unittests/test_handler/test_handler_chef.py @@ -4,7 +4,6 @@ import httpretty import json import logging import os -import six from cloudinit import cloud from cloudinit.config import cc_chef @@ -178,7 +177,7 @@ class TestChef(FilesystemMockingTestCase): continue # the value from the cfg overrides that in the default val = cfg['chef'].get(k, v) - if isinstance(val, six.string_types): + if isinstance(val, str): self.assertIn(val, c) c = util.load_file(cc_chef.CHEF_FB_PATH) self.assertEqual({}, json.loads(c)) diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/test_handler/test_handler_write_files.py index bc8756ca..ed0a4da2 100644 --- a/tests/unittests/test_handler/test_handler_write_files.py +++ b/tests/unittests/test_handler/test_handler_write_files.py @@ -1,17 +1,16 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.config.cc_write_files import write_files, decode_perms -from cloudinit import log as logging -from cloudinit import util - -from cloudinit.tests.helpers import CiTestCase, FilesystemMockingTestCase - import base64 import gzip +import io import shutil -import six import tempfile +from cloudinit import log as logging +from cloudinit import util +from cloudinit.config.cc_write_files import write_files, decode_perms +from cloudinit.tests.helpers import CiTestCase, FilesystemMockingTestCase + LOG = logging.getLogger(__name__) YAML_TEXT = """ @@ -138,7 +137,7 @@ class TestDecodePerms(CiTestCase): def _gzip_bytes(data): - buf = six.BytesIO() + buf = io.BytesIO() fp = None try: fp = gzip.GzipFile(fileobj=buf, mode="wb") diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py index cd6296d6..e069a487 100644 --- a/tests/unittests/test_log.py +++ b/tests/unittests/test_log.py @@ -2,14 +2,15 @@ """Tests for cloudinit.log """ -from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT -from cloudinit import log as ci_logging -from cloudinit.tests.helpers import CiTestCase import datetime +import io import logging -import six import time +from cloudinit import log as ci_logging +from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT +from cloudinit.tests.helpers import CiTestCase + class TestCloudInitLogger(CiTestCase): @@ -18,7 +19,7 @@ class TestCloudInitLogger(CiTestCase): # of sys.stderr, we'll plug in a StringIO() object so we can see # what gets logged logging.Formatter.converter = time.gmtime - self.ci_logs = six.StringIO() + self.ci_logs = io.StringIO() self.ci_root = logging.getLogger() console = logging.StreamHandler(self.ci_logs) console.setFormatter(logging.Formatter(ci_logging.DEF_CON_FORMAT)) diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index 3a5072c7..10871bcf 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -13,13 +13,11 @@ import glob import os import random import re -import six import string SOURCE_PAT = "source*.*yaml" EXPECTED_PAT = "expected%s.yaml" -TYPES = [dict, str, list, tuple, None] -TYPES.extend(six.integer_types) +TYPES = [dict, str, list, tuple, None, int] def _old_mergedict(src, cand): @@ -85,7 +83,7 @@ def _make_dict(current_depth, max_depth, rand): pass if t in [tuple]: base = tuple(base) - elif t in six.integer_types: + elif t in [int]: base = rand.randint(0, 2 ** 8) elif t in [str]: base = _random_str(rand) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 0e71db82..75a3f0b4 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -2,16 +2,15 @@ from __future__ import print_function +import io +import json import logging import os import re import shutil import stat -import tempfile - -import json -import six import sys +import tempfile import yaml from cloudinit import importer, util @@ -320,7 +319,7 @@ class TestLoadYaml(helpers.CiTestCase): def test_python_unicode(self): # complex type of python/unicode is explicitly allowed - myobj = {'1': six.text_type("FOOBAR")} + myobj = {'1': "FOOBAR"} safe_yaml = yaml.dump(myobj) self.assertEqual(util.load_yaml(blob=safe_yaml, default=self.mydefault), @@ -663,8 +662,8 @@ class TestMultiLog(helpers.FilesystemMockingTestCase): self.patchOS(self.root) self.patchUtils(self.root) self.patchOpen(self.root) - self.stdout = six.StringIO() - self.stderr = six.StringIO() + self.stdout = io.StringIO() + self.stderr = io.StringIO() self.patchStdoutAndStderr(self.stdout, self.stderr) 
def test_stderr_used_by_default(self): @@ -879,8 +878,8 @@ class TestSubp(helpers.CiTestCase): """Raised exc should have stderr, stdout as string if no decode.""" with self.assertRaises(util.ProcessExecutionError) as cm: util.subp([BOGUS_COMMAND], decode=True) - self.assertTrue(isinstance(cm.exception.stdout, six.string_types)) - self.assertTrue(isinstance(cm.exception.stderr, six.string_types)) + self.assertTrue(isinstance(cm.exception.stdout, str)) + self.assertTrue(isinstance(cm.exception.stderr, str)) def test_bunch_of_slashes_in_path(self): self.assertEqual("/target/my/path/", -- cgit v1.2.3 From 9e3ac98097ed1c7f49ec8975a40aec7229231aae Mon Sep 17 00:00:00 2001 From: Louis Bouchard Date: Wed, 29 Jan 2020 16:55:09 +0100 Subject: Scaleway: Fix DatasourceScaleway to avoid backtrace (#128) Make sure network_config is created when self._network_config is unset. Co-authored-by: Scott Moser --- cloudinit/sources/DataSourceScaleway.py | 9 ++++- tests/unittests/test_datasource/test_scaleway.py | 49 ++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index b573b382..83c2bf65 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -188,7 +188,7 @@ class DataSourceScaleway(sources.DataSource): self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES)) self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT)) self._fallback_interface = None - self._network_config = None + self._network_config = sources.UNSET def _crawl_metadata(self): resp = url_helper.readurl(self.metadata_address, @@ -227,7 +227,12 @@ class DataSourceScaleway(sources.DataSource): Configure networking according to data received from the metadata API. """ - if self._network_config: + if self._network_config is None: + LOG.warning('Found None as cached _network_config. ' + 'Resetting to %s', sources.UNSET) + self._network_config = sources.UNSET + + if self._network_config != sources.UNSET: return self._network_config if self._fallback_interface is None: diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index f96bf0a2..1b4dd0ad 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -7,6 +7,7 @@ import requests from cloudinit import helpers from cloudinit import settings +from cloudinit import sources from cloudinit.sources import DataSourceScaleway from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase @@ -403,3 +404,51 @@ class TestDataSourceScaleway(HttprettyTestCase): netcfg = self.datasource.network_config self.assertEqual(netcfg, '0xdeadbeef') + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_unset(self, m_get_cmdline, fallback_nic): + """ + _network_config will be set to sources.UNSET after the first boot. + Make sure it behave correctly. 
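# The sentinel logic under test, restated as a minimal sketch (UNSET stands
# in for sources.UNSET; network_config and build_config are hypothetical
# stand-ins for the DataSourceScaleway property and its rebuild path):
UNSET = "_unset"

def network_config(cached, build_config):
    if cached is None:
        # a stale pickle from an older release may carry None; treat it as
        # "never computed" rather than as a valid cached config
        cached = UNSET
    if cached != UNSET:
        return cached
    return build_config()

assert network_config(None, lambda: {"version": 1}) == {"version": 1}
assert network_config({"version": 1}, lambda: None) == {"version": 1}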
+ """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + self.datasource._network_config = sources.UNSET + + resp = {'version': 1, + 'config': [{ + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}]}] + } + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.LOG.warning') + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_cached_none(self, m_get_cmdline, fallback_nic, + logwarning): + """ + network_config() should return config data if cached data is None + rather than sources.UNSET + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + self.datasource._network_config = None + + resp = {'version': 1, + 'config': [{ + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}]}] + } + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, resp) + logwarning.assert_called_with('Found None as cached _network_config. ' + 'Resetting to %s', sources.UNSET) -- cgit v1.2.3 From 5f8f85bb38cc972d3d2c705a1ec73db3f690f323 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 29 Jan 2020 16:55:39 -0500 Subject: Replace mock library with unittest.mock (#186) * cloudinit: replace "import mock" with "from unittest import mock" * test-requirements.txt: drop mock Co-authored-by: Chad Smith --- cloudinit/config/tests/test_set_passwords.py | 2 +- cloudinit/net/tests/test_init.py | 2 +- cloudinit/net/tests/test_network_state.py | 3 ++- cloudinit/sources/tests/test_oracle.py | 2 +- cloudinit/tests/helpers.py | 2 +- cloudinit/tests/test_dhclient_hook.py | 2 +- cloudinit/tests/test_gpg.py | 4 ++-- cloudinit/tests/test_version.py | 4 ++-- test-requirements.txt | 1 - tests/unittests/test_datasource/test_aliyun.py | 2 +- tests/unittests/test_datasource/test_ec2.py | 2 +- tests/unittests/test_datasource/test_gce.py | 2 +- tests/unittests/test_datasource/test_maas.py | 2 +- tests/unittests/test_distros/test_user_data_normalize.py | 3 ++- tests/unittests/test_handler/test_handler_locale.py | 2 +- tests/unittests/test_reporting.py | 4 ++-- tests/unittests/test_reporting_hyperv.py | 2 +- tests/unittests/test_sshutil.py | 2 +- 18 files changed, 22 insertions(+), 21 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py index 85e2f1fe..3b5cdd06 100644 --- a/cloudinit/config/tests/test_set_passwords.py +++ b/cloudinit/config/tests/test_set_passwords.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-import mock +from unittest import mock from cloudinit.config import cc_set_passwords as setpass from cloudinit.tests.helpers import CiTestCase diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index 6db93e26..5081a337 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -3,10 +3,10 @@ import copy import errno import httpretty -import mock import os import requests import textwrap +from unittest import mock import cloudinit.net as net from cloudinit.util import ensure_file, write_file, ProcessExecutionError diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py index fcb4a995..55880852 100644 --- a/cloudinit/net/tests/test_network_state.py +++ b/cloudinit/net/tests/test_network_state.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. -import mock +from unittest import mock + from cloudinit.net import network_state from cloudinit.tests.helpers import CiTestCase diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index 6c551fcb..abf3d359 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -11,9 +11,9 @@ import argparse import copy import httpretty import json -import mock import os import uuid +from unittest import mock DS_PATH = "cloudinit.sources.DataSourceOracle" MD_VER = "2013-10-17" diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 0220648d..70f6bad7 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -13,8 +13,8 @@ import string import sys import tempfile import time +from unittest import mock -import mock import unittest2 from unittest2.util import strclass diff --git a/cloudinit/tests/test_dhclient_hook.py b/cloudinit/tests/test_dhclient_hook.py index 7aab8dd5..eadae81c 100644 --- a/cloudinit/tests/test_dhclient_hook.py +++ b/cloudinit/tests/test_dhclient_hook.py @@ -7,8 +7,8 @@ from cloudinit.tests.helpers import CiTestCase, dir2dict, populate_dir import argparse import json -import mock import os +from unittest import mock class TestDhclientHook(CiTestCase): diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py index 0562b966..8dd57137 100644 --- a/cloudinit/tests/test_gpg.py +++ b/cloudinit/tests/test_gpg.py @@ -1,12 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. """Test gpg module.""" +from unittest import mock + from cloudinit import gpg from cloudinit import util from cloudinit.tests.helpers import CiTestCase -import mock - @mock.patch("cloudinit.gpg.time.sleep") @mock.patch("cloudinit.gpg.util.subp") diff --git a/cloudinit/tests/test_version.py b/cloudinit/tests/test_version.py index a96c2a47..778a762c 100644 --- a/cloudinit/tests/test_version.py +++ b/cloudinit/tests/test_version.py @@ -1,10 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+from unittest import mock + from cloudinit.tests.helpers import CiTestCase from cloudinit import version -import mock - class TestExportsFeatures(CiTestCase): def test_has_network_config_v1(self): diff --git a/test-requirements.txt b/test-requirements.txt index d9d41b57..6fb22b24 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,6 +1,5 @@ # Needed generally in tests httpretty>=0.7.1 -mock nose unittest2 coverage diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py index e9213ca1..1e66fcdb 100644 --- a/tests/unittests/test_datasource/test_aliyun.py +++ b/tests/unittests/test_datasource/test_aliyun.py @@ -2,8 +2,8 @@ import functools import httpretty -import mock import os +from unittest import mock from cloudinit import helpers from cloudinit.sources import DataSourceAliYun as ay diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 34a089f2..19e1af2b 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -3,7 +3,7 @@ import copy import httpretty import json -import mock +from unittest import mock from cloudinit import helpers from cloudinit.sources import DataSourceEc2 as ec2 diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 67744d32..e9dd6e60 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -7,8 +7,8 @@ import datetime import httpretty import json -import mock import re +from unittest import mock from base64 import b64encode, b64decode from six.moves.urllib_parse import urlparse diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index c84d067e..2a81d3f5 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -1,11 +1,11 @@ # This file is part of cloud-init. See LICENSE file for license information. from copy import copy -import mock import os import shutil import tempfile import yaml +from unittest import mock from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index fa4b6cfe..a6faf0ef 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -1,12 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+from unittest import mock + from cloudinit import distros from cloudinit.distros import ug_util from cloudinit import helpers from cloudinit import settings from cloudinit.tests.helpers import TestCase -import mock bcfg = { diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py index e29a06f9..b3deb250 100644 --- a/tests/unittests/test_handler/test_handler_locale.py +++ b/tests/unittests/test_handler/test_handler_locale.py @@ -20,10 +20,10 @@ from configobj import ConfigObj from six import BytesIO import logging -import mock import os import shutil import tempfile +from unittest import mock LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index e15ba6cf..6814030e 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -2,12 +2,12 @@ # # This file is part of cloud-init. See LICENSE file for license information. +from unittest import mock + from cloudinit import reporting from cloudinit.reporting import events from cloudinit.reporting import handlers -import mock - from cloudinit.tests.helpers import TestCase diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py index 3582cf0b..b3e083c6 100644 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -8,7 +8,7 @@ import os import struct import time import re -import mock +from unittest import mock from cloudinit import util from cloudinit.tests.helpers import CiTestCase diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index b227c20b..0be41924 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. -from mock import patch from collections import namedtuple +from unittest.mock import patch from cloudinit import ssh_util from cloudinit.tests import helpers as test_helpers -- cgit v1.2.3 From 87cd040ed8fe7195cbb357ed3bbf53cd2a81436c Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 19 Feb 2020 15:01:09 -0600 Subject: ec2: Do not log IMDSv2 token values, instead use REDACTED (#219) Instead of logging the token values used, log the headers and replace the actual values with the string 'REDACTED'. This allows users to examine cloud-init.log and see that the IMDSv2 token header is being used but avoids leaving the value used in the log file itself.
LP: #1863943 --- cloudinit/ec2_utils.py | 12 ++++++++-- cloudinit/sources/DataSourceEc2.py | 35 +++++++++++++++++++---------- cloudinit/url_helper.py | 27 ++++++++++++++++------ tests/unittests/test_datasource/test_ec2.py | 17 ++++++++++++++ 4 files changed, 70 insertions(+), 21 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 57708c14..34acfe84 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -142,7 +142,8 @@ def skip_retry_on_codes(status_codes, _request_args, cause): def get_instance_userdata(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, - headers_cb=None, exception_cb=None): + headers_cb=None, headers_redact=None, + exception_cb=None): ud_url = url_helper.combine_url(metadata_address, api_version) ud_url = url_helper.combine_url(ud_url, 'user-data') user_data = '' @@ -155,7 +156,8 @@ def get_instance_userdata(api_version='latest', SKIP_USERDATA_CODES) response = url_helper.read_file_or_url( ud_url, ssl_details=ssl_details, timeout=timeout, - retries=retries, exception_cb=exception_cb, headers_cb=headers_cb) + retries=retries, exception_cb=exception_cb, headers_cb=headers_cb, + headers_redact=headers_redact) user_data = response.contents except url_helper.UrlError as e: if e.code not in SKIP_USERDATA_CODES: @@ -169,11 +171,13 @@ def _get_instance_metadata(tree, api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, leaf_decoder=None, headers_cb=None, + headers_redact=None, exception_cb=None): md_url = url_helper.combine_url(metadata_address, api_version, tree) caller = functools.partial( url_helper.read_file_or_url, ssl_details=ssl_details, timeout=timeout, retries=retries, headers_cb=headers_cb, + headers_redact=headers_redact, exception_cb=exception_cb) def mcaller(url): @@ -197,6 +201,7 @@ def get_instance_metadata(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, leaf_decoder=None, headers_cb=None, + headers_redact=None, exception_cb=None): # Note, 'meta-data' explicitly has trailing /. 
# this is required for CloudStack (LP: #1356855) @@ -204,6 +209,7 @@ def get_instance_metadata(api_version='latest', metadata_address=metadata_address, ssl_details=ssl_details, timeout=timeout, retries=retries, leaf_decoder=leaf_decoder, + headers_redact=headers_redact, headers_cb=headers_cb, exception_cb=exception_cb) @@ -212,12 +218,14 @@ def get_instance_identity(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, leaf_decoder=None, headers_cb=None, + headers_redact=None, exception_cb=None): return _get_instance_metadata(tree='dynamic/instance-identity', api_version=api_version, metadata_address=metadata_address, ssl_details=ssl_details, timeout=timeout, retries=retries, leaf_decoder=leaf_decoder, + headers_redact=headers_redact, headers_cb=headers_cb, exception_cb=exception_cb) # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index b9f346a6..0f2bfef4 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -31,6 +31,9 @@ STRICT_ID_DEFAULT = "warn" API_TOKEN_ROUTE = 'latest/api/token' API_TOKEN_DISABLED = '_ec2_disable_api_token' AWS_TOKEN_TTL_SECONDS = '21600' +AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token' +AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds' +AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER] class CloudNames(object): @@ -158,7 +161,8 @@ class DataSourceEc2(sources.DataSource): for api_ver in self.extended_metadata_versions: url = url_tmpl.format(self.metadata_address, api_ver) try: - resp = uhelp.readurl(url=url, headers=headers) + resp = uhelp.readurl(url=url, headers=headers, + headers_redact=AWS_TOKEN_REDACT) except uhelp.UrlError as e: LOG.debug('url %s raised exception %s', url, e) else: @@ -180,6 +184,7 @@ class DataSourceEc2(sources.DataSource): self.identity = ec2.get_instance_identity( api_version, self.metadata_address, headers_cb=self._get_headers, + headers_redact=AWS_TOKEN_REDACT, exception_cb=self._refresh_stale_aws_token_cb).get( 'document', {}) return self.identity.get( @@ -205,7 +210,8 @@ class DataSourceEc2(sources.DataSource): LOG.debug('Fetching Ec2 IMDSv2 API Token') url, response = uhelp.wait_for_url( urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb, - headers_cb=self._get_headers, request_method=request_method) + headers_cb=self._get_headers, request_method=request_method, + headers_redact=AWS_TOKEN_REDACT) if url and response: self._api_token = response @@ -252,7 +258,8 @@ class DataSourceEc2(sources.DataSource): url, _ = uhelp.wait_for_url( urls=urls, max_wait=url_params.max_wait_seconds, timeout=url_params.timeout_seconds, status_cb=LOG.warning, - headers_cb=self._get_headers, request_method=request_method) + headers_redact=AWS_TOKEN_REDACT, headers_cb=self._get_headers, + request_method=request_method) if url: metadata_address = url2base[url] @@ -420,6 +427,7 @@ class DataSourceEc2(sources.DataSource): if not self.wait_for_metadata_service(): return {} api_version = self.get_metadata_api_version() + redact = AWS_TOKEN_REDACT crawled_metadata = {} if self.cloud_name == CloudNames.AWS: exc_cb = self._refresh_stale_aws_token_cb @@ -429,14 +437,17 @@ class DataSourceEc2(sources.DataSource): try: crawled_metadata['user-data'] = ec2.get_instance_userdata( api_version, self.metadata_address, - headers_cb=self._get_headers, exception_cb=exc_cb_ud) + headers_cb=self._get_headers, headers_redact=redact, + exception_cb=exc_cb_ud) crawled_metadata['meta-data'] = 
ec2.get_instance_metadata( api_version, self.metadata_address, - headers_cb=self._get_headers, exception_cb=exc_cb) + headers_cb=self._get_headers, headers_redact=redact, + exception_cb=exc_cb) if self.cloud_name == CloudNames.AWS: identity = ec2.get_instance_identity( api_version, self.metadata_address, - headers_cb=self._get_headers, exception_cb=exc_cb) + headers_cb=self._get_headers, headers_redact=redact, + exception_cb=exc_cb) crawled_metadata['dynamic'] = {'instance-identity': identity} except Exception: util.logexc( @@ -455,11 +466,12 @@ class DataSourceEc2(sources.DataSource): if self.cloud_name != CloudNames.AWS: return None LOG.debug("Refreshing Ec2 metadata API token") - request_header = {'X-aws-ec2-metadata-token-ttl-seconds': seconds} + request_header = {AWS_TOKEN_REQ_HEADER: seconds} token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE) try: - response = uhelp.readurl( - token_url, headers=request_header, request_method="PUT") + response = uhelp.readurl(token_url, headers=request_header, + headers_redact=AWS_TOKEN_REDACT, + request_method="PUT") except uhelp.UrlError as e: LOG.warning( 'Unable to get API token: %s raised exception %s', @@ -500,8 +512,7 @@ class DataSourceEc2(sources.DataSource): API_TOKEN_DISABLED): return {} # Request a 6 hour token if URL is API_TOKEN_ROUTE - request_token_header = { - 'X-aws-ec2-metadata-token-ttl-seconds': AWS_TOKEN_TTL_SECONDS} + request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS} if API_TOKEN_ROUTE in url: return request_token_header if not self._api_token: @@ -511,7 +522,7 @@ class DataSourceEc2(sources.DataSource): self._api_token = self._refresh_api_token() if not self._api_token: return {} - return {'X-aws-ec2-metadata-token': self._api_token} + return {AWS_TOKEN_PUT_HEADER: self._api_token} class DataSourceEc2Local(DataSourceEc2): diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index f6d68436..eeb27aa8 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -8,6 +8,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. +import copy import json import os import time @@ -31,6 +32,7 @@ LOG = logging.getLogger(__name__) SSL_ENABLED = False CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0) _REQ_VER = None +REDACTED = 'REDACTED' try: from distutils.version import LooseVersion import pkg_resources @@ -189,9 +191,9 @@ def _get_ssl_args(url, ssl_details): def readurl(url, data=None, timeout=None, retries=0, sec_between=1, - headers=None, headers_cb=None, ssl_details=None, - check_status=True, allow_redirects=True, exception_cb=None, - session=None, infinite=False, log_req_resp=True, + headers=None, headers_cb=None, headers_redact=None, + ssl_details=None, check_status=True, allow_redirects=True, + exception_cb=None, session=None, infinite=False, log_req_resp=True, request_method=None): """Wrapper around requests.Session to read the url and retry if necessary @@ -207,6 +209,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, :param headers: Optional dict of headers to send during request :param headers_cb: Optional callable returning a dict of values to send as headers during request + :param headers_redact: Optional list of header names to redact from the log :param ssl_details: Optional dict providing key_file, ca_certs, and cert_file keys for use on in ssl connections. 
:param check_status: Optional boolean set True to raise when HTTPError @@ -233,6 +236,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, req_args['method'] = request_method if timeout is not None: req_args['timeout'] = max(float(timeout), 0) + if headers_redact is None: + headers_redact = [] # It doesn't seem like config # was added in older library versions (or newer ones either), thus we # need to manually do the retries if it wasn't... @@ -277,6 +282,12 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, if k == 'data': continue filtered_req_args[k] = v + if k == 'headers': + for hkey, _hval in v.items(): + if hkey in headers_redact: + filtered_req_args[k][hkey] = ( + copy.deepcopy(req_args[k][hkey])) + filtered_req_args[k][hkey] = REDACTED try: if log_req_resp: @@ -329,8 +340,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, return None # Should throw before this... -def wait_for_url(urls, max_wait=None, timeout=None, - status_cb=None, headers_cb=None, sleep_time=1, +def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None, + headers_cb=None, headers_redact=None, sleep_time=1, exception_cb=None, sleep_time_cb=None, request_method=None): """ urls: a list of urls to try @@ -342,6 +353,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb: call method with string message when a url is not available headers_cb: call method with single argument of url to get headers for request. + headers_redact: a list of header names to redact from the log exception_cb: call method with 2 arguments 'msg' (per status_cb) and 'exception', the exception that occurred. sleep_time_cb: call method with 2 arguments (response, loop_n) that @@ -405,8 +417,9 @@ def wait_for_url(urls, max_wait=None, timeout=None, headers = {} response = readurl( - url, headers=headers, timeout=timeout, - check_status=False, request_method=request_method) + url, headers=headers, headers_redact=headers_redact, + timeout=timeout, check_status=False, + request_method=request_method) if not response.contents: reason = "empty response [%s]" % (response.code) url_exc = UrlError(ValueError(reason), code=response.code, diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 19e1af2b..2a96122f 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -429,6 +429,23 @@ class TestEc2(test_helpers.HttprettyTestCase): self.assertTrue(ds.get_data()) self.assertFalse(ds.is_classic_instance()) + def test_aws_token_redacted(self): + """Verify that aws tokens are redacted when logged.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + self.assertTrue(ds.get_data()) + all_logs = self.logs.getvalue().splitlines() + REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" + REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" + logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] + logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] + logs_with_token = [log for log in all_logs if 'API-TOKEN' in log] + self.assertEqual(1, len(logs_with_redacted_ttl)) + self.assertEqual(79, len(logs_with_redacted)) + self.assertEqual(0, len(logs_with_token)) + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_valid_platform_with_strict_true(self, m_dhcp): """Valid platform data should return true with strict_id 
true.""" -- cgit v1.2.3