From f55bb17ddb2fd64e039057bf7ee50951a0dc93e8 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 20 Dec 2018 17:22:45 +0000 Subject: Vmware: Add support for the com.vmware.guestInfo OVF transport. This adds support for reading OVF information over the 'com.vmware.guestInfo' transport. The current implementation requires that vmware-rpctool be installed on the system. LP: #1807466 --- tests/unittests/test_datasource/test_ovf.py | 72 ++++++++++++++++++++++++++--- 1 file changed, 66 insertions(+), 6 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index a226c032..e4af0fa3 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -17,6 +17,8 @@ from cloudinit.sources import DataSourceOVF as dsovf from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( CustomScriptNotFound) +MPATH = 'cloudinit.sources.DataSourceOVF.' + OVF_ENV_CONTENT = """ Date: Thu, 20 Dec 2018 20:52:05 +0000 Subject: OVF: simplify expected return values of transport functions. Transport functions (transport_iso9660 and transport_vmware_guestinfo) would return a tuple of 3 values, but only the first was ever used outside of test. The other values (device and filename) were just ignored. This simplifies the transport functions to return either the content (as a string) or None, indicating that the transport was not found. --- cloudinit/sources/DataSourceOVF.py | 20 ++++----- tests/unittests/test_datasource/test_ovf.py | 70 ++++++++++------------------- 2 files changed, 33 insertions(+), 57 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 891d6547..3a3fcdf6 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -236,7 +236,7 @@ class DataSourceOVF(sources.DataSource): ('iso', transport_iso9660)] name = None for name, transfunc in np: - (contents, _dev, _fname) = transfunc() + contents = transfunc() if contents: break if contents: @@ -464,8 +464,8 @@ def maybe_cdrom_device(devname): return cdmatch.match(devname) is not None -# Transport functions take no input and return -# a 3 tuple of content, path, filename +# Transport functions are called with no arguments and return +# either None (indicating not present) or string content of an ovf-env.xml def transport_iso9660(require_iso=True): # Go through mounts to see if it was already mounted @@ -477,9 +477,9 @@ def transport_iso9660(require_iso=True): if not maybe_cdrom_device(dev): continue mp = info['mountpoint'] - (fname, contents) = get_ovf_env(mp) + (_fname, contents) = get_ovf_env(mp) if contents is not False: - return (contents, dev, fname) + return contents if require_iso: mtype = "iso9660" @@ -492,27 +492,27 @@ def transport_iso9660(require_iso=True): if maybe_cdrom_device(dev)] for dev in devs: try: - (fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype) + (_fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype) except util.MountFailedError: LOG.debug("%s not mountable as iso9660", dev) continue if contents is not False: - return (contents, dev, fname) + return contents - return (False, None, None) + return None def transport_vmware_guestinfo(): rpctool = "vmware-rpctool" - not_found = (False, None, None) + not_found = None if not util.which(rpctool): return not_found cmd = [rpctool, "info-get guestinfo.ovfEnv"] try: out, _err =
util.subp(cmd) if out: - return (out, rpctool, "guestinfo.ovfEnv") + return out LOG.debug("cmd %s exited 0 with empty stdout: %s", cmd, out) except util.ProcessExecutionError as e: if e.exit_code != 1: diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index e4af0fa3..349d54cc 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -19,6 +19,8 @@ from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( MPATH = 'cloudinit.sources.DataSourceOVF.' +NOT_FOUND = None + OVF_ENV_CONTENT = """ Date: Thu, 20 Dec 2018 21:49:09 +0000 Subject: Scaleway: Support ssh keys provided inside an instance tag. The change here will utilize ssh keys found inside an instance's tags. The tag value must start with 'AUTHORIZED_KEY'. --- cloudinit/sources/DataSourceScaleway.py | 11 +++- tests/unittests/test_datasource/test_scaleway.py | 76 ++++++++++++++++++++++-- 2 files changed, 82 insertions(+), 5 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 9dc4ab23..b573b382 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -253,7 +253,16 @@ class DataSourceScaleway(sources.DataSource): return self.metadata['id'] def get_public_ssh_keys(self): - return [key['key'] for key in self.metadata['ssh_public_keys']] + ssh_keys = [key['key'] for key in self.metadata['ssh_public_keys']] + + akeypre = "AUTHORIZED_KEY=" + plen = len(akeypre) + for tag in self.metadata.get('tags', []): + if not tag.startswith(akeypre): + continue + ssh_keys.append(tag[plen:].replace("_", " ")) + + return ssh_keys def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): return self.metadata['hostname'] diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index c2bc7a00..f96bf0a2 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -49,6 +49,9 @@ class MetadataResponses(object): FAKE_METADATA = { 'id': '00000000-0000-0000-0000-000000000000', 'hostname': 'scaleway.host', + 'tags': [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ], 'ssh_public_keys': [{ 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', 'fingerprint': '2048 06:ae:...
login (RSA)' @@ -204,10 +207,11 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertEqual(self.datasource.get_instance_id(), MetadataResponses.FAKE_METADATA['id']) - self.assertEqual(self.datasource.get_public_ssh_keys(), [ - elem['key'] for elem in - MetadataResponses.FAKE_METADATA['ssh_public_keys'] - ]) + self.assertEqual(sorted(self.datasource.get_public_ssh_keys()), sorted([ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ])) self.assertEqual(self.datasource.get_hostname(), MetadataResponses.FAKE_METADATA['hostname']) self.assertEqual(self.datasource.get_userdata_raw(), @@ -218,6 +222,70 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.region) self.assertEqual(sleep.call_count, 0) + def test_ssh_keys_empty(self): + """ + get_public_ssh_keys() should return an empty list if no ssh keys are + available + """ + self.datasource.metadata['tags'] = [] + self.datasource.metadata['ssh_public_keys'] = [] + self.assertEqual(self.datasource.get_public_ssh_keys(), []) + + def test_ssh_keys_only_tags(self): + """ + get_public_ssh_keys() should return the list of keys available in tags + """ + self.datasource.metadata['tags'] = [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC", + ] + self.datasource.metadata['ssh_public_keys'] = [] + self.assertEqual(sorted(self.datasource.get_public_ssh_keys()), sorted([ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + ])) + + def test_ssh_keys_only_conf(self): + """ + get_public_ssh_keys() should return the list of keys available in the + ssh_public_keys field + """ + self.datasource.metadata['tags'] = [] + self.datasource.metadata['ssh_public_keys'] = [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + self.assertEqual(sorted(self.datasource.get_public_ssh_keys()), sorted([ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ])) + + def test_ssh_keys_both(self): + """ + get_public_ssh_keys() should return a merge of keys available + in ssh_public_keys and tags + """ + self.datasource.metadata['tags'] = [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ] + + self.datasource.metadata['ssh_public_keys'] = [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + self.assertEqual(sorted(self.datasource.get_public_ssh_keys()), sorted([ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ])) + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) -- cgit v1.2.3 From 0bb4c74e7f2d008b015d5453a1be88ae807b1f9b Mon Sep 17 00:00:00 2001 From: "Guilherme G. Piccoli" Date: Thu, 14 Feb 2019 20:37:32 +0000 Subject: EC2: Rewrite network config on AWS Classic instances every boot AWS EC2 instances' networking comes in two basic flavors: Classic and VPC (Virtual Private Cloud).
The former has an interesting behavior of having its MAC address changed whenever the instance is stopped/restarted. This behavior is not observed in VPC instances. In Ubuntu 18.04 (Bionic) the network "management" changed from ENI-style (/etc/network/interfaces) to netplan, and when using netplan we observe the following block present in /etc/netplan/50-cloud-init.yaml: match: macaddress: aa:bb:cc:dd:ee:ff Jani Ollikainen noticed in Launchpad bug #1802073 that the EC2 Classic instances were booting without network access in Bionic after a stop/restart procedure, due to their MAC address change behavior. It was narrowed down to the netplan MAC match block, which kept the old MAC address after stopping and restarting an instance, since the network configuration writing happens by default only once in EC2 instances, in the first boot. This patch changes the network configuration to be written on every boot for EC2 Classic instances, by checking against the "vpc-id" metadata information provided only for VPC instances - if we don't have this metadata value, cloud-init will rewrite the network configuration file on every boot. This was tested in an EC2 Classic instance and proved to fix the issue; unit tests were also added for the new method is_classic_instance(). LP: #1802073 Reported-by: Jani Ollikainen Suggested-by: Ryan Harper Co-developed-by: Chad Smith Signed-off-by: Guilherme G. Piccoli --- cloudinit/sources/DataSourceEc2.py | 21 +++++++++++++++++++++ doc/rtd/topics/datasources/ec2.rst | 11 +++++++++++ tests/unittests/test_datasource/test_ec2.py | 24 ++++++++++++++++++++++++ 3 files changed, 56 insertions(+) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index eb6f27b2..4f2f6ccb 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -19,6 +19,7 @@ from cloudinit import sources from cloudinit import url_helper as uhelp from cloudinit import util from cloudinit import warnings +from cloudinit.event import EventType LOG = logging.getLogger(__name__) @@ -107,6 +108,19 @@ class DataSourceEc2(sources.DataSource): 'dynamic', {}).get('instance-identity', {}).get('document', {}) return True + def is_classic_instance(self): + """Report if this instance type is Ec2 Classic (non-vpc).""" + if not self.metadata: + # Can return False on inconclusive as we are also called in + # network_config where metadata will be present. + # Secondary call site is in packaging postinst script. + return False + ifaces_md = self.metadata.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + return False + return True + @property def launch_index(self): if not self.metadata: @@ -320,6 +334,13 @@ class DataSourceEc2(sources.DataSource): if isinstance(net_md, dict): result = convert_ec2_metadata_network_config( net_md, macs_to_nics=macs_to_nics, fallback_nic=iface) + # RELEASE_BLOCKER: Xenial debian/postinst needs to add + # EventType.BOOT on upgrade path for classic. + + # Non-VPC (aka Classic) Ec2 instances need to rewrite the + # network config file every boot due to MAC address change.
+ if self.is_classic_instance(): + self.update_events['network'].add(EventType.BOOT) else: LOG.warning("Metadata 'network' key not valid: %s.", net_md) self._network_config = result diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst index 64c325d8..76beca92 100644 --- a/doc/rtd/topics/datasources/ec2.rst +++ b/doc/rtd/topics/datasources/ec2.rst @@ -90,4 +90,15 @@ An example configuration with the default values is provided below: max_wait: 120 timeout: 50 +Notes +----- + * There are two types of EC2 instances network-wise: VPC ones (Virtual Private + Cloud) and Classic ones (also known as non-VPC). One major difference + between them is that Classic instances have their MAC address changed on + stop/restart operations, so cloud-init will recreate the network config + file for EC2 Classic instances every boot. On VPC instances this file is + generated only in the first boot of the instance. + The check for the instance type is performed by the is_classic_instance() + method. + .. vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 1a5956d9..20d59bfd 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -401,6 +401,30 @@ class TestEc2(test_helpers.HttprettyTestCase): ds.metadata = DEFAULT_METADATA self.assertEqual('my-identity-id', ds.get_instance_id()) + def test_classic_instance_true(self): + """If no vpc-id in metadata, is_classic_instance must return true.""" + md_copy = copy.deepcopy(DEFAULT_METADATA) + ifaces_md = md_copy.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + del mac_data['vpc-id'] + + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': md_copy}) + self.assertTrue(ds.get_data()) + self.assertTrue(ds.is_classic_instance()) + + def test_classic_instance_false(self): + """If vpc-id in metadata, is_classic_instance must return false.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + self.assertTrue(ds.get_data()) + self.assertFalse(ds.is_classic_instance()) + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_valid_platform_with_strict_true(self, m_dhcp): """Valid platform data should return true with strict_id true.""" -- cgit v1.2.3 From 34f54360fcc1e0f805002a0b639d0a84eb2cb8ee Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Fri, 22 Feb 2019 13:26:31 +0000 Subject: azure: Filter list of ssh keys pulled from fabric The Azure data source is expected to expose a list of ssh keys for the user-to-be-provisioned in the crawled metadata. When configured to use the __builtin__ agent this list is built by the WALinuxAgentShim. The shim retrieves the full set of certificates and public keys exposed to the VM from the wireserver, extracts any ssh keys it can, and returns that list. This fix reduces that list of ssh keys to just the ones whose fingerprints appear in the "administrative user" section of the ovf-env.xml file. The Azure control plane exposes other ssh keys to the VM for other reasons, but those should not be added to the authorized_keys file for the provisioned user.
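To make the selection rule concrete, here is a condensed sketch of the filtering step (it mirrors the _filter_pubkeys() helper added in the diff below, minus its warning paths; the dict shapes are taken from that diff):

    # Condensed from the _filter_pubkeys() helper added below.
    # keys_by_fingerprint: dict of fingerprint -> ssh public key, as
    # returned by OpenSSLManager.parse_certificates().
    # pubkey_info: the '_pubkeys' entries parsed out of ovf-env.xml.
    def filter_pubkeys(keys_by_fingerprint, pubkey_info):
        keys = []
        for pubkey in pubkey_info:
            if pubkey.get('value'):
                # ovf-env.xml may carry the key material directly.
                keys.append(pubkey['value'])
            elif pubkey.get('fingerprint') in keys_by_fingerprint:
                # Otherwise pick the cert-derived key whose fingerprint
                # was named for the administrative user.
                keys.append(keys_by_fingerprint[pubkey['fingerprint']])
        return keys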
--- cloudinit/sources/DataSourceAzure.py | 13 +- cloudinit/sources/helpers/azure.py | 109 ++++++++++----- tests/data/azure/parse_certificates_fingerprints | 4 + tests/data/azure/parse_certificates_pem | 152 +++++++++++++++++++++ tests/data/azure/pubkey_extract_cert | 13 ++ tests/data/azure/pubkey_extract_ssh_key | 1 + .../unittests/test_datasource/test_azure_helper.py | 71 +++++++++- 7 files changed, 322 insertions(+), 41 deletions(-) create mode 100644 tests/data/azure/parse_certificates_fingerprints create mode 100644 tests/data/azure/parse_certificates_pem create mode 100644 tests/data/azure/pubkey_extract_cert create mode 100644 tests/data/azure/pubkey_extract_ssh_key (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index a4f998b3..eccbee5a 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -627,9 +627,11 @@ class DataSourceAzure(sources.DataSource): if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: self.bounce_network_with_azure_hostname() + pubkey_info = self.cfg.get('_pubkeys', None) metadata_func = partial(get_metadata_from_fabric, fallback_lease_file=self. - dhclient_lease_file) + dhclient_lease_file, + pubkey_info=pubkey_info) else: metadata_func = self.get_metadata_from_agent @@ -642,6 +644,7 @@ class DataSourceAzure(sources.DataSource): "Error communicating with Azure fabric; You may experience." "connectivity issues.", exc_info=True) return False + util.del_file(REPORTED_READY_MARKER_FILE) util.del_file(REPROVISION_MARKER_FILE) return fabric_data @@ -909,13 +912,15 @@ def find_child(node, filter_func): def load_azure_ovf_pubkeys(sshnode): # This parses a 'SSH' node formatted like below, and returns # an array of dicts. - # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', - # 'path': 'where/to/go'}] + # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', + # 'path': '/where/to/go'}] # # <SSH><PublicKeys> - # <Fingerprint>ABC</Fingerprint><Path>/ABC</Path> + # <PublicKey><Fingerprint>ABC</Fingerprint><Path>/x/y/z</Path> # ... # </PublicKeys></SSH> + # Under some circumstances, there may be a <Value> element along with the + # Fingerprint and Path. Pass those along if they appear.
results = find_child(sshnode, lambda n: n.localName == "PublicKeys") if len(results) == 0: return [] diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index e5696b1f..2829dd20 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -138,9 +138,36 @@ class OpenSSLManager(object): self.certificate = certificate LOG.debug('New certificate generated.') - def parse_certificates(self, certificates_xml): - tag = ElementTree.fromstring(certificates_xml).find( - './/Data') + @staticmethod + def _run_x509_action(action, cert): + cmd = ['openssl', 'x509', '-noout', action] + result, _ = util.subp(cmd, data=cert) + return result + + def _get_ssh_key_from_cert(self, certificate): + pub_key = self._run_x509_action('-pubkey', certificate) + keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] + ssh_key, _ = util.subp(keygen_cmd, data=pub_key) + return ssh_key + + def _get_fingerprint_from_cert(self, certificate): + """openssl x509 formats fingerprints like so: + 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ + B6:A8:BF:27:D4:73\n' + + Azure control plane passes that fingerprint like so: + '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + """ + raw_fp = self._run_x509_action('-fingerprint', certificate) + eq = raw_fp.find('=') + octets = raw_fp[eq+1:-1].split(':') + return ''.join(octets) + + def _decrypt_certs_from_xml(self, certificates_xml): + """Decrypt the certificates XML document using our private key; + return the list of certs and private keys contained in the doc. + """ + tag = ElementTree.fromstring(certificates_xml).find('.//Data') certificates_content = tag.text lines = [ b'MIME-Version: 1.0', @@ -151,32 +178,30 @@ class OpenSSLManager(object): certificates_content.encode('utf-8'), ] with cd(self.tmpdir): - with open('Certificates.p7m', 'wb') as f: - f.write(b'\n'.join(lines)) out, _ = util.subp( - 'openssl cms -decrypt -in Certificates.p7m -inkey' + 'openssl cms -decrypt -in /dev/stdin -inkey' ' {private_key} -recip {certificate} | openssl pkcs12 -nodes' ' -password pass:'.format(**self.certificate_names), - shell=True) - private_keys, certificates = [], [] + shell=True, data=b'\n'.join(lines)) + return out + + def parse_certificates(self, certificates_xml): + """Given the Certificates XML document, return a dictionary of + fingerprints and associated SSH keys derived from the certs.""" + out = self._decrypt_certs_from_xml(certificates_xml) current = [] + keys = {} for line in out.splitlines(): current.append(line) if re.match(r'[-]+END .*?KEY[-]+$', line): - private_keys.append('\n'.join(current)) + # ignore private_keys current = [] elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line): - certificates.append('\n'.join(current)) + certificate = '\n'.join(current) + ssh_key = self._get_ssh_key_from_cert(certificate) + fingerprint = self._get_fingerprint_from_cert(certificate) + keys[fingerprint] = ssh_key current = [] - keys = [] - for certificate in certificates: - with cd(self.tmpdir): - public_key, _ = util.subp( - 'openssl x509 -noout -pubkey |' - 'ssh-keygen -i -m PKCS8 -f /dev/stdin', - data=certificate, - shell=True) - keys.append(public_key) return keys @@ -206,7 +231,6 @@ class WALinuxAgentShim(object): self.dhcpoptions = dhcp_options self._endpoint = None self.openssl_manager = None - self.values = {} self.lease_file = fallback_lease_file def clean_up(self): @@ -328,8 +352,9 @@ class WALinuxAgentShim(object): LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return
endpoint_ip_address - def register_with_azure_and_fetch_data(self): - self.openssl_manager = OpenSSLManager() + def register_with_azure_and_fetch_data(self, pubkey_info=None): + if self.openssl_manager is None: + self.openssl_manager = OpenSSLManager() http_client = AzureEndpointHttpClient(self.openssl_manager.certificate) LOG.info('Registering with Azure...') attempts = 0 @@ -347,16 +372,37 @@ class WALinuxAgentShim(object): attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) - public_keys = [] - if goal_state.certificates_xml is not None: + ssh_keys = [] + if goal_state.certificates_xml is not None and pubkey_info is not None: LOG.debug('Certificate XML found; parsing out public keys.') - public_keys = self.openssl_manager.parse_certificates( + keys_by_fingerprint = self.openssl_manager.parse_certificates( goal_state.certificates_xml) - data = { - 'public-keys': public_keys, - } + ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info) self._report_ready(goal_state, http_client) - return data + return {'public-keys': ssh_keys} + + def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info): + """cloud-init expects a straightforward array of keys to be dropped + into the user's authorized_keys file. Azure control plane exposes + multiple public keys to the VM via wireserver. Select just the + user's key(s) and return them, ignoring any other certs. + """ + keys = [] + for pubkey in pubkey_info: + if 'value' in pubkey and pubkey['value']: + keys.append(pubkey['value']) + elif 'fingerprint' in pubkey and pubkey['fingerprint']: + fingerprint = pubkey['fingerprint'] + if fingerprint in keys_by_fingerprint: + keys.append(keys_by_fingerprint[fingerprint]) + else: + LOG.warning("ovf-env.xml specified PublicKey fingerprint " + "%s not found in goalstate XML", fingerprint) + else: + LOG.warning("ovf-env.xml specified PublicKey with neither " + "value nor fingerprint: %s", pubkey) + + return keys def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') @@ -373,11 +419,12 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') -def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): +def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, + pubkey_info=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts) try: - return shim.register_with_azure_and_fetch_data() + return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) finally: shim.clean_up() diff --git a/tests/data/azure/parse_certificates_fingerprints b/tests/data/azure/parse_certificates_fingerprints new file mode 100644 index 00000000..f7293c56 --- /dev/null +++ b/tests/data/azure/parse_certificates_fingerprints @@ -0,0 +1,4 @@ +ECEDEB3B8488D31AF3BC4CCED493F64B7D27D7B1 +073E19D14D1C799224C6A0FD8DDAB6A8BF27D473 +4C16E7FAD6297D74A9B25EB8F0A12808CEBE293E +929130695289B450FE45DCD5F6EF0CDE69865867 diff --git a/tests/data/azure/parse_certificates_pem b/tests/data/azure/parse_certificates_pem new file mode 100644 index 00000000..3521ea3a --- /dev/null +++ b/tests/data/azure/parse_certificates_pem @@ -0,0 +1,152 @@ +Bag Attributes + localKeyID: 01 00 00 00 + Microsoft CSP Name: Microsoft Enhanced Cryptographic Provider v1.0 +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDlEe5fUqwdrQTP +W2oVlGK2f31q/8ULT8KmOTyUvL0RPdJQ69vvHOc5Q2CKg2eviHC2LWhF8WmpnZj6 
+61RL0GeFGizwvU8Moebw5p3oqdcgoGpHVtxf+mr4QcWF58/Fwez0dA4hcsimVNBz +eNpBBUIKNBMTBG+4d6hcQBUAGKUdGRcCGEyTqXLU0MgHjxC9JgVqWJl+X2LcAGj5 +7J+tGYGTLzKJmeCeGVNN5ZtJ0T85MYHCKQk1/FElK+Kq5akovXffQHjlnCPcx0NJ +47NBjlPaFp2gjnAChn79bT4iCjOFZ9avWpqRpeU517UCnY7djOr3fuod/MSQyh3L +Wuem1tWBAgMBAAECggEBAM4ZXQRs6Kjmo95BHGiAEnSqrlgX+dycjcBq3QPh8KZT +nifqnf48XhnackENy7tWIjr3DctoUq4mOp8AHt77ijhqfaa4XSg7fwKeK9NLBGC5 +lAXNtAey0o2894/sKrd+LMkgphoYIUnuI4LRaGV56potkj/ZDP/GwTcG/R4SDnTn +C1Nb05PNTAPQtPZrgPo7TdM6gGsTnFbVrYHQLyg2Sq/osHfF15YohB01esRLCAwb +EF8JkRC4hWIZoV7BsyQ39232zAJQGGla7+wKFs3kObwh3VnFkQpT94KZnNiZuEfG +x5pW4Pn3gXgNsftscXsaNe/M9mYZqo//Qw7NvUIvAvECgYEA9AVveyK0HOA06fhh ++3hUWdvw7Pbrl+e06jO9+bT1RjQMbHKyI60DZyVGuAySN86iChJRoJr5c6xj+iXU +cR6BVJDjGH5t1tyiK2aYf6hEpK9/j8Z54UiVQ486zPP0PGfT2TO4lBLK+8AUmoaH +gk21ul8QeVCeCJa/o+xEoRFvzcUCgYEA8FCbbvInrUtNY+9eKaUYoNodsgBVjm5X +I0YPUL9D4d+1nvupHSV2NVmQl0w1RaJwrNTafrl5LkqjhQbmuWNta6QgfZzSA3LB +lWXo1Mm0azKdcD3qMGbvn0Q3zU+yGNEgmB/Yju3/NtgYRG6tc+FCWRbPbiCnZWT8 +v3C2Y0XggI0CgYEA2/jCZBgGkTkzue5kNVJlh5OS/aog+pCvL6hxCtarfBuTT3ed +Sje+p46cz3DVpmUpATc+Si8py7KNdYQAm/BJ2be6X+woi9Xcgo87zWgcaPCjZzId +0I2jsIE/Gl6XvpRCDrxnGWRPgt3GNP4szbPLrDPiH9oie8+Y9eYYf7G+PZkCgYEA +nRSzZOPYV4f/QDF4pVQLMykfe/iH9B/fyWjEHg3He19VQmRReIHCMMEoqBziPXAe +onpHj8oAkeer1wpZyhhZr6CKtFDLXgGm09bXSC/IRMHC81klORovyzU2HHfZfCtG +WOmIDnU2+0xpIGIP8sztJ3qnf97MTJSkOSadsWo9gwkCgYEAh5AQmJQmck88Dff2 +qIfJIX8d+BDw47BFJ89OmMFjGV8TNB+JO+AV4Vkodg4hxKpLqTFZTTUFgoYfy5u1 +1/BhAjpmCDCrzubCFhx+8VEoM2+2+MmnuQoMAm9+/mD/IidwRaARgXgvEmp7sfdt +RyWd+p2lYvFkC/jORQtDMY4uW1o= +-----END PRIVATE KEY----- +Bag Attributes + localKeyID: 02 00 00 00 + Microsoft CSP Name: Microsoft Strong Cryptographic Provider +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDlQhPrZwVQYFV4 +FBc0H1iTXYaznMpwZvEITKtXWACzTdguUderEVOkXW3HTi5HvC2rMayt0nqo3zcd +x1eGiqdjpZQ/wMrkz9wNEM/nNMsXntEwxk0jCVNKB/jz6vf+BOtrSI01SritAGZW +dpKoTUyztT8C2mA3X6D8g3m4Dd07ltnzxaDqAQIU5jBHh3f/Q14tlPNZWUIiqVTC +gDxgAe7MDmfs9h3CInTBX1XM5J4UsLTL23/padgeSvP5YF5qr1+0c7Tdftxr2lwA +N3rLkisf5EiLAToVyJJlgP/exo2I8DaIKe7DZzD3Y1CrurOpkcMKYu5kM1Htlbua +tDkAa2oDAgMBAAECggEAOvdueS9DyiMlCKAeQb1IQosdQOh0l0ma+FgEABC2CWhd +0LgjQTBRM6cGO+urcq7/jhdWQ1UuUG4tVn71z7itCi/F/Enhxc2C22d2GhFVpWsn +giSXJYpZ/mIjkdVfWNo6FRuRmmHwMys1p0qTOS+8qUJWhSzW75csqJZGgeUrAI61 +LBV5F0SGR7dR2xZfy7PeDs9xpD0QivDt5DpsZWPaPvw4QlhdLgw6/YU1h9vtm6ci +xLjnPRLZ7JMpcQHO8dUDl6FiEI7yQ11BDm253VQAVMddYRPQABn7SpEF8kD/aZVh +2Clvz61Rz80SKjPUthMPLWMCRp7zB0xDMzt3/1i+tQKBgQD6Ar1/oD3eFnRnpi4u +n/hdHJtMuXWNfUA4dspNjP6WGOid9sgIeUUdif1XyVJ+afITzvgpWc7nUWIqG2bQ +WxJ/4q2rjUdvjNXTy1voVungR2jD5WLQ9DKeaTR0yCliWlx4JgdPG7qGI5MMwsr+ +R/PUoUUhGeEX+o/sCSieO3iUrQKBgQDqwBEMvIdhAv/CK2sG3fsKYX8rFT55ZNX3 +Tix9DbUGY3wQColNuI8U1nDlxE9U6VOfT9RPqKelBLCgbzB23kdEJnjSlnqlTxrx +E+Hkndyf2ckdJAR3XNxoQ6SRLJNBsgoBj/z5tlfZE9/Jc+uh0mYy3e6g6XCVPBcz +MgoIc+ofbwKBgQCGQhZ1hR30N+bHCozeaPW9OvGDIE0qcEqeh9xYDRFilXnF6pK9 +SjJ9jG7KR8jPLiHb1VebDSl5O1EV/6UU2vNyTc6pw7LLCryBgkGW4aWy1WZDXNnW +EG1meGS9GghvUss5kmJ2bxOZmV0Mi0brisQ8OWagQf+JGvtS7BAt+Q3l+QKBgAb9 +8YQPmXiqPjPqVyW9Ntz4SnFeEJ5NApJ7IZgX8GxgSjGwHqbR+HEGchZl4ncE/Bii +qBA3Vcb0fM5KgYcI19aPzsl28fA6ivLjRLcqfIfGVNcpW3iyq13vpdctHLW4N9QU +FdTaOYOds+ysJziKq8CYG6NvUIshXw+HTgUybqbBAoGBAIIOqcmmtgOClAwipA17 +dAHsI9Sjk+J0+d4JU6o+5TsmhUfUKIjXf5+xqJkJcQZMEe5GhxcCuYkgFicvh4Hz +kv2H/EU35LcJTqC6KTKZOWIbGcn1cqsvwm3GQJffYDiO8fRZSwCaif2J3F2lfH4Y +R/fA67HXFSTT+OncdRpY1NOn +-----END PRIVATE KEY----- +Bag Attributes: +subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US 
+issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIlPjJXzrRih4C +k/XsoI01oqo7IUxH3dA2F7vHGXQoIpKCp8Qe6Z6cFfdD8Uj+s+B1BX6hngwzIwjN +jE/23X3SALVzJVWzX4Y/IEjbgsuao6sOyNyB18wIU9YzZkVGj68fmMlUw3LnhPbe +eWkufZaJCaLyhQOwlRMbOcn48D6Ys8fccOyXNzpq3rH1OzeQpxS2M8zaJYP4/VZ/ +sf6KRpI7bP+QwyFvNKfhcaO9/gj4kMo9lVGjvDU20FW6g8UVNJCV9N4GO6mOcyqo +OhuhVfjCNGgW7N1qi0TIVn0/MQM4l4dcT2R7Z/bV9fhMJLjGsy5A4TLAdRrhKUHT +bzi9HyDvAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 01 00 00 00 +subject=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com +issuer=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com +-----BEGIN CERTIFICATE----- +MIID7TCCAtWgAwIBAgIJALQS3yMg3R41MA0GCSqGSIb3DQEBCwUAMIGMMQswCQYD +VQQGEwJVUzETMBEGA1UECAwKV0FTSElOR1RPTjEQMA4GA1UEBwwHU2VhdHRsZTES +MBAGA1UECgwJTWljcm9zb2Z0MQ4wDAYDVQQLDAVBenVyZTEOMAwGA1UEAwwFQW5o +Vm8xIjAgBgkqhkiG9w0BCQEWE2FuaHZvQG1pY3Jvc29mdC5jb20wHhcNMTkwMjE0 +MjMxMjQwWhcNMjExMTEwMjMxMjQwWjCBjDELMAkGA1UEBhMCVVMxEzARBgNVBAgM +CldBU0hJTkdUT04xEDAOBgNVBAcMB1NlYXR0bGUxEjAQBgNVBAoMCU1pY3Jvc29m +dDEOMAwGA1UECwwFQXp1cmUxDjAMBgNVBAMMBUFuaFZvMSIwIAYJKoZIhvcNAQkB +FhNhbmh2b0BtaWNyb3NvZnQuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA5RHuX1KsHa0Ez1tqFZRitn99av/FC0/Cpjk8lLy9ET3SUOvb7xznOUNg +ioNnr4hwti1oRfFpqZ2Y+utUS9BnhRos8L1PDKHm8Oad6KnXIKBqR1bcX/pq+EHF +hefPxcHs9HQOIXLIplTQc3jaQQVCCjQTEwRvuHeoXEAVABilHRkXAhhMk6ly1NDI +B48QvSYFaliZfl9i3ABo+eyfrRmBky8yiZngnhlTTeWbSdE/OTGBwikJNfxRJSvi +quWpKL1330B45Zwj3MdDSeOzQY5T2hadoI5wAoZ+/W0+IgozhWfWr1qakaXlOde1 +Ap2O3Yzq937qHfzEkMody1rnptbVgQIDAQABo1AwTjAdBgNVHQ4EFgQUPvdgLiv3 +pAk4r0QTPZU3PFOZJvgwHwYDVR0jBBgwFoAUPvdgLiv3pAk4r0QTPZU3PFOZJvgw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAVUHZT+h9+uCPLTEl5IDg +kqd9WpzXA7PJd/V+7DeDDTkEd06FIKTWZLfxLVVDjQJnQqubQb//e0zGu1qKbXnX +R7xqWabGU4eyPeUFWddmt1OHhxKLU3HbJNJJdL6XKiQtpGGUQt/mqNQ/DEr6hhNF +im5I79iA8H/dXA2gyZrj5Rxea4mtsaYO0mfp1NrFtJpAh2Djy4B1lBXBIv4DWG9e +mMEwzcLCOZj2cOMA6+mdLMUjYCvIRtnn5MKUHyZX5EmX79wsqMTvVpddlVLB9Kgz +Qnvft9+SBWh9+F3ip7BsL6Q4Q9v8eHRbnP0ya7ddlgh64uwf9VOfZZdKCnwqudJP +3g== +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 02 00 00 00 +subject=/CN=/subscriptions/redacted/resourcegroups/redacted/providers/Microsoft.Compute/virtualMachines/redacted +issuer=/CN=Microsoft.ManagedIdentity +-----BEGIN CERTIFICATE----- +MIIDnTCCAoWgAwIBAgIUB2lauSRccvFkoJybUfIwOUqBN7MwDQYJKoZIhvcNAQEL +BQAwJDEiMCAGA1UEAxMZTWljcm9zb2Z0Lk1hbmFnZWRJZGVudGl0eTAeFw0xOTAy +MTUxOTA5MDBaFw0xOTA4MTQxOTA5MDBaMIGUMYGRMIGOBgNVBAMTgYYvc3Vic2Ny +aXB0aW9ucy8yN2I3NTBjZC1lZDQzLTQyZmQtOTA0NC04ZDc1ZTEyNGFlNTUvcmVz +b3VyY2Vncm91cHMvYW5oZXh0cmFzc2gvcHJvdmlkZXJzL01pY3Jvc29mdC5Db21w +dXRlL3ZpcnR1YWxNYWNoaW5lcy9hbmh0ZXN0Y2VydDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAOVCE+tnBVBgVXgUFzQfWJNdhrOcynBm8QhMq1dYALNN +2C5R16sRU6RdbcdOLke8LasxrK3SeqjfNx3HV4aKp2OllD/AyuTP3A0Qz+c0yxee +0TDGTSMJU0oH+PPq9/4E62tIjTVKuK0AZlZ2kqhNTLO1PwLaYDdfoPyDebgN3TuW +2fPFoOoBAhTmMEeHd/9DXi2U81lZQiKpVMKAPGAB7swOZ+z2HcIidMFfVczknhSw +tMvbf+lp2B5K8/lgXmqvX7RztN1+3GvaXAA3esuSKx/kSIsBOhXIkmWA/97GjYjw +Nogp7sNnMPdjUKu6s6mRwwpi7mQzUe2Vu5q0OQBragMCAwEAAaNWMFQwDgYDVR0P 
+AQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwHwYD +VR0jBBgwFoAUOJvzEsriQWdJBndPrK+Me1bCPjYwDQYJKoZIhvcNAQELBQADggEB +AFGP/g8o7Hv/to11M0UqfzJuW/AyH9RZtSRcNQFLZUndwweQ6fap8lFsA4REUdqe +7Quqp5JNNY1XzKLWXMPoheIDH1A8FFXdsAroArzlNs9tO3TlIHE8A7HxEVZEmR4b +7ZiixmkQPS2RkjEoV/GM6fheBrzuFn7X5kVZyE6cC5sfcebn8xhk3ZcXI0VmpdT0 +jFBsf5IvFCIXXLLhJI4KXc8VMoKFU1jT9na/jyaoGmfwovKj4ib8s2aiXGAp7Y38 +UCmY+bJapWom6Piy5Jzi/p/kzMVdJcSa+GqpuFxBoQYEVs2XYVl7cGu/wPM+NToC +pkSoWwF1QAnHn0eokR9E1rU= +-----END CERTIFICATE----- +Bag Attributes: +subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US +issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_cert b/tests/data/azure/pubkey_extract_cert new file mode 100644 index 00000000..ce9b852d --- /dev/null +++ b/tests/data/azure/pubkey_extract_cert @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_ssh_key b/tests/data/azure/pubkey_extract_ssh_key new file mode 100644 index 00000000..54d749ed --- /dev/null +++ b/tests/data/azure/pubkey_extract_ssh_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHU9IDclbKVYVbYuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoinlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmWvwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4yWzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7t5btUyvp diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 26b2b93d..02556165 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -1,11 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import os +import unittest2 from textwrap import dedent from cloudinit.sources.helpers import azure as azure_helper from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir +from cloudinit.util import load_file from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim GOAL_STATE_TEMPLATE = """\ @@ -289,6 +291,50 @@ class TestOpenSSLManager(CiTestCase): self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) +class TestOpenSSLManagerActions(CiTestCase): + + def setUp(self): + super(TestOpenSSLManagerActions, self).setUp() + + self.allowed_subp = True + + def _data_file(self, name): + path = 'tests/data/azure' + return os.path.join(path, name) + + @unittest2.skip("todo move to cloud_test") + def test_pubkey_extract(self): + cert = load_file(self._data_file('pubkey_extract_cert')) + good_key = load_file(self._data_file('pubkey_extract_ssh_key')) + sslmgr = azure_helper.OpenSSLManager() + key = sslmgr._get_ssh_key_from_cert(cert) + self.assertEqual(good_key, key) + + good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + fingerprint = sslmgr._get_fingerprint_from_cert(cert) + self.assertEqual(good_fingerprint, fingerprint) + + @unittest2.skip("todo move to cloud_test") + @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') + def test_parse_certificates(self, mock_decrypt_certs): + """Azure control plane puts private keys as well as certificates + into the Certificates XML object. Make sure only the public keys + from certs are extracted and that fingerprints are converted to + the form specified in the ovf-env.xml file. + """ + cert_contents = load_file(self._data_file('parse_certificates_pem')) + fingerprints = load_file(self._data_file( + 'parse_certificates_fingerprints') + ).splitlines() + mock_decrypt_certs.return_value = cert_contents + sslmgr = azure_helper.OpenSSLManager() + keys_by_fp = sslmgr.parse_certificates('') + for fp in keys_by_fp.keys(): + self.assertIn(fp, fingerprints) + for fp in fingerprints: + self.assertIn(fp, keys_by_fp) + + class TestWALinuxAgentShim(CiTestCase): def setUp(self): @@ -329,18 +375,31 @@ class TestWALinuxAgentShim(CiTestCase): def test_certificates_used_to_determine_public_keys(self): shim = wa_shim() - data = shim.register_with_azure_and_fetch_data() + """if register_with_azure_and_fetch_data() isn't passed some info about + the user's public keys, there's no point in even trying to parse + the certificates + """ + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, + {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] + certs = {'fp1': 'expected-key', + 'fp2': 'should-not-be-found', + 'fp3': 'expected-no-value-key', + } + sslmgr = self.OpenSSLManager.return_value + sslmgr.parse_certificates.return_value = certs + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual( [mock.call(self.GoalState.return_value.certificates_xml)], - self.OpenSSLManager.return_value.parse_certificates.call_args_list) - self.assertEqual( - self.OpenSSLManager.return_value.parse_certificates.return_value, - data['public-keys']) + sslmgr.parse_certificates.call_args_list) + self.assertIn('expected-key', data['public-keys']) + self.assertIn('expected-no-value-key', data['public-keys']) + self.assertNotIn('should-not-be-found', data['public-keys']) def test_absent_certificates_produces_empty_public_keys(self): + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] self.GoalState.return_value.certificates_xml = None shim = wa_shim() - data = 
shim.register_with_azure_and_fetch_data() + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual([], data['public-keys']) def test_correct_url_used_for_report_ready(self): -- cgit v1.2.3 From 1182ad5f9362e1570c622345a3ac996c07eb2eeb Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 26 Feb 2019 15:37:36 +0000 Subject: tests: fix some slow tests and some leaking state In test_ds_identify, don't mutate otherwise-static test data. When running tests in a random order, this was causing failures due to breaking preconditions for other tests. In tests/helpers, reset logging level in tearDown. Some of the CLI tests set the level of the root logger in a way that isn't correctly reset. For test_poll_imds_re_dhcp_on_timeout and test_dhcp_discovery_run_in_sandbox_warns_invalid_pid, mock out time.sleep; this saves ~11 seconds (or ~40% of previous test time!). --- cloudinit/net/tests/test_dhcp.py | 1 + cloudinit/tests/helpers.py | 1 + tests/unittests/test_datasource/test_azure.py | 1 + tests/unittests/test_ds_identify.py | 2 +- 4 files changed, 4 insertions(+), 1 deletion(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py index 79e8842f..51390249 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/cloudinit/net/tests/test_dhcp.py @@ -117,6 +117,7 @@ class TestDHCPDiscoveryClean(CiTestCase): self.assertEqual('eth9', call[0][1]) self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2]) + @mock.patch('time.sleep', mock.MagicMock()) @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.subp') def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp, diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 2eb7b0cd..46a49416 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -207,6 +207,7 @@ class CiTestCase(TestCase): if self.with_logs: # Remove the handler we setup logging.getLogger().handlers = self.old_handlers + logging.getLogger().level = None util.subp = _real_subp super(CiTestCase, self).tearDown() diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 417d86a9..5edf36e8 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1692,6 +1692,7 @@ class TestPreprovisioningPollIMDS(CiTestCase): self.paths = helpers.Paths({'cloud_dir': self.tmp}) dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + @mock.patch('time.sleep', mock.MagicMock()) @mock.patch(MOCKPATH + 'EphemeralDHCPv4') def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, report_ready_func, fake_resp, m_media_switch, m_dhcp, diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 756b4fb4..d00c1b4b 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -441,7 +441,7 @@ class TestDsIdentify(DsIdentifyBase): nova does not identify itself on platforms other than intel. 
https://bugs.launchpad.net/cloud-init/+bugs?field.tag=dsid-nova""" - data = VALID_CFG['OpenStack'].copy() + data = copy.deepcopy(VALID_CFG['OpenStack']) del data['files'][P_PRODUCT_NAME] data.update({'policy_dmi': POLICY_FOUND_OR_MAYBE, 'policy_no_dmi': POLICY_FOUND_OR_MAYBE}) -- cgit v1.2.3 From eee0e09ead3d11c32e8888d13d164810ee5f19d6 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Mon, 4 Mar 2019 16:50:31 +0000 Subject: tip-pylint: Fix assignment-from-return-none errors pylint now complains when the result of a function call is assigned but the function is declared to return None. The check does not account for subclassing (here, device_name_to_device() is overridden to return an actual device), so we resolve the issue by removing the intermediate assignment in the unittests. --- tests/unittests/test_datasource/test_configdrive.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index dcdabea5..7a6802f6 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -268,8 +268,7 @@ class TestConfigDriveDataSource(CiTestCase): exists_mock = mocks.enter_context( mock.patch.object(os.path, 'exists', side_effect=exists_side_effect())) - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) find_mock.assert_called_once_with(mock.ANY) self.assertEqual(exists_mock.call_count, 2) @@ -296,8 +295,7 @@ class TestConfigDriveDataSource(CiTestCase): exists_mock = mocks.enter_context( mock.patch.object(os.path, 'exists', return_value=True)) - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) find_mock.assert_called_once_with(mock.ANY) exists_mock.assert_called_once_with(mock.ANY) @@ -331,8 +329,7 @@ class TestConfigDriveDataSource(CiTestCase): yield True with mock.patch.object(os.path, 'exists', side_effect=exists_side_effect()): - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) # We don't assert the call count for os.path.exists() because # not all of the entries in name_tests results in two calls to # that function.
Specifically, 'root2k' doesn't seem to call @@ -359,8 +356,7 @@ class TestConfigDriveDataSource(CiTestCase): } for name, dev_name in name_tests.items(): with mock.patch.object(os.path, 'exists', return_value=True): - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) def test_dir_valid(self): """Verify a dir is read as such.""" -- cgit v1.2.3 From edf052c3196139169ecbfe98049c278f4babc8ca Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 4 Mar 2019 18:21:59 +0000 Subject: drop Python 2.6 support and our NIH version detection - Remove the last few places that use `if PY26` - Replace our Python version detection logic with six's (which we were already using in most places) --- cloudinit/tests/helpers.py | 22 +--------------------- cloudinit/util.py | 4 ---- tests/unittests/test_datasource/test_azure.py | 4 +--- 3 files changed, 2 insertions(+), 28 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 46a49416..f41180fd 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -41,26 +41,6 @@ _real_subp = util.subp SkipTest = unittest2.SkipTest skipIf = unittest2.skipIf -# Used for detecting different python versions -PY2 = False -PY26 = False -PY27 = False -PY3 = False - -_PY_VER = sys.version_info -_PY_MAJOR, _PY_MINOR, _PY_MICRO = _PY_VER[0:3] -if (_PY_MAJOR, _PY_MINOR) <= (2, 6): - if (_PY_MAJOR, _PY_MINOR) == (2, 6): - PY26 = True - if (_PY_MAJOR, _PY_MINOR) >= (2, 0): - PY2 = True -else: - if (_PY_MAJOR, _PY_MINOR) == (2, 7): - PY27 = True - PY2 = True - if (_PY_MAJOR, _PY_MINOR) >= (3, 0): - PY3 = True - # Makes the old path start # with new base instead of whatever @@ -357,7 +337,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def patchOpen(self, new_root): trap_func = retarget_many_wrapper(new_root, 1, open) - name = 'builtins.open' if PY3 else '__builtin__.open' + name = 'builtins.open' if six.PY3 else '__builtin__.open' self.patched_funcs.enter_context(mock.patch(name, trap_func)) def patchStdoutAndStderr(self, stdout=None, stderr=None): diff --git a/cloudinit/util.py b/cloudinit/util.py index e5403f7d..a192091f 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -72,7 +72,6 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'], PROC_CMDLINE = None _LSB_RELEASE = {} -PY26 = sys.version_info[0:2] == (2, 6) def get_architecture(target=None): @@ -2815,9 +2814,6 @@ def load_shell_content(content, add_empty=False, empty_val=None): variables. 
Set their value to empty_val.""" def _shlex_split(blob): - if PY26 and isinstance(blob, six.text_type): - # Older versions don't support unicode input - blob = blob.encode("utf8") return shlex.split(blob, comments=True) data = {} diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 5edf36e8..6b05b8f1 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -11,7 +11,7 @@ from cloudinit.util import (b64e, decode_binary, load_file, write_file, from cloudinit.version import version_string as vs from cloudinit.tests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, - ExitStack, PY26, SkipTest) + ExitStack) import crypt import httpretty @@ -221,8 +221,6 @@ class TestAzureDataSource(CiTestCase): def setUp(self): super(TestAzureDataSource, self).setUp() - if PY26: - raise SkipTest("Does not work on python 2.6") self.tmp = self.tmp_dir() # patch cloud_dir, so our 'seed_dir' is guaranteed empty -- cgit v1.2.3 From 5352dd99eb2937b4eaaaf596b40ad7ca69d87f64 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 4 Mar 2019 18:41:05 +0000 Subject: helpers/openstack: Treat unknown link types as physical Some deployments of OpenStack expose link types to the guest which cloud-init doesn't recognise. These will almost always be physical, so we can operate more robustly if we assume that they are (whilst warning the user that we're seeing something unexpected). LP: #1639263 --- cloudinit/sources/helpers/openstack.py | 12 +++++------ .../unittests/test_datasource/test_configdrive.py | 23 ++++++++++++++++++++++ 2 files changed, 29 insertions(+), 6 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 9c29ceac..8f069115 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -67,7 +67,7 @@ OS_VERSIONS = ( OS_ROCKY, ) -PHYSICAL_TYPES = ( +KNOWN_PHYSICAL_TYPES = ( None, 'bgpovs', # not present in OpenStack upstream but used on OVH cloud. 
'bridge', @@ -600,9 +600,7 @@ def convert_net_json(network_json=None, known_macs=None): subnet['ipv6'] = True subnets.append(subnet) cfg.update({'subnets': subnets}) - if link['type'] in PHYSICAL_TYPES: - cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) - elif link['type'] in ['bond']: + if link['type'] in ['bond']: params = {} if link_mac_addr: params['mac_address'] = link_mac_addr @@ -641,8 +639,10 @@ def convert_net_json(network_json=None, known_macs=None): curinfo.update({'mac': link['vlan_mac_address'], 'name': name}) else: - raise ValueError( - 'Unknown network_data link type: %s' % link['type']) + if link['type'] not in KNOWN_PHYSICAL_TYPES: + LOG.warning('Unknown network_data link type (%s); treating as' + ' physical', link['type']) + cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) config.append(cfg) link_id_info[curinfo['id']] = curinfo diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 7a6802f6..520c50fe 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -600,6 +600,9 @@ class TestNetJson(CiTestCase): class TestConvertNetworkData(CiTestCase): + + with_logs = True + def setUp(self): super(TestConvertNetworkData, self).setUp() self.tmp = self.tmp_dir() @@ -726,6 +729,26 @@ class TestConvertNetworkData(CiTestCase): 'enp0s2': 'fa:16:3e:d4:57:ad'} self.assertEqual(expected, config_name2mac) + def test_unknown_device_types_accepted(self): + # If we don't recognise a link, we should treat it as physical for a + # best-effort boot + my_netdata = deepcopy(NETWORK_DATA) + my_netdata['links'][0]['type'] = 'my-special-link-type' + + ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) + config_name2mac = {} + for n in ncfg['config']: + if n['type'] == 'physical': + config_name2mac[n['name']] = n['mac_address'] + + expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', + 'enp0s2': 'fa:16:3e:d4:57:ad'} + self.assertEqual(expected, config_name2mac) + + # We should, however, warn the user that we don't recognise the type + self.assertIn('Unknown network_data link type (my-special-link-type)', + self.logs.getvalue()) + def cfg_ds_from_dir(base_d, files=None): run = os.path.join(base_d, "run") -- cgit v1.2.3 From f2fd6eac4407e60d0e98826ab03847dda4cde138 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 14 Mar 2019 23:06:47 +0000 Subject: DataSource: move update_events from a class to an instance attribute Currently, DataSourceAzure updates self.update_events in __init__. As update_events is a class attribute on DataSource, this updates it for all instances of classes derived from DataSource including those for other clouds. This means that if DataSourceAzure is even instantiated, its behaviour is applied to whichever data source ends up being used for boot. To address this, update_events is moved from a class attribute to an instance attribute (that is therefore populated at instantiation time). This retains the defaults for all DataSource sub-class instances, but avoids them being able to mutate the state in instances of other DataSource sub-classes. update_events is only ever referenced on an instance of DataSource (or a sub-class); no code relies on it being a class attribute. (In fact, it's only used within methods on DataSource or its sub-classes, so it doesn't even _need_ to remain public, though I think it's appropriate for it to be public.) 
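The pitfall is easy to reproduce outside cloud-init; a minimal, self-contained illustration (class names here are hypothetical, not cloud-init's real ones):

    # Hypothetical reproduction of the shared-state bug described above.
    class Base(object):
        update_events = {'network': {'boot-new-instance'}}  # class attribute

    class AzureLike(Base):
        def __init__(self):
            # Attribute lookup finds the dict on the *class*, so this
            # mutates state shared by every subclass of Base.
            self.update_events['network'].add('boot')

    class OtherCloud(Base):
        pass

    AzureLike()
    print(OtherCloud().update_events)
    # {'network': {'boot-new-instance', 'boot'}} -- 'boot' leaked across classes

Moving the dict into __init__ gives each instance its own copy, which is exactly what the patch below does.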
DataSourceScaleway is also updated to move update_events from a class attribute to an instance attribute, as the class attribute would now be masked by the DataSource instance attribute. LP: #1819913 --- cloudinit/sources/DataSourceScaleway.py | 3 ++- cloudinit/sources/__init__.py | 6 +++--- cloudinit/sources/tests/test_init.py | 15 +++++++++++++++ tests/unittests/test_datasource/test_scaleway.py | 7 +++++++ 4 files changed, 27 insertions(+), 4 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index b573b382..54bfc1fe 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -171,10 +171,11 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): dsname = "Scaleway" - update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) + self.update_events = { + 'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}} self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}), diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index e6966b31..1604932d 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -164,9 +164,6 @@ class DataSource(object): # A datasource which supports writing network config on each system boot # would call update_events['network'].add(EventType.BOOT). - # Default: generate network config on new instance id (first boot). - update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])} - # N-tuple listing default values for any metadata-related class # attributes cached on an instance by a process_data runs. These attribute # values are reset via clear_cached_attrs during any update_metadata call. @@ -191,6 +188,9 @@ class DataSource(object): self.vendordata = None self.vendordata_raw = None + # Default: generate network config on new instance id (first boot). 
+ self.update_events = {'network': {EventType.BOOT_NEW_INSTANCE}} + self.ds_cfg = util.get_cfg_by_path( self.sys_cfg, ("datasource", self.dsname), {}) if not self.ds_cfg: diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 6378e98b..cb1912be 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -575,6 +575,21 @@ class TestDataSource(CiTestCase): " events: New instance first boot", self.logs.getvalue()) + def test_data_sources_cant_mutate_update_events_for_others(self): + """update_events shouldn't be changed for other DSes (LP: #1819913)""" + + class ModifyingDS(DataSource): + + def __init__(self, sys_cfg, distro, paths): + # This mirrors what DataSourceAzure does which causes LP: + # #1819913 + DataSource.__init__(self, sys_cfg, distro, paths) + self.update_events['network'].add(EventType.BOOT) + + before_update_events = copy.deepcopy(self.datasource.update_events) + ModifyingDS(self.sys_cfg, self.distro, self.paths) + self.assertEqual(before_update_events, self.datasource.update_events) + class TestRedactSensitiveData(CiTestCase): diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index f96bf0a2..3bfd7527 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -7,6 +7,7 @@ import requests from cloudinit import helpers from cloudinit import settings +from cloudinit.event import EventType from cloudinit.sources import DataSourceScaleway from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase @@ -403,3 +404,9 @@ class TestDataSourceScaleway(HttprettyTestCase): netcfg = self.datasource.network_config self.assertEqual(netcfg, '0xdeadbeef') + + def test_update_events_is_correct(self): + """ensure update_events contains correct data""" + self.assertEqual( + {'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}, + self.datasource.update_events) -- cgit v1.2.3 From 0dc3a77f41f4544e4cb5a41637af7693410d4cdf Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Tue, 26 Mar 2019 18:53:50 +0000 Subject: Azure: Ensure platform random_seed is always serializable as JSON. The Azure platform surfaces random bytes into /sys via Hyper-V. Python 2.7 json.dump() raises an exception if asked to convert a str with non-character content, and python 3.0 json.dump() won't serialize a "bytes" value. As a result, c-i instance data is often not written by Azure, making reboots slower (c-i has to repeat work). The random data is base64-encoded and then decoded into a string (str or unicode depending on the version of Python in use). The base64 string has just as many bits of entropy, so we're not throwing away useful "information", but we can be certain json.dump() will correctly serialize the bits. 
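The approach can be sketched in a few lines (a standalone illustration of the encode-before-serialize idea, not the patched function itself):

    import base64
    import json

    # Entropy bytes as Hyper-V surfaces them; frequently not valid UTF-8.
    raw = b'\x97\x00\x00\x00C\xb4{V\xf4X%'
    # json.dumps(raw) raises on python 3 (bytes) and can raise on
    # python 2 (str with bad octets); base64 text is always safe.
    seed = base64.b64encode(raw).decode()
    json.dumps({'seed': seed})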
--- cloudinit/sources/DataSourceAzure.py | 24 +++++++++++++++++++----- tests/data/azure/non_unicode_random_string | 1 + tests/unittests/test_datasource/test_azure.py | 24 ++++++++++++++++++++++-- 3 files changed, 42 insertions(+), 7 deletions(-) create mode 100644 tests/data/azure/non_unicode_random_string (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index eccbee5a..b4e3f061 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -54,6 +54,7 @@ REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" AGENT_SEED_DIR = '/var/lib/waagent' IMDS_URL = "http://169.254.169.254/metadata/" +PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" # List of static scripts and network config artifacts created by # stock ubuntu suported images. @@ -195,6 +196,8 @@ if util.is_FreeBSD(): RESOURCE_DISK_PATH = "/dev/" + res_disk else: LOG.debug("resource disk is None") + # TODO Find where platform entropy data is surfaced + PLATFORM_ENTROPY_SOURCE = None BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, @@ -1100,16 +1103,27 @@ def _check_freebsd_cdrom(cdrom_dev): return False -def _get_random_seed(): +def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): """Return content random seed file if available, otherwise, return None.""" # azure / hyper-v provides random data here - # TODO. find the seed on FreeBSD platform # now update ds_cfg to reflect contents pass in config - if util.is_FreeBSD(): + if source is None: return None - return util.load_file("/sys/firmware/acpi/tables/OEM0", - quiet=True, decode=False) + seed = util.load_file(source, quiet=True, decode=False) + + # The seed generally contains non-Unicode characters. load_file puts + # them into a str (in python 2) or bytes (in python 3). In python 2, + # bad octets in a str cause util.json_dumps() to throw an exception. In + # python 3, bytes is a non-serializable type, and the handler load_file + # uses applies b64 encoding *again* to handle it. The simplest solution + # is to just b64encode the data and then decode it to a serializable + # string. Same number of bits of entropy, just with 25% more zeroes. + # There's no need to undo this base64-encoding when the random seed is + # actually used in cc_seed_random.py. 
+ seed = base64.b64encode(seed).decode() + + return seed def list_possible_azure_ds_devs(): diff --git a/tests/data/azure/non_unicode_random_string b/tests/data/azure/non_unicode_random_string new file mode 100644 index 00000000..b9ecefb9 --- /dev/null +++ b/tests/data/azure/non_unicode_random_string @@ -0,0 +1 @@ +OEM0d\x00\x00\x00\x01\x80VRTUALMICROSFT\x02\x17\x00\x06MSFT\x97\x00\x00\x00C\xb4{V\xf4X%\x061x\x90\x1c\xfen\x86\xbf~\xf5\x8c\x94&\x88\xed\x84\xf9B\xbd\xd3\xf1\xdb\xee:\xd9\x0fc\x0e\x83(\xbd\xe3'\xfc\x85,\xdf\xf4\x13\x99N\xc5\xf3Y\x1e\xe3\x0b\xa4H\x08J\xb9\xdcdb$ \ No newline at end of file diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 6b05b8f1..53c56cd0 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -7,11 +7,11 @@ from cloudinit.sources import ( UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) from cloudinit.util import (b64e, decode_binary, load_file, write_file, find_freebsd_part, get_path_dev_freebsd, - MountFailedError) + MountFailedError, json_dumps, load_json) from cloudinit.version import version_string as vs from cloudinit.tests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, - ExitStack) + ExitStack, resourceLocation) import crypt import httpretty @@ -1923,4 +1923,24 @@ class TestWBIsPlatformViable(CiTestCase): self.logs.getvalue()) +class TestRandomSeed(CiTestCase): + """Test proper handling of random_seed""" + + def test_non_ascii_seed_is_serializable(self): + """Pass if a random string from the Azure infrastructure which + contains at least one non-Unicode character can be converted to/from + JSON without alteration and without throwing an exception. + """ + path = resourceLocation("azure/non_unicode_random_string") + result = dsaz._get_random_seed(path) + + obj = {'seed': result} + try: + serialized = json_dumps(obj) + deserialized = load_json(serialized) + except UnicodeDecodeError: + self.fail("Non-serializable random seed returned") + + self.assertEqual(deserialized['seed'], result) + # vi: ts=4 expandtab -- cgit v1.2.3 From b76714c355a87416f9f07156b0f025aceaca7296 Mon Sep 17 00:00:00 2001 From: Risto Oikarinen Date: Tue, 9 Apr 2019 18:05:24 +0000 Subject: Change DataSourceNoCloud to ignore file system label's case. NoCloud data source now accepts both 'cidata' and 'CIDATA' as filesystem labels. This is similar to DataSourceConfigDrive's support for 'config-2' and 'CONFIG-2'. 
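The lookup that the change below settles on can be sketched as follows (an illustration, not the patched function verbatim; find_devs_with stands in for cloudinit.util.find_devs_with):

    def candidate_devices(label, find_devs_with):
        # filesystem types a NoCloud seed may live on
        fslist = find_devs_with("TYPE=vfat")
        fslist.extend(find_devs_with("TYPE=iso9660"))
        # accept the label in either case
        label_list = find_devs_with("LABEL=%s" % label.upper())
        label_list.extend(find_devs_with("LABEL=%s" % label.lower()))
        return sorted(set(fslist) & set(label_list), reverse=True)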
--- cloudinit/sources/DataSourceNoCloud.py | 4 ++- doc/rtd/topics/datasources/nocloud.rst | 2 +- tests/unittests/test_datasource/test_nocloud.py | 42 +++++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 17 ++++++++++ tools/ds-identify | 7 +++-- 5 files changed, 67 insertions(+), 5 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 6860f0cc..fcf5d589 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -106,7 +106,9 @@ class DataSourceNoCloud(sources.DataSource): fslist = util.find_devs_with("TYPE=vfat") fslist.extend(util.find_devs_with("TYPE=iso9660")) - label_list = util.find_devs_with("LABEL=%s" % label) + label_list = util.find_devs_with("LABEL=%s" % label.upper()) + label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) + devlist = list(set(fslist) & set(label_list)) devlist.sort(reverse=True) diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst index 08578e86..1c5cf961 100644 --- a/doc/rtd/topics/datasources/nocloud.rst +++ b/doc/rtd/topics/datasources/nocloud.rst @@ -9,7 +9,7 @@ network at all). You can provide meta-data and user-data to a local vm boot via files on a `vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be -``cidata``. +``cidata`` or ``CIDATA``. Alternatively, you can provide meta-data via kernel command line or SMBIOS "serial number" option. The data must be passed in the form of a string: diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 3429272c..b785362f 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -32,6 +32,36 @@ class TestNoCloudDataSource(CiTestCase): self.mocks.enter_context( mock.patch.object(util, 'read_dmi_data', return_value=None)) + def _test_fs_config_is_read(self, fs_label, fs_label_to_search): + vfat_device = 'device-1' + + def m_mount_cb(device, callback, mtype): + if (device == vfat_device): + return {'meta-data': yaml.dump({'instance-id': 'IID'})} + else: + return {} + + def m_find_devs_with(query='', path=''): + if 'TYPE=vfat' == query: + return [vfat_device] + elif 'LABEL={}'.format(fs_label) == query: + return [vfat_device] + else: + return [] + + self.mocks.enter_context( + mock.patch.object(util, 'find_devs_with', + side_effect=m_find_devs_with)) + self.mocks.enter_context( + mock.patch.object(util, 'mount_cb', + side_effect=m_mount_cb)) + sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}} + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + + self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') + self.assertTrue(ret) + def test_nocloud_seed_dir_on_lxd(self, m_is_lxd): md = {'instance-id': 'IID', 'dsmode': 'local'} ud = b"USER_DATA_HERE" @@ -90,6 +120,18 @@ class TestNoCloudDataSource(CiTestCase): ret = dsrc.get_data() self.assertFalse(ret) + def test_fs_config_lowercase_label(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'cidata') + + def test_fs_config_uppercase_label(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'cidata') + + def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'CIDATA') + + def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'CIDATA') + def 
test_no_datasource_expected(self, m_is_lxd): # no source should be found if no cmdline, config, and fs_label=None sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index d00c1b4b..8c18aa1a 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -520,6 +520,10 @@ class TestDsIdentify(DsIdentifyBase): """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" self._test_ds_found('NoCloud') + def test_nocloud_upper(self): + """NoCloud is found with uppercase filesystem label.""" + self._test_ds_found('NoCloudUpper') + def test_nocloud_seed(self): """Nocloud seed directory.""" self._test_ds_found('NoCloud-seed') @@ -713,6 +717,19 @@ VALID_CFG = { 'dev/vdb': 'pretend iso content for cidata\n', } }, + 'NoCloudUpper': { + 'ds': 'NoCloud', + 'mocks': [ + MOCK_VIRT_IS_KVM, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + BLKID_UEFI_UBUNTU + + [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])}, + ], + 'files': { + 'dev/vdb': 'pretend iso content for cidata\n', + } + }, 'NoCloud-seed': { 'ds': 'NoCloud', 'files': { diff --git a/tools/ds-identify b/tools/ds-identify index b78b2731..6518901e 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -620,7 +620,7 @@ dscheck_MAAS() { } dscheck_NoCloud() { - local fslabel="cidata" d="" + local fslabel="cidata CIDATA" d="" case " ${DI_KERNEL_CMDLINE} " in *\ ds=nocloud*) return ${DS_FOUND};; esac @@ -632,9 +632,10 @@ dscheck_NoCloud() { check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} done - if has_fs_with_label "${fslabel}"; then + if has_fs_with_label $fslabel; then return ${DS_FOUND} fi + return ${DS_NOT_FOUND} } @@ -762,7 +763,7 @@ is_cdrom_ovf() { # explicitly skip known labels of other types. rd_rdfe is azure. case "$label" in - config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;; + config-2|CONFIG-2|rd_rdfe_stable*|cidata|CIDATA) return 1;; esac local idstr="http://schemas.dmtf.org/ovf/environment/1" -- cgit v1.2.3 From 6322c2ddf4b68a8e7cc467a07fb20a1d151a2ef3 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 10 Apr 2019 20:21:37 +0000 Subject: Revert "DataSource: move update_events from a class to an instance..." Moving update_events from a class attribute to an instance attribute means that it doesn't exist on DataSource objects that are unpickled, causing tracebacks on cloud-init upgrade. As this change is only required for cloud-init installations which don't utilise ds-identify, we're backing it out to be reintroduced once the upgrade path bug has been addressed. This reverts commit f2fd6eac4407e60d0e98826ab03847dda4cde138. 
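The upgrade failure is inherent to how pickle restores objects: loading rebuilds __dict__ directly and never runs __init__, so an attribute assigned only in __init__ is absent from objects pickled by an older cloud-init. A standalone sketch (hypothetical class, not the real DataSource):

    import pickle

    class DS(object):
        def __init__(self):
            # instance attribute: exists only if __init__ ran
            self.update_events = {'network': set()}

    old = DS()
    old.__dict__.clear()   # simulate state pickled before the attribute existed
    restored = pickle.loads(pickle.dumps(old))
    restored.update_events  # AttributeError: no class attribute to fall back on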
--- cloudinit/sources/DataSourceScaleway.py | 3 +-- cloudinit/sources/__init__.py | 6 +++--- cloudinit/sources/tests/test_init.py | 15 --------------- tests/unittests/test_datasource/test_scaleway.py | 7 ------- 4 files changed, 4 insertions(+), 27 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 54bfc1fe..b573b382 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -171,11 +171,10 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): dsname = "Scaleway" + update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) - self.update_events = { - 'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}} self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}), diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 1604932d..e6966b31 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -164,6 +164,9 @@ class DataSource(object): # A datasource which supports writing network config on each system boot # would call update_events['network'].add(EventType.BOOT). + # Default: generate network config on new instance id (first boot). + update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])} + # N-tuple listing default values for any metadata-related class # attributes cached on an instance by a process_data runs. These attribute # values are reset via clear_cached_attrs during any update_metadata call. @@ -188,9 +191,6 @@ class DataSource(object): self.vendordata = None self.vendordata_raw = None - # Default: generate network config on new instance id (first boot). 
- self.update_events = {'network': {EventType.BOOT_NEW_INSTANCE}} - self.ds_cfg = util.get_cfg_by_path( self.sys_cfg, ("datasource", self.dsname), {}) if not self.ds_cfg: diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index cb1912be..6378e98b 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -575,21 +575,6 @@ class TestDataSource(CiTestCase): " events: New instance first boot", self.logs.getvalue()) - def test_data_sources_cant_mutate_update_events_for_others(self): - """update_events shouldn't be changed for other DSes (LP: #1819913)""" - - class ModifyingDS(DataSource): - - def __init__(self, sys_cfg, distro, paths): - # This mirrors what DataSourceAzure does which causes LP: - # #1819913 - DataSource.__init__(self, sys_cfg, distro, paths) - self.update_events['network'].add(EventType.BOOT) - - before_update_events = copy.deepcopy(self.datasource.update_events) - ModifyingDS(self.sys_cfg, self.distro, self.paths) - self.assertEqual(before_update_events, self.datasource.update_events) - class TestRedactSensitiveData(CiTestCase): diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index 3bfd7527..f96bf0a2 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -7,7 +7,6 @@ import requests from cloudinit import helpers from cloudinit import settings -from cloudinit.event import EventType from cloudinit.sources import DataSourceScaleway from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase @@ -404,9 +403,3 @@ class TestDataSourceScaleway(HttprettyTestCase): netcfg = self.datasource.network_config self.assertEqual(netcfg, '0xdeadbeef') - - def test_update_events_is_correct(self): - """ensure update_events contains correct data""" - self.assertEqual( - {'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}, - self.datasource.update_events) -- cgit v1.2.3 From c8c32515778983d244126d4e359be9e91b3ce9e5 Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Thu, 18 Apr 2019 21:23:36 +0000 Subject: test_azure: mock util.SeLinuxGuard where needed Mock util.SeLinuxGuard to do nothing within tests that mock functions used by the guard, when those mocks confuse the guard. This has no impact when executing unit tests on systems which do not enable selinux (e.g. Ubuntu). 
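Neutralizing a context manager under mock is a two-line pattern; a sketch of the shape these tests use (assuming an already-created MagicMock):

    from unittest import mock

    m_selguard = mock.MagicMock()
    # make entering the guard a no-op that reports "not enforcing"
    m_selguard.__enter__ = mock.Mock(return_value=False)
    m_selguard.__exit__ = mock.Mock(return_value=False)

    with m_selguard:
        pass  # guarded code runs; no selinux restorecon is attempted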
LP: #1825253 --- tests/unittests/test_datasource/test_azure.py | 3 +++ tests/unittests/test_net.py | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'tests/unittests/test_datasource') diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 53c56cd0..ab77c034 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1375,12 +1375,15 @@ class TestCanDevBeReformatted(CiTestCase): self._domock(p + "util.mount_cb", 'm_mount_cb') self._domock(p + "os.path.realpath", 'm_realpath') self._domock(p + "os.path.exists", 'm_exists') + self._domock(p + "util.SeLinuxGuard", 'm_selguard') self.m_exists.side_effect = lambda p: p in bypath self.m_realpath.side_effect = realpath self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs self.m_mount_cb.side_effect = mount_cb self.m_partitions_on_device.side_effect = partitions_on_device + self.m_selguard.__enter__ = mock.Mock(return_value=False) + self.m_selguard.__exit__ = mock.Mock() def test_three_partitions_is_false(self): """A disk with 3 partitions can not be formatted.""" diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index fd03deb6..ca6ef97d 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3269,9 +3269,12 @@ class TestNetplanPostcommands(CiTestCase): mock_netplan_generate.assert_called_with(run=True) mock_net_setup_link.assert_called_with(run=True) + @mock.patch('cloudinit.util.SeLinuxGuard') @mock.patch.object(netplan, "get_devicelist") @mock.patch('cloudinit.util.subp') - def test_netplan_postcmds(self, mock_subp, mock_devlist): + def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel): + mock_sel.__enter__ = mock.Mock(return_value=False) + mock_sel.__exit__ = mock.Mock() mock_devlist.side_effect = [['lo']] tmp_dir = self.tmp_dir() ns = network_state.parse_net_config_data(self.mycfg, -- cgit v1.2.3 From ab6621d849b24bb652243e88c79f6f3b446048d7 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Wed, 8 May 2019 14:54:03 +0000 Subject: DataSourceAzure: Adjust timeout for polling IMDS If the IMDS primary server is not available, falling back to the secondary server takes about 1s. The net result is that the expected E2E time is slightly more than 1s. This change increases the timeout to 2s to prevent the infinite loop of timeouts. 
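The arithmetic behind the new value, as a sketch (the 1s failover cost is the observed figure from above; the response time is a made-up placeholder):

    FAILOVER_COST = 1.0   # time to fall back from primary to secondary IMDS
    RESPONSE_TIME = 0.2   # hypothetical time for the secondary to answer

    def request_can_complete(timeout):
        return timeout > FAILOVER_COST + RESPONSE_TIME

    assert not request_can_complete(1)  # old timeout: every retry expires
    assert request_can_complete(2)      # new timeout: leaves headroom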
--- cloudinit/sources/DataSourceAzure.py | 15 ++++++++++----- tests/unittests/test_datasource/test_azure.py | 10 +++++++--- 2 files changed, 17 insertions(+), 8 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 64165259..b7440c1d 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -57,7 +57,12 @@ AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" AGENT_SEED_DIR = '/var/lib/waagent' + +# In the event where the IMDS primary server is not +# available, it takes 1s to fallback to the secondary one +IMDS_TIMEOUT_IN_SECONDS = 2 IMDS_URL = "http://169.254.169.254/metadata/" + PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" # List of static scripts and network config artifacts created by @@ -582,9 +587,9 @@ class DataSourceAzure(sources.DataSource): return self._ephemeral_dhcp_ctx.clean_network() else: - return readurl(url, timeout=1, headers=headers, - exception_cb=exc_cb, infinite=True, - log_req_resp=False).contents + return readurl(url, timeout=IMDS_TIMEOUT_IN_SECONDS, + headers=headers, exception_cb=exc_cb, + infinite=True, log_req_resp=False).contents except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry self._ephemeral_dhcp_ctx.clean_network() @@ -1291,8 +1296,8 @@ def _get_metadata_from_imds(retries): headers = {"Metadata": "true"} try: response = readurl( - url, timeout=1, headers=headers, retries=retries, - exception_cb=retry_on_url_exc) + url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, + retries=retries, exception_cb=retry_on_url_exc) except Exception as e: LOG.debug('Ignoring IMDS instance metadata: %s', e) return {} diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index ab77c034..427ab7e7 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -163,7 +163,8 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): m_readurl.assert_called_with( self.network_md_url, exception_cb=mock.ANY, - headers={'Metadata': 'true'}, retries=2, timeout=1) + headers={'Metadata': 'true'}, retries=2, + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS) @mock.patch('cloudinit.url_helper.time.sleep') @mock.patch(MOCKPATH + 'net.is_up') @@ -1791,7 +1792,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs() - }, method='GET', timeout=1, + }, method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, url=full_url)]) self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( @@ -1828,7 +1830,9 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs()}, - method='GET', timeout=1, url=full_url)]) + method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, + url=full_url)]) self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', -- cgit v1.2.3 From baa478546d8cac98a706010699d64f8c2f70b5bf Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Fri, 10 May 2019 18:38:55 +0000 Subject: Azure: Return static fallback address as if failed to find endpoint The Azure data source helper attempts to use information in the dhcp lease to find the Wireserver endpoint (IP address). 
Under some unusual circumstances, those attempts will fail. This change uses a static address, known to be always correct in the Azure public and sovereign clouds, when the helper fails to locate a valid dhcp lease. This address is not guaranteed to be correct in Azure Stack environments; it's still best to use the information from the lease whenever possible. --- cloudinit/sources/helpers/azure.py | 14 +++++++++++--- tests/unittests/test_datasource/test_azure_helper.py | 9 +++++++-- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index d3af05ee..82c4c8c4 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -20,6 +20,9 @@ from cloudinit.reporting import events LOG = logging.getLogger(__name__) +# This endpoint matches the format as found in dhcp lease files, since this +# value is applied if the endpoint can't be found within a lease file +DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10" azure_ds_reporter = events.ReportEventStack( name="azure-ds", @@ -297,7 +300,12 @@ class WALinuxAgentShim(object): @azure_ds_telemetry_reporter def _get_value_from_leases_file(fallback_lease_file): leases = [] - content = util.load_file(fallback_lease_file) + try: + content = util.load_file(fallback_lease_file) + except IOError as ex: + LOG.error("Failed to read %s: %s", fallback_lease_file, ex) + return None + LOG.debug("content is %s", content) option_name = _get_dhcp_endpoint_option_name() for line in content.splitlines(): @@ -372,9 +380,9 @@ class WALinuxAgentShim(object): fallback_lease_file) value = WALinuxAgentShim._get_value_from_leases_file( fallback_lease_file) - if value is None: - raise ValueError('No endpoint found.') + LOG.warning("No lease found; using default endpoint") + value = DEFAULT_WIRESERVER_ENDPOINT endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) LOG.debug('Azure endpoint found at %s', endpoint_ip_address) diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 02556165..bd006aba 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -67,12 +67,17 @@ class TestFindEndpoint(CiTestCase): self.networkd_leases.return_value = None def test_missing_file(self): - self.assertRaises(ValueError, wa_shim.find_endpoint) + """wa_shim find_endpoint uses default endpoint if leasefile not found + """ + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") def test_missing_special_azure_line(self): + """wa_shim find_endpoint uses default endpoint if leasefile is found + but does not contain DHCP Option 245 (whose value is the endpoint) + """ self.load_file.return_value = '' self.dhcp_options.return_value = {'eth0': {'key': 'value'}} - self.assertRaises(ValueError, wa_shim.find_endpoint) + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") @staticmethod def _build_lease_content(encoded_address): -- cgit v1.2.3 From 0f8695323262e41c699588c7cd140f6b58c62017 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Fri, 24 May 2019 21:39:19 +0000 Subject: freebsd: NoCloud data source support blkid is a Linux-only command. With this patch, cloud-init uses another approach to find the data source on FreeBSD. 
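The FreeBSD path can be sketched independently of the diff below: candidate seed devices are built from the conventional geom provider paths for the filesystem label and filtered on existence (an illustration; 'cidata' is the usual NoCloud label):

    import os

    def freebsd_seed_devices(label):
        return [p for p in ('/dev/msdosfs/' + label, '/dev/iso9660/' + label)
                if os.path.exists(p)]

    # e.g. freebsd_seed_devices('cidata') -> ['/dev/iso9660/cidata'] when a
    # NoCloud ISO is attached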
LP: #1645824 --- cloudinit/sources/DataSourceNoCloud.py | 40 ++++++++++++++----------- config/cloud.cfg.tmpl | 4 +-- tests/unittests/test_datasource/test_nocloud.py | 18 +++++++++++ 3 files changed, 43 insertions(+), 19 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index fcf5d589..8a9e5dd2 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -35,6 +35,26 @@ class DataSourceNoCloud(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) + def _get_devices(self, label): + if util.is_FreeBSD(): + devlist = [ + p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label] + if os.path.exists(p)] + else: + # Query optical drive to get it in blkid cache for 2.6 kernels + util.find_devs_with(path="/dev/sr0") + util.find_devs_with(path="/dev/sr1") + + fslist = util.find_devs_with("TYPE=vfat") + fslist.extend(util.find_devs_with("TYPE=iso9660")) + + label_list = util.find_devs_with("LABEL=%s" % label.upper()) + label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) + + devlist = list(set(fslist) & set(label_list)) + devlist.sort(reverse=True) + return devlist + def _get_data(self): defaults = { "instance-id": "nocloud", @@ -99,20 +119,7 @@ class DataSourceNoCloud(sources.DataSource): label = self.ds_cfg.get('fs_label', "cidata") if label is not None: - # Query optical drive to get it in blkid cache for 2.6 kernels - util.find_devs_with(path="/dev/sr0") - util.find_devs_with(path="/dev/sr1") - - fslist = util.find_devs_with("TYPE=vfat") - fslist.extend(util.find_devs_with("TYPE=iso9660")) - - label_list = util.find_devs_with("LABEL=%s" % label.upper()) - label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) - - devlist = list(set(fslist) & set(label_list)) - devlist.sort(reverse=True) - - for dev in devlist: + for dev in self._get_devices(label): try: LOG.debug("Attempting to use data from %s", dev) @@ -120,9 +127,8 @@ class DataSourceNoCloud(sources.DataSource): seeded = util.mount_cb(dev, _pp2d_callback, pp2d_kwargs) except ValueError: - if dev in label_list: - LOG.warning("device %s with label=%s not a" - "valid seed.", dev, label) + LOG.warning("device %s with label=%s not a" + "valid seed.", dev, label) continue mydata = _merge_new_seed(mydata, seeded) diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 25db43e0..684c7473 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -32,8 +32,8 @@ preserve_hostname: false {% if variant in ["freebsd"] %} # This should not be required, but leave it in place until the real cause of -# not beeing able to find -any- datasources is resolved. -datasource_list: ['ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] +# not finding -any- datasources is resolved. 
+datasource_list: ['NoCloud', 'ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] {% endif %} # Example datasource config # datasource: diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index b785362f..18bea0b9 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -278,6 +278,24 @@ class TestNoCloudDataSource(CiTestCase): self.assertEqual(netconf, dsrc.network_config) self.assertNotIn(gateway, str(dsrc.network_config)) + @mock.patch("cloudinit.util.blkid") + def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid): + populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + self.mocks.enter_context( + mock.patch.object(util, 'is_FreeBSD', return_value=True)) + + self.mocks.enter_context( + mock.patch.object(os.path, 'exists', return_value=True)) + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc._get_devices('foo') + self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret) + fake_blkid.assert_not_called() + class TestParseCommandLineData(CiTestCase): -- cgit v1.2.3 From 6197c347c3960254dbcdb28eb73989d062ad9689 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Tue, 28 May 2019 15:39:48 +0000 Subject: freebsd: ability to grow root file system - UFS file system support - GPT partition table support - add support for newfs's -L parameter (label) - move freebsd specific test from Azure to freebsd --- cloudinit/config/cc_growpart.py | 3 +- cloudinit/config/cc_resizefs.py | 6 +-- cloudinit/util.py | 22 ++++++----- tests/unittests/test_datasource/test_azure.py | 24 ------------ tests/unittests/test_distros/test_freebsd.py | 45 ++++++++++++++++++++++ .../test_handler/test_handler_resizefs.py | 2 +- 6 files changed, 64 insertions(+), 38 deletions(-) create mode 100644 tests/unittests/test_distros/test_freebsd.py (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index bafca9d8..564f376f 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -215,7 +215,8 @@ def device_part_info(devpath): # FreeBSD doesn't know of sysfs so just get everything we need from # the device, like /dev/vtbd0p2. 
if util.is_FreeBSD(): - m = re.search('^(/dev/.+)p([0-9])$', devpath) + freebsd_part = "/dev/" + util.find_freebsd_part(devpath) + m = re.search('^(/dev/.+)p([0-9])$', freebsd_part) return (m.group(1), m.group(2)) if not os.path.exists(syspath): diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 076b9d5a..afd2e060 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth): def _resize_ufs(mount_point, devpth): - return ('growfs', '-y', devpth) + return ('growfs', '-y', mount_point) def _resize_zfs(mount_point, devpth): @@ -101,7 +101,7 @@ def _can_skip_resize_ufs(mount_point, devpth): """ # dumpfs -m / # newfs command for / (/dev/label/rootfs) - newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384 + newfs -L rootf -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384 -h 64 -i 8192 -j -k 6408 -m 8 -o time -s 58719232 /dev/label/rootf """ cur_fs_sz = None @@ -110,7 +110,7 @@ def _can_skip_resize_ufs(mount_point, devpth): for line in dumpfs_res.splitlines(): if not line.startswith('#'): newfs_cmd = shlex.split(line) - opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:' + opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:L:' optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value) for o, a in optlist: if o == "-s": diff --git a/cloudinit/util.py b/cloudinit/util.py index ea4199cd..aa23b3f3 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2337,17 +2337,21 @@ def parse_mtab(path): return None -def find_freebsd_part(label_part): - if label_part.startswith("/dev/label/"): - target_label = label_part[5:] - (label_part, _err) = subp(['glabel', 'status', '-s']) - for labels in label_part.split("\n"): +def find_freebsd_part(fs): + splitted = fs.split('/') + if len(splitted) == 3: + return splitted[2] + elif splitted[2] in ['label', 'gpt', 'ufs']: + target_label = fs[5:] + (part, _err) = subp(['glabel', 'status', '-s']) + for labels in part.split("\n"): items = labels.split() - if len(items) > 0 and items[0].startswith(target_label): - label_part = items[2] + if len(items) > 0 and items[0] == target_label: + part = items[2] break - label_part = str(label_part) - return label_part + return str(part) + else: + LOG.warning("Unexpected input in find_freebsd_part: %s", fs) def get_path_dev_freebsd(path, mnt_list): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 427ab7e7..afb614e4 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -6,7 +6,6 @@ from cloudinit import url_helper from cloudinit.sources import ( UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) from cloudinit.util import (b64e, decode_binary, load_file, write_file, - find_freebsd_part, get_path_dev_freebsd, MountFailedError, json_dumps, load_json) from cloudinit.version import version_string as vs from cloudinit.tests.helpers import ( @@ -391,29 +390,6 @@ scbus-1 on xpt0 bus 0 dev = ds.get_resource_disk_on_freebsd(1) self.assertEqual("da1", dev) - @mock.patch('cloudinit.util.subp') - def test_find_freebsd_part_on_Azure(self, mock_subp): - glabel_out = ''' -gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1 - label/rootfs N/A da0p2 - label/swap N/A da0p3 -''' - mock_subp.return_value = (glabel_out, "") - res = find_freebsd_part("/dev/label/rootfs") - self.assertEqual("da0p2", res) - - def test_get_path_dev_freebsd_on_Azure(self): - mnt_list = ''' -/dev/label/rootfs / ufs rw 1 1 -devfs /dev devfs 
rw,multilabel 0 0 -fdescfs /dev/fd fdescfs rw 0 0 -/dev/da1s1 /mnt/resource ufs rw 2 2 -''' - with mock.patch.object(os.path, 'exists', - return_value=True): - res = get_path_dev_freebsd('/etc', mnt_list) - self.assertIsNotNone(res) - @mock.patch(MOCKPATH + '_is_platform_viable') def test_call_is_platform_viable_seed(self, m_is_platform_viable): """Check seed_dir using _is_platform_viable and return False.""" diff --git a/tests/unittests/test_distros/test_freebsd.py b/tests/unittests/test_distros/test_freebsd.py new file mode 100644 index 00000000..8af253a2 --- /dev/null +++ b/tests/unittests/test_distros/test_freebsd.py @@ -0,0 +1,45 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.util import (find_freebsd_part, get_path_dev_freebsd) +from cloudinit.tests.helpers import (CiTestCase, mock) + +import os + + +class TestDeviceLookUp(CiTestCase): + + @mock.patch('cloudinit.util.subp') + def test_find_freebsd_part_label(self, mock_subp): + glabel_out = ''' +gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1 + label/rootfs N/A da0p2 + label/swap N/A da0p3 +''' + mock_subp.return_value = (glabel_out, "") + res = find_freebsd_part("/dev/label/rootfs") + self.assertEqual("da0p2", res) + + @mock.patch('cloudinit.util.subp') + def test_find_freebsd_part_gpt(self, mock_subp): + glabel_out = ''' + gpt/bootfs N/A vtbd0p1 +gptid/3f4cbe26-75da-11e8-a8f2-002590ec6166 N/A vtbd0p1 + gpt/swapfs N/A vtbd0p2 + gpt/rootfs N/A vtbd0p3 + iso9660/cidata N/A vtbd2 +''' + mock_subp.return_value = (glabel_out, "") + res = find_freebsd_part("/dev/gpt/rootfs") + self.assertEqual("vtbd0p3", res) + + def test_get_path_dev_freebsd_label(self): + mnt_list = ''' +/dev/label/rootfs / ufs rw 1 1 +devfs /dev devfs rw,multilabel 0 0 +fdescfs /dev/fd fdescfs rw 0 0 +/dev/da1s1 /mnt/resource ufs rw 2 2 +''' + with mock.patch.object(os.path, 'exists', + return_value=True): + res = get_path_dev_freebsd('/etc', mnt_list) + self.assertIsNotNone(res) diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py index 35187847..db9a0414 100644 --- a/tests/unittests/test_handler/test_handler_resizefs.py +++ b/tests/unittests/test_handler/test_handler_resizefs.py @@ -147,7 +147,7 @@ class TestResizefs(CiTestCase): def test_resize_ufs_cmd_return(self): mount_point = '/' devpth = '/dev/sda2' - self.assertEqual(('growfs', '-y', devpth), + self.assertEqual(('growfs', '-y', mount_point), _resize_ufs(mount_point, devpth)) @mock.patch('cloudinit.util.is_container', return_value=False) -- cgit v1.2.3 From feebec1cbb462208003460d68d909e76cb68e0e2 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 25 Jun 2019 16:06:27 +0000 Subject: azure: add region and AZ properties from imds compute location metadata This allows cloud-init query region to show valid region data for Azure --- cloudinit/sources/DataSourceAzure.py | 9 +++++ tests/unittests/test_datasource/test_azure.py | 47 +++++++++++++++++++++++---- 2 files changed, 49 insertions(+), 7 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index b7440c1d..d2fad9bb 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -683,6 +683,11 @@ class DataSourceAzure(sources.DataSource): DS_CFG_KEY_PRESERVE_NTFS, False)) return + @property + def availability_zone(self): + return self.metadata.get( + 'imds', {}).get('compute', {}).get('platformFaultDomain') + 
@property def network_config(self): """Generate a network config like net.generate_fallback_network() with @@ -701,6 +706,10 @@ class DataSourceAzure(sources.DataSource): self._network_config = parse_network_config(nc_src) return self._network_config + @property + def region(self): + return self.metadata.get('imds', {}).get('compute', {}).get('location') + def _partitions_on_device(devpath, maxnum=16): # return a list of tuples (ptnum, path) for each part on devpath diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index afb614e4..f27ef21b 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -84,6 +84,25 @@ def construct_valid_ovf_env(data=None, pubkeys=None, NETWORK_METADATA = { + "compute": { + "location": "eastus2", + "name": "my-hostname", + "offer": "UbuntuServer", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "Canonical", + "resourceGroupName": "srugroup1", + "sku": "19.04-DAILY", + "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777", + "tags": "", + "version": "19.04.201906190", + "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642", + "vmScaleSetName": "", + "vmSize": "Standard_DS1_v2", + "zone": "" + }, "network": { "interface": [ { @@ -478,13 +497,7 @@ scbus-1 on xpt0 bus 0 expected_metadata = { 'azure_data': { 'configurationsettype': 'LinuxProvisioningConfiguration'}, - 'imds': {'network': {'interface': [{ - 'ipv4': {'ipAddress': [ - {'privateIpAddress': '10.0.0.4', - 'publicIpAddress': '104.46.124.81'}], - 'subnet': [{'address': '10.0.0.0', 'prefix': '24'}]}, - 'ipv6': {'ipAddress': []}, - 'macAddress': '000D3A047598'}]}}, + 'imds': NETWORK_METADATA, 'instance-id': 'test-instance-id', 'local-hostname': u'myhost', 'random_seed': 'wild'} @@ -612,6 +625,26 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) + def test_availability_zone_set_from_imds(self): + """Datasource.availability returns IMDS platformFaultDomain.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual('0', dsrc.availability_zone) + + def test_region_set_from_imds(self): + """Datasource.region returns IMDS region location.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual('eastus2', dsrc.region) + def test_user_cfg_set_agent_command(self): # set dscfg in via base64 encoded yaml cfg = {'agent_command': "my_command"} -- cgit v1.2.3 From 07b17236be5665bb552c7460102bcd07bf8f2be8 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Tue, 16 Jul 2019 22:40:15 +0000 Subject: net: add rfc3442 (classless static routes) to EphemeralDHCP The EphemeralDHCP context manager did not parse or handle rfc3442 classless static routes which prevented reading datasource metadata in some clouds. This branch adds support for extracting the field from the leases output, parsing the format and then adding the required iproute2 ip commands to apply (and teardown) the static routes. 
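The RFC 3442 option packs each route as a mask width, the significant destination octets (only as many as the width requires), and four gateway octets. Decoding two lease values by hand (the sample strings match those used in the tests below):

    # "32,169,254,169,254,130,56,248,255"
    #   32              -> /32, so all four destination octets follow
    #   169,254,169,254 -> destination 169.254.169.254
    #   130,56,248,255  -> gateway 130.56.248.255
    # => ("169.254.169.254/32", "130.56.248.255")
    #
    # "0,130,56,240,1"
    #   0               -> /0, so no destination octets at all
    #   130,56,240,1    -> gateway 130.56.240.1
    # => ("0.0.0.0/0", "130.56.240.1")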
LP: #1821102 --- cloudinit/net/__init__.py | 34 +++++++- cloudinit/net/dhcp.py | 90 +++++++++++++++++++ cloudinit/net/tests/test_dhcp.py | 120 +++++++++++++++++++++++++- cloudinit/net/tests/test_init.py | 39 +++++++++ tests/unittests/test_datasource/test_azure.py | 6 +- tests/unittests/test_datasource/test_ec2.py | 3 +- 6 files changed, 286 insertions(+), 6 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index e758006f..624c9b42 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -679,7 +679,7 @@ class EphemeralIPv4Network(object): """ def __init__(self, interface, ip, prefix_or_mask, broadcast, router=None, - connectivity_url=None): + connectivity_url=None, static_routes=None): """Setup context manager and validate call signature. @param interface: Name of the network interface to bring up. @@ -690,6 +690,7 @@ class EphemeralIPv4Network(object): @param router: Optionally the default gateway IP. @param connectivity_url: Optionally, a URL to verify if a usable connection already exists. + @param static_routes: Optionally a list of static routes from DHCP """ if not all([interface, ip, prefix_or_mask, broadcast]): raise ValueError( @@ -706,6 +707,7 @@ class EphemeralIPv4Network(object): self.ip = ip self.broadcast = broadcast self.router = router + self.static_routes = static_routes self.cleanup_cmds = [] # List of commands to run to cleanup state. def __enter__(self): @@ -718,7 +720,21 @@ class EphemeralIPv4Network(object): return self._bringup_device() - if self.router: + + # rfc3442 requires us to ignore the router config *if* classless static + # routes are provided. + # + # https://tools.ietf.org/html/rfc3442 + # + # If the DHCP server returns both a Classless Static Routes option and + # a Router option, the DHCP client MUST ignore the Router option. + # + # Similarly, if the DHCP server returns both a Classless Static Routes + # option and a Static Routes option, the DHCP client MUST ignore the + # Static Routes option. 
+ if self.static_routes: + self._bringup_static_routes() + elif self.router: self._bringup_router() def __exit__(self, excp_type, excp_value, excp_traceback): @@ -762,6 +778,20 @@ class EphemeralIPv4Network(object): ['ip', '-family', 'inet', 'addr', 'del', cidr, 'dev', self.interface]) + def _bringup_static_routes(self): + # static_routes = [("169.254.169.254/32", "130.56.248.255"), + # ("0.0.0.0/0", "130.56.240.1")] + for net_address, gateway in self.static_routes: + via_arg = [] + if gateway != "0.0.0.0/0": + via_arg = ['via', gateway] + util.subp( + ['ip', '-4', 'route', 'add', net_address] + via_arg + + ['dev', self.interface], capture=True) + self.cleanup_cmds.insert( + 0, ['ip', '-4', 'route', 'del', net_address] + via_arg + + ['dev', self.interface]) + def _bringup_router(self): """Perform the ip commands to fully setup the router if needed.""" # Check if a default route exists and exit if it does diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index c98a97cd..17379918 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -92,10 +92,14 @@ class EphemeralDHCPv4(object): nmap = {'interface': 'interface', 'ip': 'fixed-address', 'prefix_or_mask': 'subnet-mask', 'broadcast': 'broadcast-address', + 'static_routes': 'rfc3442-classless-static-routes', 'router': 'routers'} kwargs = dict([(k, self.lease.get(v)) for k, v in nmap.items()]) if not kwargs['broadcast']: kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip']) + if kwargs['static_routes']: + kwargs['static_routes'] = ( + parse_static_routes(kwargs['static_routes'])) if self.connectivity_url: kwargs['connectivity_url'] = self.connectivity_url ephipv4 = EphemeralIPv4Network(**kwargs) @@ -272,4 +276,90 @@ def networkd_get_option_from_leases(keyname, leases_d=None): return data[keyname] return None + +def parse_static_routes(rfc3442): + """ parse rfc3442 format and return a list containing tuple of strings. + + The tuple is composed of the network_address (including net length) and + gateway for a parsed static route. + + @param rfc3442: string in rfc3442 format + @returns: list of tuple(str, str) for all valid parsed routes until the + first parsing error. + + E.g. + sr = parse_state_routes("32,169,254,169,254,130,56,248,255,0,130,56,240,1") + sr = [ + ("169.254.169.254/32", "130.56.248.255"), ("0.0.0.0/0", "130.56.240.1") + ] + + Python version of isc-dhclient's hooks: + /etc/dhcp/dhclient-exit-hooks.d/rfc3442-classless-routes + """ + # raw strings from dhcp lease may end in semi-colon + rfc3442 = rfc3442.rstrip(";") + tokens = rfc3442.split(',') + static_routes = [] + + def _trunc_error(cidr, required, remain): + msg = ("RFC3442 string malformed. Current route has CIDR of %s " + "and requires %s significant octets, but only %s remain. 
" + "Verify DHCP rfc3442-classless-static-routes value: %s" + % (cidr, required, remain, rfc3442)) + LOG.error(msg) + + current_idx = 0 + for idx, tok in enumerate(tokens): + if idx < current_idx: + continue + net_length = int(tok) + if net_length in range(25, 33): + req_toks = 9 + if len(tokens[idx:]) < req_toks: + _trunc_error(net_length, req_toks, len(tokens[idx:])) + return static_routes + net_address = ".".join(tokens[idx+1:idx+5]) + gateway = ".".join(tokens[idx+5:idx+req_toks]) + current_idx = idx + req_toks + elif net_length in range(17, 25): + req_toks = 8 + if len(tokens[idx:]) < req_toks: + _trunc_error(net_length, req_toks, len(tokens[idx:])) + return static_routes + net_address = ".".join(tokens[idx+1:idx+4] + ["0"]) + gateway = ".".join(tokens[idx+4:idx+req_toks]) + current_idx = idx + req_toks + elif net_length in range(9, 17): + req_toks = 7 + if len(tokens[idx:]) < req_toks: + _trunc_error(net_length, req_toks, len(tokens[idx:])) + return static_routes + net_address = ".".join(tokens[idx+1:idx+3] + ["0", "0"]) + gateway = ".".join(tokens[idx+3:idx+req_toks]) + current_idx = idx + req_toks + elif net_length in range(1, 9): + req_toks = 6 + if len(tokens[idx:]) < req_toks: + _trunc_error(net_length, req_toks, len(tokens[idx:])) + return static_routes + net_address = ".".join(tokens[idx+1:idx+2] + ["0", "0", "0"]) + gateway = ".".join(tokens[idx+2:idx+req_toks]) + current_idx = idx + req_toks + elif net_length == 0: + req_toks = 5 + if len(tokens[idx:]) < req_toks: + _trunc_error(net_length, req_toks, len(tokens[idx:])) + return static_routes + net_address = "0.0.0.0" + gateway = ".".join(tokens[idx+1:idx+req_toks]) + current_idx = idx + req_toks + else: + LOG.error('Parsed invalid net length "%s". Verify DHCP ' + 'rfc3442-classless-static-routes value.', net_length) + return static_routes + + static_routes.append(("%s/%s" % (net_address, net_length), gateway)) + + return static_routes + # vi: ts=4 expandtab diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py index 51390249..91f503c9 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/cloudinit/net/tests/test_dhcp.py @@ -8,7 +8,8 @@ from textwrap import dedent import cloudinit.net as net from cloudinit.net.dhcp import ( InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery, - parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases) + parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases, + parse_static_routes) from cloudinit.util import ensure_file, write_file from cloudinit.tests.helpers import ( CiTestCase, HttprettyTestCase, mock, populate_dir, wrap_and_call) @@ -64,6 +65,123 @@ class TestParseDHCPLeasesFile(CiTestCase): self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file)) +class TestDHCPRFC3442(CiTestCase): + + def test_parse_lease_finds_rfc3442_classless_static_routes(self): + """parse_dhcp_lease_file returns rfc3442-classless-static-routes.""" + lease_file = self.tmp_path('leases') + content = dedent(""" + lease { + interface "wlp3s0"; + fixed-address 192.168.2.74; + option subnet-mask 255.255.255.0; + option routers 192.168.2.1; + option rfc3442-classless-static-routes 0,130,56,240,1; + renew 4 2017/07/27 18:02:30; + expire 5 2017/07/28 07:08:15; + } + """) + expected = [ + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', + 'rfc3442-classless-static-routes': '0,130,56,240,1', + 'renew': '4 2017/07/27 18:02:30', + 'expire': '5 2017/07/28 07:08:15'}] + write_file(lease_file, content) + 
self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file)) + + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4): + """EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network""" + lease = [ + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', + 'rfc3442-classless-static-routes': '0,130,56,240,1', + 'renew': '4 2017/07/27 18:02:30', + 'expire': '5 2017/07/28 07:08:15'}] + m_maybe.return_value = lease + eph = net.dhcp.EphemeralDHCPv4() + eph.obtain_lease() + expected_kwargs = { + 'interface': 'wlp3s0', + 'ip': '192.168.2.74', + 'prefix_or_mask': '255.255.255.0', + 'broadcast': '192.168.2.255', + 'static_routes': [('0.0.0.0/0', '130.56.240.1')], + 'router': '192.168.2.1'} + m_ipv4.assert_called_with(**expected_kwargs) + + +class TestDHCPParseStaticRoutes(CiTestCase): + + with_logs = True + + def parse_static_routes_empty_string(self): + self.assertEqual([], parse_static_routes("")) + + def test_parse_static_routes_invalid_input_returns_empty_list(self): + rfc3442 = "32,169,254,169,254,130,56,248" + self.assertEqual([], parse_static_routes(rfc3442)) + + def test_parse_static_routes_bogus_width_returns_empty_list(self): + rfc3442 = "33,169,254,169,254,130,56,248" + self.assertEqual([], parse_static_routes(rfc3442)) + + def test_parse_static_routes_single_ip(self): + rfc3442 = "32,169,254,169,254,130,56,248,255" + self.assertEqual([('169.254.169.254/32', '130.56.248.255')], + parse_static_routes(rfc3442)) + + def test_parse_static_routes_single_ip_handles_trailing_semicolon(self): + rfc3442 = "32,169,254,169,254,130,56,248,255;" + self.assertEqual([('169.254.169.254/32', '130.56.248.255')], + parse_static_routes(rfc3442)) + + def test_parse_static_routes_default_route(self): + rfc3442 = "0,130,56,240,1" + self.assertEqual([('0.0.0.0/0', '130.56.240.1')], + parse_static_routes(rfc3442)) + + def test_parse_static_routes_class_c_b_a(self): + class_c = "24,192,168,74,192,168,0,4" + class_b = "16,172,16,172,16,0,4" + class_a = "8,10,10,0,0,4" + rfc3442 = ",".join([class_c, class_b, class_a]) + self.assertEqual(sorted([ + ("192.168.74.0/24", "192.168.0.4"), + ("172.16.0.0/16", "172.16.0.4"), + ("10.0.0.0/8", "10.0.0.4") + ]), sorted(parse_static_routes(rfc3442))) + + def test_parse_static_routes_logs_error_truncated(self): + bad_rfc3442 = { + "class_c": "24,169,254,169,10", + "class_b": "16,172,16,10", + "class_a": "8,10,10", + "gateway": "0,0", + "netlen": "33,0", + } + for rfc3442 in bad_rfc3442.values(): + self.assertEqual([], parse_static_routes(rfc3442)) + + logs = self.logs.getvalue() + self.assertEqual(len(bad_rfc3442.keys()), len(logs.splitlines())) + + def test_parse_static_routes_returns_valid_routes_until_parse_err(self): + class_c = "24,192,168,74,192,168,0,4" + class_b = "16,172,16,172,16,0,4" + class_a_error = "8,10,10,0,0" + rfc3442 = ",".join([class_c, class_b, class_a_error]) + self.assertEqual(sorted([ + ("192.168.74.0/24", "192.168.0.4"), + ("172.16.0.0/16", "172.16.0.4"), + ]), sorted(parse_static_routes(rfc3442))) + + logs = self.logs.getvalue() + self.assertIn(rfc3442, logs.splitlines()[0]) + + class TestDHCPDiscoveryClean(CiTestCase): with_logs = True diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index 6d2affe7..d393e6ad 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -549,6 +549,45 @@ 
class TestEphemeralIPV4Network(CiTestCase): self.assertEqual(expected_setup_calls, m_subp.call_args_list) m_subp.assert_has_calls(expected_teardown_calls) + def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp): + params = { + 'interface': 'eth0', 'ip': '192.168.2.2', + 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255', + 'static_routes': [('169.254.169.254/32', '192.168.2.1'), + ('0.0.0.0/0', '192.168.2.1')], + 'router': '192.168.2.1'} + expected_setup_calls = [ + mock.call( + ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24', + 'broadcast', '192.168.2.255', 'dev', 'eth0'], + capture=True, update_env={'LANG': 'C'}), + mock.call( + ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'], + capture=True), + mock.call( + ['ip', '-4', 'route', 'add', '169.254.169.254/32', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-4', 'route', 'add', '0.0.0.0/0', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True)] + expected_teardown_calls = [ + mock.call( + ['ip', '-4', 'route', 'del', '0.0.0.0/0', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-4', 'route', 'del', '169.254.169.254/32', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-family', 'inet', 'link', 'set', 'dev', + 'eth0', 'down'], capture=True), + mock.call( + ['ip', '-family', 'inet', 'addr', 'del', + '192.168.2.2/24', 'dev', 'eth0'], capture=True) + ] + with net.EphemeralIPv4Network(**params): + self.assertEqual(expected_setup_calls, m_subp.call_args_list) + m_subp.assert_has_calls(expected_setup_calls + expected_teardown_calls) + class TestApplyNetworkCfgNames(CiTestCase): V1_CONFIG = textwrap.dedent("""\ diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index f27ef21b..2de2aea2 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1807,7 +1807,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1') + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) self.assertEqual(m_net.call_count, 2) def test__reprovision_calls__poll_imds(self, fake_resp, @@ -1845,7 +1846,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1') + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) self.assertEqual(m_net.call_count, 2) diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 20d59bfd..1ec8e009 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -538,7 +538,8 @@ class TestEc2(test_helpers.HttprettyTestCase): m_dhcp.assert_called_once_with('eth9') m_net.assert_called_once_with( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1') + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) self.assertIn('Crawl of metadata service took', self.logs.getvalue()) -- cgit v1.2.3 From 1dbede64dc645b090b4047a105143b5d5090d214 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 23 Jul 2019 22:07:11 +0000 
Subject: stages: allow data sources to override network config source order Currently, if a platform provides any network configuration via the "cmdline" method (i.e. network-data=... on the kernel command line, ip=... on the kernel command line, or iBFT config via /run/net-*.conf), the value of the data source's network_config property is completely ignored. This means that on platforms that use iSCSI boot (such as Oracle Compute Infrastructure), there is no way for the data source to configure any network interfaces other than those that have already been configured by the initramfs. This change allows data sources to specify the order in which network configuration sources are considered. Data sources that opt to use this mechanism will be expected to consume the command line network data and integrate it themselves. (The generic merging of network configuration sources was considered, but we concluded that the single use case we have presently (a) didn't warrant the increased complexity, and (b) didn't give us a broad enough view to be sure that our generic implementation would be sufficiently generic. This change in no way precludes a merging strategy in future.) --- cloudinit/sources/__init__.py | 16 ++++++ cloudinit/stages.py | 37 ++++++++++---- cloudinit/tests/test_stages.py | 71 ++++++++++++++++++++++---- tests/unittests/test_datasource/test_common.py | 11 ++++ 4 files changed, 116 insertions(+), 19 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index e6966b31..9d249366 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -66,6 +66,13 @@ CLOUD_ID_REGION_PREFIX_MAP = { 'china': ('azure-china', lambda c: c == 'azure'), # only change azure } +# NetworkConfigSource represents the canonical list of network config sources +# that cloud-init knows about. (Python 2.7 lacks PEP 435, so use a singleton +# namedtuple as an enum; see https://stackoverflow.com/a/6971002) +_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback') +NetworkConfigSource = namedtuple('NetworkConfigSource', + _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES) + class DataSourceNotFoundException(Exception): pass @@ -153,6 +160,15 @@ class DataSource(object): # Track the discovered fallback nic for use in configuration generation. _fallback_interface = None + # The network configuration sources that should be considered for this data + # source. (The first source in this list that provides network + # configuration will be used without considering any that follow.) This + # should always be a subset of the members of NetworkConfigSource with no + # duplicate entries. 
+ network_config_sources = (NetworkConfigSource.cmdline, + NetworkConfigSource.system_cfg, + NetworkConfigSource.ds) + # read_url_params url_max_wait = -1 # max_wait < 0 means do not wait url_timeout = 10 # timeout for each metadata url read attempt diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 5f9d47b9..6bcda2d1 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -24,6 +24,7 @@ from cloudinit.handlers.shell_script import ShellScriptPartHandler from cloudinit.handlers.upstart_job import UpstartJobPartHandler from cloudinit.event import EventType +from cloudinit.sources import NetworkConfigSource from cloudinit import cloud from cloudinit import config @@ -630,19 +631,37 @@ class Init(object): if os.path.exists(disable_file): return (None, disable_file) - cmdline_cfg = ('cmdline', cmdline.read_kernel_cmdline_config()) - dscfg = ('ds', None) + available_cfgs = { + NetworkConfigSource.cmdline: cmdline.read_kernel_cmdline_config(), + NetworkConfigSource.ds: None, + NetworkConfigSource.system_cfg: self.cfg.get('network'), + } + if self.datasource and hasattr(self.datasource, 'network_config'): - dscfg = ('ds', self.datasource.network_config) - sys_cfg = ('system_cfg', self.cfg.get('network')) + available_cfgs[NetworkConfigSource.ds] = ( + self.datasource.network_config) - for loc, ncfg in (cmdline_cfg, sys_cfg, dscfg): + if self.datasource: + order = self.datasource.network_config_sources + else: + order = sources.DataSource.network_config_sources + for cfg_source in order: + if not hasattr(NetworkConfigSource, cfg_source): + LOG.warning('data source specifies an invalid network' + ' cfg_source: %s', cfg_source) + continue + if cfg_source not in available_cfgs: + LOG.warning('data source specifies an unavailable network' + ' cfg_source: %s', cfg_source) + continue + ncfg = available_cfgs[cfg_source] if net.is_disabled_cfg(ncfg): - LOG.debug("network config disabled by %s", loc) - return (None, loc) + LOG.debug("network config disabled by %s", cfg_source) + return (None, cfg_source) if ncfg: - return (ncfg, loc) - return (self.distro.generate_fallback_config(), "fallback") + return (ncfg, cfg_source) + return (self.distro.generate_fallback_config(), + NetworkConfigSource.fallback) def _apply_netcfg_names(self, netcfg): try: diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py index 9b483121..7e13e29d 100644 --- a/cloudinit/tests/test_stages.py +++ b/cloudinit/tests/test_stages.py @@ -6,6 +6,7 @@ import os from cloudinit import stages from cloudinit import sources +from cloudinit.sources import NetworkConfigSource from cloudinit.event import EventType from cloudinit.util import write_file @@ -63,7 +64,7 @@ class TestInit(CiTestCase): """find_networking_config returns when disabled by kernel cmdline.""" m_cmdline.return_value = {'config': 'disabled'} self.assertEqual( - (None, 'cmdline'), + (None, NetworkConfigSource.cmdline), self.init._find_networking_config()) self.assertEqual('DEBUG: network config disabled by cmdline\n', self.logs.getvalue()) @@ -78,7 +79,7 @@ class TestInit(CiTestCase): self.init.datasource = FakeDataSource( network_config={'config': 'disabled'}) self.assertEqual( - (None, 'ds'), + (None, NetworkConfigSource.ds), self.init._find_networking_config()) self.assertEqual('DEBUG: network config disabled by ds\n', self.logs.getvalue()) @@ -90,11 +91,61 @@ class TestInit(CiTestCase): self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, 'network': {'config': 'disabled'}} self.assertEqual( - (None, 'system_cfg'), + 
(None, NetworkConfigSource.system_cfg), self.init._find_networking_config()) self.assertEqual('DEBUG: network config disabled by system_cfg\n', self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test__find_networking_config_uses_datasrc_order(self, m_cmdline): + """find_networking_config should check sources in DS defined order""" + # cmdline, which would normally be preferred over other sources, + # disables networking; in this case, though, the DS moves cmdline later + # so its own config is preferred + m_cmdline.return_value = {'config': 'disabled'} + + ds_net_cfg = {'config': {'needle': True}} + self.init.datasource = FakeDataSource(network_config=ds_net_cfg) + self.init.datasource.network_config_sources = [ + NetworkConfigSource.ds, NetworkConfigSource.system_cfg, + NetworkConfigSource.cmdline] + + self.assertEqual( + (ds_net_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test__find_networking_config_warns_if_datasrc_uses_invalid_src( + self, m_cmdline): + """find_networking_config should check sources in DS defined order""" + ds_net_cfg = {'config': {'needle': True}} + self.init.datasource = FakeDataSource(network_config=ds_net_cfg) + self.init.datasource.network_config_sources = [ + 'invalid_src', NetworkConfigSource.ds] + + self.assertEqual( + (ds_net_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + self.assertIn('WARNING: data source specifies an invalid network' + ' cfg_source: invalid_src', + self.logs.getvalue()) + + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test__find_networking_config_warns_if_datasrc_uses_unavailable_src( + self, m_cmdline): + """find_networking_config should check sources in DS defined order""" + ds_net_cfg = {'config': {'needle': True}} + self.init.datasource = FakeDataSource(network_config=ds_net_cfg) + self.init.datasource.network_config_sources = [ + NetworkConfigSource.fallback, NetworkConfigSource.ds] + + self.assertEqual( + (ds_net_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + self.assertIn('WARNING: data source specifies an unavailable network' + ' cfg_source: fallback', + self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') def test_wb__find_networking_config_returns_kernel(self, m_cmdline): """find_networking_config returns kernel cmdline config if present.""" @@ -105,7 +156,7 @@ class TestInit(CiTestCase): self.init.datasource = FakeDataSource( network_config={'config': ['fakedatasource']}) self.assertEqual( - (expected_cfg, 'cmdline'), + (expected_cfg, NetworkConfigSource.cmdline), self.init._find_networking_config()) @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') @@ -118,7 +169,7 @@ class TestInit(CiTestCase): self.init.datasource = FakeDataSource( network_config={'config': ['fakedatasource']}) self.assertEqual( - (expected_cfg, 'system_cfg'), + (expected_cfg, NetworkConfigSource.system_cfg), self.init._find_networking_config()) @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') @@ -129,7 +180,7 @@ class TestInit(CiTestCase): expected_cfg = {'config': ['fakedatasource']} self.init.datasource = FakeDataSource(network_config=expected_cfg) self.assertEqual( - (expected_cfg, 'ds'), + (expected_cfg, NetworkConfigSource.ds), self.init._find_networking_config()) @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') @@ -148,7 +199,7 @@ class 
TestInit(CiTestCase): distro = self.init.distro distro.generate_fallback_config = fake_generate_fallback self.assertEqual( - (fake_cfg, 'fallback'), + (fake_cfg, NetworkConfigSource.fallback), self.init._find_networking_config()) self.assertNotIn('network config disabled', self.logs.getvalue()) @@ -177,7 +228,7 @@ class TestInit(CiTestCase): 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} def fake_network_config(): - return net_cfg, 'fallback' + return net_cfg, NetworkConfigSource.fallback m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} @@ -199,7 +250,7 @@ class TestInit(CiTestCase): 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} def fake_network_config(): - return net_cfg, 'fallback' + return net_cfg, NetworkConfigSource.fallback self.init._find_networking_config = fake_network_config self.init.apply_network_config(True) @@ -223,7 +274,7 @@ class TestInit(CiTestCase): 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} def fake_network_config(): - return net_cfg, 'fallback' + return net_cfg, NetworkConfigSource.fallback m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 6b01a4ea..2a9cfb29 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -83,4 +83,15 @@ class ExpectedDataSources(test_helpers.TestCase): self.assertEqual(set([AliYun.DataSourceAliYun]), set(found)) +class TestDataSourceInvariants(test_helpers.TestCase): + + def test_data_sources_have_valid_network_config_sources(self): + for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: + for cfg_src in ds.network_config_sources: + fail_msg = ('{} has an invalid network_config_sources entry:' + ' {}'.format(str(ds), cfg_src)) + self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src), + fail_msg) + + # vi: ts=4 expandtab -- cgit v1.2.3 From 4dfed67d0e82970f8717d0b524c593962698ca4f Mon Sep 17 00:00:00 2001 From: Chris Glass Date: Thu, 8 Aug 2019 17:09:57 +0000 Subject: New data source for the Exoscale.com cloud platform - dsidentify switches to the new Exoscale datasource on matching DMI name - New Exoscale datasource added Signed-off-by: Mathieu Corbin --- cloudinit/apport.py | 1 + cloudinit/settings.py | 1 + cloudinit/sources/DataSourceExoscale.py | 258 +++++++++++++++++++++++ doc/rtd/topics/datasources.rst | 1 + doc/rtd/topics/datasources/exoscale.rst | 68 ++++++ tests/unittests/test_datasource/test_common.py | 2 + tests/unittests/test_datasource/test_exoscale.py | 203 ++++++++++++++++++ tools/ds-identify | 7 +- 8 files changed, 540 insertions(+), 1 deletion(-) create mode 100644 cloudinit/sources/DataSourceExoscale.py create mode 100644 doc/rtd/topics/datasources/exoscale.rst create mode 100644 tests/unittests/test_datasource/test_exoscale.py (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 22cb7fde..003ff1ff 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -23,6 +23,7 @@ KNOWN_CLOUD_NAMES = [ 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', + 'Exoscale', 'Hetzner Cloud', 'IBM - (aka SoftLayer or BlueMix)', 'LXD', diff --git a/cloudinit/settings.py b/cloudinit/settings.py index b1ebaade..2060d81f 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -39,6 +39,7 @@ CFG_BUILTIN = { 'Hetzner', 'IBMCloud', 'Oracle', + 'Exoscale', # At the end to act as a 'catch' when none of the above work... 
'None', ], diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py new file mode 100644 index 00000000..52e7f6f6 --- /dev/null +++ b/cloudinit/sources/DataSourceExoscale.py @@ -0,0 +1,258 @@ +# Author: Mathieu Corbin +# Author: Christopher Glass +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import ec2_utils as ec2 +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import url_helper +from cloudinit import util + +LOG = logging.getLogger(__name__) + +METADATA_URL = "http://169.254.169.254" +API_VERSION = "1.0" +PASSWORD_SERVER_PORT = 8080 + +URL_TIMEOUT = 10 +URL_RETRIES = 6 + +EXOSCALE_DMI_NAME = "Exoscale" + +BUILTIN_DS_CONFIG = { + # We run the set password config module on every boot in order to enable + # resetting the instance's password via the exoscale console (and a + # subsequent instance reboot). + 'cloud_config_modules': [["set-passwords", "always"]] +} + + +class DataSourceExoscale(sources.DataSource): + + dsname = 'Exoscale' + + def __init__(self, sys_cfg, distro, paths): + super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths) + LOG.debug("Initializing the Exoscale datasource") + + self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL) + self.api_version = self.ds_cfg.get('api_version', API_VERSION) + self.password_server_port = int( + self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT)) + self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT) + self.url_retries = self.ds_cfg.get('retries', URL_RETRIES) + + self.extra_config = BUILTIN_DS_CONFIG + + def wait_for_metadata_service(self): + """Wait for the metadata service to be reachable.""" + + metadata_url = "{}/{}/meta-data/instance-id".format( + self.metadata_url, self.api_version) + + url = url_helper.wait_for_url( + urls=[metadata_url], + max_wait=self.url_max_wait, + timeout=self.url_timeout, + status_cb=LOG.critical) + + return bool(url) + + def crawl_metadata(self): + """ + Crawl the metadata service when available. + + @returns: Dictionary of crawled metadata content. + """ + metadata_ready = util.log_time( + logfunc=LOG.info, + msg='waiting for the metadata service', + func=self.wait_for_metadata_service) + + if not metadata_ready: + return {} + + return read_metadata(self.metadata_url, self.api_version, + self.password_server_port, self.url_timeout, + self.url_retries) + + def _get_data(self): + """Fetch the user data, the metadata and the VM password + from the metadata service. + + Please refer to the datasource documentation for details on how the + metadata server and password server are crawled. + """ + if not self._is_platform_viable(): + return False + + data = util.log_time( + logfunc=LOG.debug, + msg='Crawl of metadata service', + func=self.crawl_metadata) + + if not data: + return False + + self.userdata_raw = data['user-data'] + self.metadata = data['meta-data'] + password = data.get('password') + + password_config = {} + if password: + # Since we have a password, let's make sure we are allowed to use + # it by allowing ssh_pwauth. + # The password module's default behavior is to leave the + # configuration as-is in this regard, so that means it will either + # leave the password always disabled if no password is ever set, or + # leave the password login enabled if we set it once. 
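The password-server handshake that get_password() (defined later in this patch) performs is worth spelling out: one GET with a "DomU_Request: send_my_password" header fetches the password, an empty or 'saved_password' response means there is nothing to set, and a second GET marks the password saved so later boots do not reset it. A standalone sketch using the requests library (cloud-init itself goes through url_helper; the names here are illustrative):

    import requests

    PASSWORD_URL = 'http://169.254.169.254:8080/1.0/'  # metadata_url:port/api_version/

    def fetch_password_once(timeout=10):
        # First request returns the password, or '' / 'saved_password'.
        resp = requests.get(PASSWORD_URL, timeout=timeout,
                            headers={'DomU_Request': 'send_my_password'})
        password = resp.text
        if password in ('', 'saved_password'):
            return None
        # Second request tells the server not to hand the password out again.
        requests.get(PASSWORD_URL, timeout=timeout,
                     headers={'DomU_Request': 'saved_password'})
        return password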
+ password_config = { + 'ssh_pwauth': True, + 'password': password, + 'chpasswd': { + 'expire': False, + }, + } + + # builtin extra_config overrides password_config + self.extra_config = util.mergemanydict( + [self.extra_config, password_config]) + + return True + + def get_config_obj(self): + return self.extra_config + + def _is_platform_viable(self): + return util.read_dmi_data('system-product-name').startswith( + EXOSCALE_DMI_NAME) + + +# Used to match classes to dependencies +datasources = [ + (DataSourceExoscale, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + + +def get_password(metadata_url=METADATA_URL, + api_version=API_VERSION, + password_server_port=PASSWORD_SERVER_PORT, + url_timeout=URL_TIMEOUT, + url_retries=URL_RETRIES): + """Obtain the VM's password if set. + + Once fetched the password is marked saved. Future calls to this method may + return empty string or 'saved_password'.""" + password_url = "{}:{}/{}/".format(metadata_url, password_server_port, + api_version) + response = url_helper.read_file_or_url( + password_url, + ssl_details=None, + headers={"DomU_Request": "send_my_password"}, + timeout=url_timeout, + retries=url_retries) + password = response.contents.decode('utf-8') + # the password is empty or already saved + # Note: the original metadata server would answer an additional + # 'bad_request' status, but the Exoscale implementation does not. + if password in ['', 'saved_password']: + return None + # save the password + url_helper.read_file_or_url( + password_url, + ssl_details=None, + headers={"DomU_Request": "saved_password"}, + timeout=url_timeout, + retries=url_retries) + return password + + +def read_metadata(metadata_url=METADATA_URL, + api_version=API_VERSION, + password_server_port=PASSWORD_SERVER_PORT, + url_timeout=URL_TIMEOUT, + url_retries=URL_RETRIES): + """Query the metadata server and return the retrieved data.""" + crawled_metadata = {} + crawled_metadata['_metadata_api_version'] = api_version + try: + crawled_metadata['user-data'] = ec2.get_instance_userdata( + api_version, + metadata_url, + timeout=url_timeout, + retries=url_retries) + crawled_metadata['meta-data'] = ec2.get_instance_metadata( + api_version, + metadata_url, + timeout=url_timeout, + retries=url_retries) + except Exception as e: + util.logexc(LOG, "failed reading from metadata url %s (%s)", + metadata_url, e) + return {} + + try: + crawled_metadata['password'] = get_password( + api_version=api_version, + metadata_url=metadata_url, + password_server_port=password_server_port, + url_retries=url_retries, + url_timeout=url_timeout) + except Exception as e: + util.logexc(LOG, "failed to read from password server url %s:%s (%s)", + metadata_url, password_server_port, e) + + return crawled_metadata + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='Query Exoscale Metadata') + parser.add_argument( + "--endpoint", + metavar="URL", + help="The url of the metadata service.", + default=METADATA_URL) + parser.add_argument( + "--version", + metavar="VERSION", + help="The version of the metadata endpoint to query.", + default=API_VERSION) + parser.add_argument( + "--retries", + metavar="NUM", + type=int, + help="The number of retries querying the endpoint.", + default=URL_RETRIES) + parser.add_argument( + "--timeout", + metavar="NUM", + type=int, + help="The time in seconds to 
wait before timing out.", + default=URL_TIMEOUT) + parser.add_argument( + "--password-port", + metavar="PORT", + type=int, + help="The port on which the password endpoint listens", + default=PASSWORD_SERVER_PORT) + + args = parser.parse_args() + + data = read_metadata( + metadata_url=args.endpoint, + api_version=args.version, + password_server_port=args.password_port, + url_timeout=args.timeout, + url_retries=args.retries) + + print(util.json_dumps(data)) + +# vi: ts=4 expandtab diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 648c6068..2148cd5e 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -155,6 +155,7 @@ Follow for more information. datasources/configdrive.rst datasources/digitalocean.rst datasources/ec2.rst + datasources/exoscale.rst datasources/maas.rst datasources/nocloud.rst datasources/opennebula.rst diff --git a/doc/rtd/topics/datasources/exoscale.rst b/doc/rtd/topics/datasources/exoscale.rst new file mode 100644 index 00000000..27aec9cd --- /dev/null +++ b/doc/rtd/topics/datasources/exoscale.rst @@ -0,0 +1,68 @@ +.. _datasource_exoscale: + +Exoscale +======== + +This datasource supports reading from the metadata server used on the +`Exoscale platform `_. + +Use of the Exoscale datasource is recommended to benefit from new features of +the Exoscale platform. + +The datasource relies on the availability of a compatible metadata server +(``http://169.254.169.254`` is used by default) and its companion password +server, reachable at the same address (by default on port 8080). + +Crawling of metadata +-------------------- + +The metadata service and password server are crawled slightly differently: + + * The "metadata service" is crawled every boot. + * The password server is also crawled every boot (the Exoscale datasource + forces the password module to run with "frequency always"). + +In the password server case, the following rules apply in order to enable the +"restore instance password" functionality: + + * If a password is returned by the password server, it is then marked "saved" + by the cloud-init datasource. Subsequent boots will skip setting the password + (the password server will return "saved_password"). + * When the instance password is reset (via the Exoscale UI), the password + server will return the non-empty password at next boot, therefore causing + cloud-init to reset the instance's password. + +Configuration +------------- + +Users of this datasource are discouraged from changing the default settings +unless instructed to by Exoscale support. + +The following settings are available and can be set for the datasource in system +configuration (in `/etc/cloud/cloud.cfg.d/`). + +The settings available are: + + * **metadata_url**: The URL for the metadata service (defaults to + ``http://169.254.169.254``) + * **api_version**: The API version path on which to query the instance metadata + (defaults to ``1.0``) + * **password_server_port**: The port (on the metadata server) on which the + password server listens (defaults to ``8080``). + * **timeout**: the timeout value provided to urlopen for each individual http + request. (defaults to ``10``) + * **retries**: The number of retries that should be done for an http request + (defaults to ``6``) + + +An example configuration with the default values is provided below: + +.. 
sourcecode:: yaml + + datasource: + Exoscale: + metadata_url: "http://169.254.169.254" + api_version: "1.0" + password_server_port: 8080 + timeout: 10 + retries: 6 diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 2a9cfb29..61a7a762 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -13,6 +13,7 @@ from cloudinit.sources import ( DataSourceConfigDrive as ConfigDrive, DataSourceDigitalOcean as DigitalOcean, DataSourceEc2 as Ec2, + DataSourceExoscale as Exoscale, DataSourceGCE as GCE, DataSourceHetzner as Hetzner, DataSourceIBMCloud as IBMCloud, @@ -53,6 +54,7 @@ DEFAULT_NETWORK = [ CloudStack.DataSourceCloudStack, DSNone.DataSourceNone, Ec2.DataSourceEc2, + Exoscale.DataSourceExoscale, GCE.DataSourceGCE, MAAS.DataSourceMAAS, NoCloud.DataSourceNoCloudNet, diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py new file mode 100644 index 00000000..350c3304 --- /dev/null +++ b/tests/unittests/test_datasource/test_exoscale.py @@ -0,0 +1,203 @@ +# Author: Mathieu Corbin +# Author: Christopher Glass +# +# This file is part of cloud-init. See LICENSE file for license information. +from cloudinit import helpers +from cloudinit.sources.DataSourceExoscale import ( + API_VERSION, + DataSourceExoscale, + METADATA_URL, + get_password, + PASSWORD_SERVER_PORT, + read_metadata) +from cloudinit.tests.helpers import HttprettyTestCase, mock + +import httpretty +import requests + + +TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL, + PASSWORD_SERVER_PORT, + API_VERSION) + +TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, + API_VERSION) + +TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, + API_VERSION) + + +@httpretty.activate +class TestDatasourceExoscale(HttprettyTestCase): + + def setUp(self): + super(TestDatasourceExoscale, self).setUp() + self.tmp = self.tmp_dir() + self.password_url = TEST_PASSWORD_URL + self.metadata_url = TEST_METADATA_URL + self.userdata_url = TEST_USERDATA_URL + + def test_password_saved(self): + """The password is not set when it is not found + in the metadata service.""" + httpretty.register_uri(httpretty.GET, + self.password_url, + body="saved_password") + self.assertFalse(get_password()) + + def test_password_empty(self): + """No password is set if the metadata service returns + an empty string.""" + httpretty.register_uri(httpretty.GET, + self.password_url, + body="") + self.assertFalse(get_password()) + + def test_password(self): + """The password is set to what is found in the metadata + service.""" + expected_password = "p@ssw0rd" + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_password) + password = get_password() + self.assertEqual(expected_password, password) + + def test_get_data(self): + """The datasource conforms to expected behavior when supplied + full test data.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_password = "p@ssw0rd" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_password) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + 
"{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'ssh_pwauth': True, + 'password': expected_password, + 'cloud_config_modules': [ + ["set-passwords", "always"]], + 'chpasswd': { + 'expire': False, + }}) + + def test_get_data_saved_password(self): + """The datasource conforms to expected behavior when saved_password is + returned by the password server.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_answer = "saved_password" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'cloud_config_modules': [ + ["set-passwords", "always"]]}) + + def test_get_data_no_password(self): + """The datasource conforms to expected behavior when no password is + returned by the password server.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_answer = "" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'cloud_config_modules': [ + ["set-passwords", "always"]]}) + + @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') + def test_read_metadata_when_password_server_unreachable(self, m_password): + """The read_metadata function returns partial results in case the + password server (only) is unreachable.""" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + + m_password.side_effect = requests.Timeout('Fake Connection Timeout') + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + 
body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + + result = read_metadata() + + self.assertIsNone(result.get("password")) + self.assertEqual(result.get("user-data").decode("utf-8"), + expected_userdata) + + def test_non_viable_platform(self): + """The datasource fails fast when the platform is not viable.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: False + self.assertFalse(ds._get_data()) diff --git a/tools/ds-identify b/tools/ds-identify index 0305e361..e0d4865c 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -124,7 +124,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner IBMCloud Oracle" +OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -553,6 +553,11 @@ dscheck_CloudStack() { return $DS_NOT_FOUND } +dscheck_Exoscale() { + dmi_product_name_matches "Exoscale*" && return $DS_FOUND + return $DS_NOT_FOUND +} + dscheck_CloudSigma() { # http://paste.ubuntu.com/23624795/ dmi_product_name_matches "CloudSigma" && return $DS_FOUND -- cgit v1.2.3 From 155847209e6a3ed5face91a133d8488a703f3f93 Mon Sep 17 00:00:00 2001 From: Rick Wright Date: Fri, 9 Aug 2019 17:11:05 +0000 Subject: Add support for publishing host keys to GCE guest attributes This adds an empty publish_host_keys() method to the default datasource that is called by cc_ssh.py. This feature can be controlled by the 'ssh_publish_hostkeys' config option. It is enabled by default but can be disabled by setting 'enabled' to false. Also, a blacklist of key types is supported. In addition, this change implements ssh_publish_hostkeys() for the GCE datasource, attempting to write the hostkeys to the instance's guest attributes. Using these hostkeys for ssh connections is currently supported by the alpha version of Google's 'gcloud' command-line tool. (On Google Compute Engine, this feature will be enabled by setting the 'enable-guest-attributes' metadata key to 'true' for the project/instance that you would like to use this feature for. When connecting to the instance for the first time using 'gcloud compute ssh' the hostkeys will be read from the guest attributes for the instance and written to the user's local known_hosts file for Google Compute Engine instances.) --- cloudinit/config/cc_ssh.py | 55 +++++++++ cloudinit/config/tests/test_ssh.py | 166 ++++++++++++++++++++++++++++ cloudinit/sources/DataSourceGCE.py | 22 +++- cloudinit/sources/__init__.py | 10 ++ cloudinit/url_helper.py | 9 +- tests/unittests/test_datasource/test_gce.py | 18 +++ 6 files changed, 274 insertions(+), 6 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index f8f7cb35..53f69399 100755 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -91,6 +91,9 @@ public keys. ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ... - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ... 
+ ssh_publish_hostkeys: + enabled: (Defaults to true) + blacklist: (Defaults to [dsa]) """ import glob @@ -104,6 +107,10 @@ from cloudinit import util GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519'] KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' +PUBLISH_HOST_KEYS = True +# Don't publish the dsa hostkey by default since OpenSSH recommends not using +# it. +HOST_KEY_PUBLISH_BLACKLIST = ['dsa'] CONFIG_KEY_TO_FILE = {} PRIV_TO_PUB = {} @@ -176,6 +183,23 @@ def handle(_name, cfg, cloud, log, _args): util.logexc(log, "Failed generating key type %s to " "file %s", keytype, keyfile) + if "ssh_publish_hostkeys" in cfg: + host_key_blacklist = util.get_cfg_option_list( + cfg["ssh_publish_hostkeys"], "blacklist", + HOST_KEY_PUBLISH_BLACKLIST) + publish_hostkeys = util.get_cfg_option_bool( + cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS) + else: + host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST + publish_hostkeys = PUBLISH_HOST_KEYS + + if publish_hostkeys: + hostkeys = get_public_host_keys(blacklist=host_key_blacklist) + try: + cloud.datasource.publish_host_keys(hostkeys) + except Exception as e: + util.logexc(log, "Publishing host keys failed!") + try: (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) (user, _user_config) = ug_util.extract_default(users) @@ -209,4 +233,35 @@ def apply_credentials(keys, user, disable_root, disable_root_opts): ssh_util.setup_user_keys(keys, 'root', options=key_prefix) + +def get_public_host_keys(blacklist=None): + """Read host keys from /etc/ssh/*.pub files and return them as a list. + + @param blacklist: List of key types to ignore. e.g. ['dsa', 'rsa'] + @returns: List of keys, each formatted as a two-element tuple. + e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')] + """ + public_key_file_tmpl = '%s.pub' % (KEY_FILE_TPL,) + key_list = [] + blacklist_files = [] + if blacklist: + # Convert blacklist to filenames: + # 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub' + blacklist_files = [public_key_file_tmpl % (key_type,) + for key_type in blacklist] + # Get list of public key files and filter out blacklisted files. + file_list = [hostfile for hostfile + in glob.glob(public_key_file_tmpl % ('*',)) + if hostfile not in blacklist_files] + + # Read host key files, retrieve first two fields as a tuple and + # append that tuple to key_list. + for file_name in file_list: + file_contents = util.load_file(file_name) + key_data = file_contents.split() + if key_data and len(key_data) > 1: + key_list.append(tuple(key_data[:2])) + return key_list + + # vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py index c8a4271f..e7789842 100644 --- a/cloudinit/config/tests/test_ssh.py +++ b/cloudinit/config/tests/test_ssh.py @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. +import os.path from cloudinit.config import cc_ssh from cloudinit import ssh_util @@ -12,6 +13,25 @@ MODPATH = "cloudinit.config.cc_ssh." 
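For context, a short usage sketch of the new helper; the printed key material depends entirely on the host, so the comment shows fabricated values:

    from cloudinit.config.cc_ssh import get_public_host_keys

    # With the stock /etc/ssh key pairs present, this yields something like
    # [('ssh-ed25519', 'AAAAC3Nz...'), ('ecdsa-sha2-nistp256', 'AAAAE2Vj...'),
    #  ('ssh-rsa', 'AAAAB3Nz...')] once the dsa key is filtered out.
    for key_type, key_value in get_public_host_keys(blacklist=['dsa']):
        print(key_type, key_value[:16])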
class TestHandleSsh(CiTestCase): """Test cc_ssh handling of ssh config.""" + def _publish_hostkey_test_setup(self): + self.test_hostkeys = { + 'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'), + 'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'), + 'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'), + 'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'), + } + self.test_hostkey_files = [] + hostkey_tmpdir = self.tmp_dir() + for key_type in ['dsa', 'ecdsa', 'ed25519', 'rsa']: + key_data = self.test_hostkeys[key_type] + filename = 'ssh_host_%s_key.pub' % key_type + filepath = os.path.join(hostkey_tmpdir, filename) + self.test_hostkey_files.append(filepath) + with open(filepath, 'w') as f: + f.write(' '.join(key_data)) + + cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key') + def test_apply_credentials_with_user(self, m_setup_keys): """Apply keys for the given user and root.""" keys = ["key1"] @@ -64,6 +84,7 @@ class TestHandleSsh(CiTestCase): # Mock os.path.exits to True to short-circuit the key writing logic m_path_exists.return_value = True m_nug.return_value = ([], {}) + cc_ssh.PUBLISH_HOST_KEYS = False cloud = self.tmp_cloud( distro='ubuntu', metadata={'public-keys': keys}) cc_ssh.handle("name", cfg, cloud, None, None) @@ -149,3 +170,148 @@ class TestHandleSsh(CiTestCase): self.assertEqual([mock.call(set(keys), user), mock.call(set(keys), "root", options="")], m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_default( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['ecdsa', 'ed25519', 'rsa']] + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_enable( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = False + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. 
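    # (The empty first result is consumed by handle()'s host-key
    # deletion/generation path; the second supplies get_public_host_keys()
    # with the fixture files written by _publish_hostkey_test_setup().)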
+ m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': True}} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['ecdsa', 'ed25519', 'rsa']] + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_disable( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': False}} + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertFalse(cloud.datasource.publish_host_keys.call_args_list) + cloud.datasource.publish_host_keys.assert_not_called() + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_blacklist( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': True, + 'blacklist': ['dsa', 'rsa']}} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['ecdsa', 'ed25519']] + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_empty_blacklist( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. 
+ m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': True, + 'blacklist': []}} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['dsa', 'ecdsa', 'ed25519', 'rsa']] + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index d8162623..6cbfbbac 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -18,10 +18,13 @@ LOG = logging.getLogger(__name__) MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/' BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL} REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') +GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' + 'v1/instance/guest-attributes') +HOSTKEY_NAMESPACE = 'hostkeys' +HEADERS = {'Metadata-Flavor': 'Google'} class GoogleMetadataFetcher(object): - headers = {'Metadata-Flavor': 'Google'} def __init__(self, metadata_address): self.metadata_address = metadata_address @@ -32,7 +35,7 @@ class GoogleMetadataFetcher(object): url = self.metadata_address + path if is_recursive: url += '/?recursive=True' - resp = url_helper.readurl(url=url, headers=self.headers) + resp = url_helper.readurl(url=url, headers=HEADERS) except url_helper.UrlError as exc: msg = "url %s raised exception %s" LOG.debug(msg, path, exc) @@ -90,6 +93,10 @@ class DataSourceGCE(sources.DataSource): public_keys_data = self.metadata['public-keys-data'] return _parse_public_keys(public_keys_data, self.default_user) + def publish_host_keys(self, hostkeys): + for key in hostkeys: + _write_host_key_to_guest_attributes(*key) + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): # GCE has long FDQN's and has asked for short hostnames. return self.metadata['local-hostname'].split('.')[0] @@ -103,6 +110,17 @@ class DataSourceGCE(sources.DataSource): return self.availability_zone.rsplit('-', 1)[0] +def _write_host_key_to_guest_attributes(key_type, key_value): + url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type) + key_value = key_value.encode('utf-8') + resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS, + request_method='PUT', check_status=False) + if resp.ok(): + LOG.debug('Wrote %s host key to guest attributes.', key_type) + else: + LOG.debug('Unable to write %s host key to guest attributes.', key_type) + + def _has_expired(public_key): # Check whether an SSH key is expired. Public key input is a single SSH # public key in the GCE specific key format documented here: diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index c2baccd5..a319322b 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -491,6 +491,16 @@ class DataSource(object): def get_public_ssh_keys(self): return normalize_pubkey_data(self.metadata.get('public-keys')) + def publish_host_keys(self, hostkeys): + """Publish the public SSH host keys (found in /etc/ssh/*.pub). 
+
+        @param hostkeys: List of host key tuples (key_type, key_value),
+            where key_type is the first field in the public key file
+            (e.g. 'ssh-rsa') and key_value is the key itself
+            (e.g. 'AAAAB3NzaC1y...').
+        """
+        pass
+
     def _remap_device(self, short_name):
         # LP: #611137
         # the metadata service may believe that devices are named 'sda'
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 0af0d9e3..44ee61d4 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -199,18 +199,19 @@ def _get_ssl_args(url, ssl_details):
 
 def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
             headers=None, headers_cb=None, ssl_details=None,
             check_status=True, allow_redirects=True, exception_cb=None,
-            session=None, infinite=False, log_req_resp=True):
+            session=None, infinite=False, log_req_resp=True,
+            request_method=None):
     url = _cleanurl(url)
     req_args = {
         'url': url,
     }
     req_args.update(_get_ssl_args(url, ssl_details))
     req_args['allow_redirects'] = allow_redirects
-    req_args['method'] = 'GET'
+    if not request_method:
+        request_method = 'POST' if data else 'GET'
+    req_args['method'] = request_method
     if timeout is not None:
         req_args['timeout'] = max(float(timeout), 0)
-    if data:
-        req_args['method'] = 'POST'
     # It doesn't seem like config
     # was added in older library versions (or newer ones either), thus we
     # need to manually do the retries if it wasn't...
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 41176c6a..67744d32 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -55,6 +55,8 @@ GCE_USER_DATA_TEXT = {
 HEADERS = {'Metadata-Flavor': 'Google'}
 MD_URL_RE = re.compile(
     r'http://metadata.google.internal/computeMetadata/v1/.*')
+GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
+                        'v1/instance/guest-attributes/hostkeys/')
 
 
 def _set_mock_metadata(gce_meta=None):
@@ -341,4 +343,20 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
             public_key_data, default_user='default')
         self.assertEqual(sorted(found), sorted(expected))
 
+    @mock.patch("cloudinit.url_helper.readurl")
+    def test_publish_host_keys(self, m_readurl):
+        hostkeys = [('ssh-rsa', 'asdfasdf'),
+                    ('ssh-ed25519', 'qwerqwer')]
+        readurl_expected_calls = [
+            mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS,
+                      request_method='PUT',
+                      url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')),
+            mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS,
+                      request_method='PUT',
+                      url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')),
+        ]
+        self.ds.publish_host_keys(hostkeys)
+        m_readurl.assert_has_calls(readurl_expected_calls, any_order=True)
+
 
 # vi: ts=4 expandtab
--
cgit v1.2.3


From 7f674256c1426ffc419fd6b13e66a58754d94939 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Tue, 13 Aug 2019 20:13:05 +0000
Subject: azure/net: generate_fallback_nic emits network v2 config instead of v1

The function generate_fallback_config is used by Azure by default when
not consuming IMDS configuration data. This function is also used by any
datasource which does not implement its own network config. This simple
fallback configuration sets up dhcp on the most likely NIC. It will now
emit network v2 instead of network v1.

This is a step toward moving all components talking in v2 and allows us
to avoid costly conversions between v1 and v2 for newer distributions
which rely on netplan.
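Concretely, the fallback configuration changes shape as follows; both literals mirror the expectations in the updated tests below:

    mac = 'aa:bb:cc:aa:bb:cc'

    # Before: network config v1
    v1 = {'version': 1,
          'config': [{'type': 'physical', 'name': 'eth0',
                      'mac_address': mac,
                      'subnets': [{'type': 'dhcp'}]}]}

    # After: network config v2 (netplan-style)
    v2 = {'version': 2,
          'ethernets': {'eth0': {'dhcp4': True, 'set-name': 'eth0',
                                 'match': {'macaddress': mac}}}}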
--- cloudinit/net/__init__.py | 31 +++++--------- cloudinit/net/network_state.py | 12 ++++-- cloudinit/net/tests/test_init.py | 19 +++++---- cloudinit/sources/DataSourceAzure.py | 7 +++- tests/unittests/test_datasource/test_azure.py | 59 ++++++++++++++++++++++++++- tests/unittests/test_net.py | 41 +++++++++++++++++-- 6 files changed, 130 insertions(+), 39 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index f3cec794..ea707c09 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -265,32 +265,23 @@ def find_fallback_nic(blacklist_drivers=None): def generate_fallback_config(blacklist_drivers=None, config_driver=None): - """Determine which attached net dev is most likely to have a connection and - generate network state to run dhcp on that interface""" - + """Generate network cfg v2 for dhcp on the NIC most likely connected.""" if not config_driver: config_driver = False target_name = find_fallback_nic(blacklist_drivers=blacklist_drivers) - if target_name: - target_mac = read_sys_net_safe(target_name, 'address') - nconf = {'config': [], 'version': 1} - cfg = {'type': 'physical', 'name': target_name, - 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]} - # inject the device driver name, dev_id into config if enabled and - # device has a valid device driver value - if config_driver: - driver = device_driver(target_name) - if driver: - cfg['params'] = { - 'driver': driver, - 'device_id': device_devid(target_name), - } - nconf['config'].append(cfg) - return nconf - else: + if not target_name: # can't read any interfaces addresses (or there are none); give up return None + target_mac = read_sys_net_safe(target_name, 'address') + cfg = {'dhcp4': True, 'set-name': target_name, + 'match': {'macaddress': target_mac.lower()}} + if config_driver: + driver = device_driver(target_name) + if driver: + cfg['match']['driver'] = driver + nconf = {'ethernets': {target_name: cfg}, 'version': 2} + return nconf def extract_physdevs(netcfg): diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 0ca576b6..c0c415d0 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -596,6 +596,7 @@ class NetworkStateInterpreter(object): eno1: match: macaddress: 00:11:22:33:44:55 + driver: hv_netsvc wakeonlan: true dhcp4: true dhcp6: false @@ -631,15 +632,18 @@ class NetworkStateInterpreter(object): 'type': 'physical', 'name': cfg.get('set-name', eth), } - mac_address = cfg.get('match', {}).get('macaddress', None) + match = cfg.get('match', {}) + mac_address = match.get('macaddress', None) if not mac_address: LOG.debug('NetworkState Version2: missing "macaddress" info ' 'in config entry: %s: %s', eth, str(cfg)) - phy_cmd.update({'mac_address': mac_address}) - + phy_cmd['mac_address'] = mac_address + driver = match.get('driver', None) + if driver: + phy_cmd['params'] = {'driver': driver} for key in ['mtu', 'match', 'wakeonlan']: if key in cfg: - phy_cmd.update({key: cfg.get(key)}) + phy_cmd[key] = cfg[key] subnets = self._v2_to_v1_ipcfg(cfg) if len(subnets) > 0: diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index e6e77d7a..d2e38f00 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -212,9 +212,9 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 
'name': 'eth1', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': {'eth1': {'match': {'macaddress': mac}, + 'dhcp4': True, 'set-name': 'eth1'}}, + 'version': 2} self.assertEqual(expected, net.generate_fallback_config()) def test_generate_fallback_finds_dormant_eth_with_mac(self): @@ -223,9 +223,9 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': {'eth0': {'match': {'macaddress': mac}, 'dhcp4': True, + 'set-name': 'eth0'}}, + 'version': 2} self.assertEqual(expected, net.generate_fallback_config()) def test_generate_fallback_finds_eth_by_operstate(self): @@ -233,9 +233,10 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': { + 'eth0': {'dhcp4': True, 'match': {'macaddress': mac}, + 'set-name': 'eth0'}}, + 'version': 2} valid_operstates = ['dormant', 'down', 'lowerlayerdown', 'unknown'] for state in valid_operstates: write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d2fad9bb..e6ed2f3b 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1241,7 +1241,7 @@ def parse_network_config(imds_metadata): privateIpv4 = addr4['privateIpAddress'] if privateIpv4: if dev_config.get('dhcp4', False): - # Append static address config for nic > 1 + # Append static address config for ip > 1 netPrefix = intf['ipv4']['subnet'][0].get( 'prefix', '24') if not dev_config.get('addresses'): @@ -1251,6 +1251,11 @@ def parse_network_config(imds_metadata): ip=privateIpv4, prefix=netPrefix)) else: dev_config['dhcp4'] = True + # non-primary interfaces should have a higher + # route-metric (cost) so default routes prefer + # primary nic due to lower route-metric value + dev_config['dhcp4-overrides'] = { + 'route-metric': (idx + 1) * 100} for addr6 in intf['ipv6']['ipAddress']: privateIpv6 = addr6['privateIpAddress'] if privateIpv6: diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 2de2aea2..4d57cebc 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -12,6 +12,7 @@ from cloudinit.tests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, ExitStack, resourceLocation) +import copy import crypt import httpretty import json @@ -129,6 +130,26 @@ NETWORK_METADATA = { } } +SECONDARY_INTERFACE = { + "macAddress": "220D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.1.0" + } + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.1.5", + } + ] + } +} + MOCKPATH = 'cloudinit.sources.DataSourceAzure.' 
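As a quick sanity check on the route metrics asserted in the tests below: the formula added to parse_network_config() is (idx + 1) * 100 for the 0-based interface index, so the lowest-cost default route stays on the primary NIC:

    for idx, nic in enumerate(['eth0', 'eth1', 'eth2']):
        print(nic, {'dhcp4-overrides': {'route-metric': (idx + 1) * 100}})
    # eth0 {'dhcp4-overrides': {'route-metric': 100}}
    # eth1 {'dhcp4-overrides': {'route-metric': 200}}
    # eth2 {'dhcp4-overrides': {'route-metric': 300}}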
@@ -619,8 +640,43 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'set-name': 'eth0', 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'dhcp4': True}}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}}, + 'version': 2} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(expected_network_config, dsrc.network_config) + + def test_network_config_set_from_imds_route_metric_for_secondary_nic(self): + """Datasource.network_config adds route-metric to secondary nics.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + expected_network_config = { + 'ethernets': { + 'eth0': {'set-name': 'eth0', + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + imds_data['network']['interface'].append(third_intf) + + self.m_get_metadata_from_imds.return_value = imds_data dsrc = self._get_ds(data) dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) @@ -925,6 +981,7 @@ scbus-1 on xpt0 bus 0 expected_cfg = { 'ethernets': { 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, 'match': {'macaddress': '00:0d:3a:04:75:98'}, 'set-name': 'eth0'}}, 'version': 2} diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 1840ade0..4f7e4207 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2156,7 +2156,7 @@ DEFAULT_DEV_ATTRS = { "carrier": False, "dormant": False, "operstate": "down", - "address": "07-1C-C6-75-A4-BE", + "address": "07-1c-c6-75-a4-be", "device/driver": None, "device/device": None, "name_assign_type": "4", @@ -2204,6 +2204,39 @@ class TestGenerateFallbackConfig(CiTestCase): "cloudinit.util.get_cmdline", "m_get_cmdline", return_value="root=/dev/sda1") + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_device_driver_v2(self, mock_get_devicelist, mock_read_sys_net, + mock_sys_dev_path): + """Network configuration for generate_fallback_config is version 2.""" + devices = { + 'eth0': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'hv_netsvc', 'device/device': '0x3', + 'name_assign_type': '4'}, + 'eth1': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'mlx4_core', 'device/device': '0x7', + 'name_assign_type': '4'}, + + } + + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path, + dev_attrs=devices) + + network_cfg = net.generate_fallback_config(config_driver=True) + expected = { + 'ethernets': {'eth0': {'dhcp4': True, 'set-name': 'eth0', + 'match': {'macaddress': 
'00:11:22:33:44:55', + 'driver': 'hv_netsvc'}}}, + 'version': 2} + self.assertEqual(expected, network_cfg) + @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") @@ -2486,7 +2519,7 @@ class TestRhelSysConfigRendering(CiTestCase): # BOOTPROTO=dhcp DEVICE=eth1000 -HWADDR=07-1C-C6-75-A4-BE +HWADDR=07-1c-c6-75-a4-be NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto @@ -3030,7 +3063,7 @@ class TestOpenSuseSysConfigRendering(CiTestCase): # BOOTPROTO=dhcp DEVICE=eth1000 -HWADDR=07-1C-C6-75-A4-BE +HWADDR=07-1c-c6-75-a4-be NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto @@ -3342,13 +3375,13 @@ class TestNetplanNetRendering(CiTestCase): expected = """ network: - version: 2 ethernets: eth1000: dhcp4: true match: macaddress: 07-1c-c6-75-a4-be set-name: eth1000 + version: 2 """ self.assertEqual(expected.lstrip(), contents.lstrip()) self.assertEqual(1, mock_clean_default.call_count) -- cgit v1.2.3 From 2f3bb764626b9065f4102c7c0a67998a9c174444 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Wed, 14 Aug 2019 21:03:13 +0000 Subject: Azure: Record boot timestamps, system information, and diagnostic events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Collect and record the following information through KVP:  + timestamps related to kernel initialization and systemd activation    of cloud-init services  + system information including cloud-init version, kernel version,    distro version, and python version  + diagnostic events for the most common provisioning error issues    such as empty dhcp lease, corrupted ovf-env.xml, etc. + increasing the log frequency of polling IMDS during reprovision. --- cloudinit/sources/DataSourceAzure.py | 157 ++++++++++++++++++++----- cloudinit/sources/helpers/azure.py | 160 ++++++++++++++++++++++++-- tests/unittests/test_datasource/test_azure.py | 15 ++- tests/unittests/test_reporting_hyperv.py | 65 +++++++++++ 4 files changed, 353 insertions(+), 44 deletions(-) mode change 100755 => 100644 tests/unittests/test_reporting_hyperv.py (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index e6ed2f3b..4984fa84 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -26,9 +26,14 @@ from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util from cloudinit.reporting import events -from cloudinit.sources.helpers.azure import (azure_ds_reporter, - azure_ds_telemetry_reporter, - get_metadata_from_fabric) +from cloudinit.sources.helpers.azure import ( + azure_ds_reporter, + azure_ds_telemetry_reporter, + get_metadata_from_fabric, + get_boot_telemetry, + get_system_info, + report_diagnostic_event, + EphemeralDHCPv4WithReporting) LOG = logging.getLogger(__name__) @@ -354,7 +359,7 @@ class DataSourceAzure(sources.DataSource): bname = str(pk['fingerprint'] + ".crt") fp_files += [os.path.join(ddir, bname)] LOG.debug("ssh authentication: " - "using fingerprint from fabirc") + "using fingerprint from fabric") with events.ReportEventStack( name="waiting-for-ssh-public-key", @@ -419,12 +424,17 @@ class DataSourceAzure(sources.DataSource): ret = load_azure_ds_dir(cdev) except NonAzureDataSource: + report_diagnostic_event( + "Did not find Azure data source in %s" % cdev) continue except BrokenAzureDataSource as exc: msg = 'BrokenAzureDataSource: %s' % exc + report_diagnostic_event(msg) raise sources.InvalidMetaDataException(msg) except 
util.MountFailedError: - LOG.warning("%s was not mountable", cdev) + msg = '%s was not mountable' % cdev + report_diagnostic_event(msg) + LOG.warning(msg) continue perform_reprovision = reprovision or self._should_reprovision(ret) @@ -432,6 +442,7 @@ class DataSourceAzure(sources.DataSource): if util.is_FreeBSD(): msg = "Free BSD is not supported for PPS VMs" LOG.error(msg) + report_diagnostic_event(msg) raise sources.InvalidMetaDataException(msg) ret = self._reprovision() imds_md = get_metadata_from_imds( @@ -450,7 +461,9 @@ class DataSourceAzure(sources.DataSource): break if not found: - raise sources.InvalidMetaDataException('No Azure metadata found') + msg = 'No Azure metadata found' + report_diagnostic_event(msg) + raise sources.InvalidMetaDataException(msg) if found == ddir: LOG.debug("using files cached in %s", ddir) @@ -469,9 +482,14 @@ class DataSourceAzure(sources.DataSource): self._report_ready(lease=self._ephemeral_dhcp_ctx.lease) self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral else: - with EphemeralDHCPv4() as lease: - self._report_ready(lease=lease) - + try: + with EphemeralDHCPv4WithReporting( + azure_ds_reporter) as lease: + self._report_ready(lease=lease) + except Exception as e: + report_diagnostic_event( + "exception while reporting ready: %s" % e) + raise return crawled_data def _is_platform_viable(self): @@ -492,6 +510,16 @@ class DataSourceAzure(sources.DataSource): """ if not self._is_platform_viable(): return False + try: + get_boot_telemetry() + except Exception as e: + LOG.warning("Failed to get boot telemetry: %s", e) + + try: + get_system_info() + except Exception as e: + LOG.warning("Failed to get system information: %s", e) + try: crawled_data = util.log_time( logfunc=LOG.debug, msg='Crawl of metadata service', @@ -551,27 +579,55 @@ class DataSourceAzure(sources.DataSource): headers = {"Metadata": "true"} nl_sock = None report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) + self.imds_logging_threshold = 1 + self.imds_poll_counter = 1 + dhcp_attempts = 0 + vnet_switched = False + return_val = None def exc_cb(msg, exception): if isinstance(exception, UrlError) and exception.code == 404: + if self.imds_poll_counter == self.imds_logging_threshold: + # Reducing the logging frequency as we are polling IMDS + self.imds_logging_threshold *= 2 + LOG.debug("Call to IMDS with arguments %s failed " + "with status code %s after %s retries", + msg, exception.code, self.imds_poll_counter) + LOG.debug("Backing off logging threshold for the same " + "exception to %d", self.imds_logging_threshold) + self.imds_poll_counter += 1 return True + # If we get an exception while trying to call IMDS, we # call DHCP and setup the ephemeral network to acquire the new IP. 
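    # Aside: the imds_logging_threshold doubling above is
    # log-suppression-by-exponential-backoff. A toy, self-contained
    # illustration of the same idea (not cloud-init code):
    class BackoffLogger:
        def __init__(self):
            self.threshold = 1
            self.counter = 1

        def should_log(self):
            log_now = self.counter == self.threshold
            if log_now:
                self.threshold *= 2  # log half as often from now on
            self.counter += 1
            return log_now

    bl = BackoffLogger()
    # Only attempts 1, 2, 4, 8, 16 ... produce a log line.
    assert [n for n in range(1, 20) if bl.should_log()] == [1, 2, 4, 8, 16]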
+ LOG.debug("Call to IMDS with arguments %s failed with " + "status code %s", msg, exception.code) + report_diagnostic_event("polling IMDS failed with exception %s" + % exception.code) return False LOG.debug("Wait for vnetswitch to happen") while True: try: - # Save our EphemeralDHCPv4 context so we avoid repeated dhcp - self._ephemeral_dhcp_ctx = EphemeralDHCPv4() - lease = self._ephemeral_dhcp_ctx.obtain_lease() + # Save our EphemeralDHCPv4 context to avoid repeated dhcp + with events.ReportEventStack( + name="obtain-dhcp-lease", + description="obtain dhcp lease", + parent=azure_ds_reporter): + self._ephemeral_dhcp_ctx = EphemeralDHCPv4() + lease = self._ephemeral_dhcp_ctx.obtain_lease() + + if vnet_switched: + dhcp_attempts += 1 if report_ready: try: nl_sock = netlink.create_bound_netlink_socket() except netlink.NetlinkCreateSocketError as e: + report_diagnostic_event(e) LOG.warning(e) self._ephemeral_dhcp_ctx.clean_network() - return + break + path = REPORTED_READY_MARKER_FILE LOG.info( "Creating a marker file to report ready: %s", path) @@ -579,17 +635,33 @@ class DataSourceAzure(sources.DataSource): pid=os.getpid(), time=time())) self._report_ready(lease=lease) report_ready = False - try: - netlink.wait_for_media_disconnect_connect( - nl_sock, lease['interface']) - except AssertionError as error: - LOG.error(error) - return + + with events.ReportEventStack( + name="wait-for-media-disconnect-connect", + description="wait for vnet switch", + parent=azure_ds_reporter): + try: + netlink.wait_for_media_disconnect_connect( + nl_sock, lease['interface']) + except AssertionError as error: + report_diagnostic_event(error) + LOG.error(error) + break + + vnet_switched = True self._ephemeral_dhcp_ctx.clean_network() else: - return readurl(url, timeout=IMDS_TIMEOUT_IN_SECONDS, - headers=headers, exception_cb=exc_cb, - infinite=True, log_req_resp=False).contents + with events.ReportEventStack( + name="get-reprovision-data-from-imds", + description="get reprovision data from imds", + parent=azure_ds_reporter): + return_val = readurl(url, + timeout=IMDS_TIMEOUT_IN_SECONDS, + headers=headers, + exception_cb=exc_cb, + infinite=True, + log_req_resp=False).contents + break except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry self._ephemeral_dhcp_ctx.clean_network() @@ -598,6 +670,14 @@ class DataSourceAzure(sources.DataSource): if nl_sock: nl_sock.close() + if vnet_switched: + report_diagnostic_event("attempted dhcp %d times after reuse" % + dhcp_attempts) + report_diagnostic_event("polled imds %d times after reuse" % + self.imds_poll_counter) + + return return_val + @azure_ds_telemetry_reporter def _report_ready(self, lease): """Tells the fabric provisioning has completed """ @@ -666,9 +746,12 @@ class DataSourceAzure(sources.DataSource): self.ds_cfg['agent_command']) try: fabric_data = metadata_func() - except Exception: + except Exception as e: + report_diagnostic_event( + "Error communicating with Azure fabric; You may experience " + "connectivity issues: %s" % e) LOG.warning( - "Error communicating with Azure fabric; You may experience." 
+ "Error communicating with Azure fabric; You may experience " "connectivity issues.", exc_info=True) return False @@ -1027,7 +1110,9 @@ def read_azure_ovf(contents): try: dom = minidom.parseString(contents) except Exception as e: - raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e) + error_str = "Invalid ovf-env.xml: %s" % e + report_diagnostic_event(error_str) + raise BrokenAzureDataSource(error_str) results = find_child(dom.documentElement, lambda n: n.localName == "ProvisioningSection") @@ -1299,8 +1384,13 @@ def get_metadata_from_imds(fallback_nic, retries): if net.is_up(fallback_nic): return util.log_time(**kwargs) else: - with EphemeralDHCPv4(fallback_nic): - return util.log_time(**kwargs) + try: + with EphemeralDHCPv4WithReporting( + azure_ds_reporter, fallback_nic): + return util.log_time(**kwargs) + except Exception as e: + report_diagnostic_event("exception while getting metadata: %s" % e) + raise @azure_ds_telemetry_reporter @@ -1313,11 +1403,14 @@ def _get_metadata_from_imds(retries): url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, retries=retries, exception_cb=retry_on_url_exc) except Exception as e: - LOG.debug('Ignoring IMDS instance metadata: %s', e) + msg = 'Ignoring IMDS instance metadata: %s' % e + report_diagnostic_event(msg) + LOG.debug(msg) return {} try: return util.load_json(str(response)) - except json.decoder.JSONDecodeError: + except json.decoder.JSONDecodeError as e: + report_diagnostic_event('non-json imds response' % e) LOG.warning( 'Ignoring non-json IMDS instance metadata: %s', str(response)) return {} @@ -1370,8 +1463,10 @@ def _is_platform_viable(seed_dir): asset_tag = util.read_dmi_data('chassis-asset-tag') if asset_tag == AZURE_CHASSIS_ASSET_TAG: return True - LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) - evt.description = "Non-Azure DMI asset tag '%s' discovered.", asset_tag + msg = "Non-Azure DMI asset tag '%s' discovered." 
% asset_tag + LOG.debug(msg) + evt.description = msg + report_diagnostic_event(msg) if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): return True return False diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 82c4c8c4..f1fba175 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -16,7 +16,11 @@ from xml.etree import ElementTree from cloudinit import url_helper from cloudinit import util +from cloudinit import version +from cloudinit import distros from cloudinit.reporting import events +from cloudinit.net.dhcp import EphemeralDHCPv4 +from datetime import datetime LOG = logging.getLogger(__name__) @@ -24,6 +28,10 @@ LOG = logging.getLogger(__name__) # value is applied if the endpoint can't be found within a lease file DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10" +BOOT_EVENT_TYPE = 'boot-telemetry' +SYSTEMINFO_EVENT_TYPE = 'system-info' +DIAGNOSTIC_EVENT_TYPE = 'diagnostic' + azure_ds_reporter = events.ReportEventStack( name="azure-ds", description="initialize reporter for azure ds", @@ -40,6 +48,105 @@ def azure_ds_telemetry_reporter(func): return impl +@azure_ds_telemetry_reporter +def get_boot_telemetry(): + """Report timestamps related to kernel initialization and systemd + activation of cloud-init""" + if not distros.uses_systemd(): + raise RuntimeError( + "distro not using systemd, skipping boot telemetry") + + LOG.debug("Collecting boot telemetry") + try: + kernel_start = float(time.time()) - float(util.uptime()) + except ValueError: + raise RuntimeError("Failed to determine kernel start timestamp") + + try: + out, _ = util.subp(['/bin/systemctl', + 'show', '-p', + 'UserspaceTimestampMonotonic'], + capture=True) + tsm = None + if out and '=' in out: + tsm = out.split("=")[1] + + if not tsm: + raise RuntimeError("Failed to parse " + "UserspaceTimestampMonotonic from systemd") + + user_start = kernel_start + (float(tsm) / 1000000) + except util.ProcessExecutionError as e: + raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s" + % e) + except ValueError as e: + raise RuntimeError("Failed to parse " + "UserspaceTimestampMonotonic from systemd: %s" + % e) + + try: + out, _ = util.subp(['/bin/systemctl', 'show', + 'cloud-init-local', '-p', + 'InactiveExitTimestampMonotonic'], + capture=True) + tsm = None + if out and '=' in out: + tsm = out.split("=")[1] + if not tsm: + raise RuntimeError("Failed to parse " + "InactiveExitTimestampMonotonic from systemd") + + cloudinit_activation = kernel_start + (float(tsm) / 1000000) + except util.ProcessExecutionError as e: + raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s" + % e) + except ValueError as e: + raise RuntimeError("Failed to parse " + "InactiveExitTimestampMonotonic from systemd: %s" + % e) + + evt = events.ReportingEvent( + BOOT_EVENT_TYPE, 'boot-telemetry', + "kernel_start=%s user_start=%s cloudinit_activation=%s" % + (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z', + datetime.utcfromtimestamp(user_start).isoformat() + 'Z', + datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'), + events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + +@azure_ds_telemetry_reporter +def get_system_info(): + """Collect and report system information""" + info = util.system_info() + evt = events.ReportingEvent( + SYSTEMINFO_EVENT_TYPE, 'system information', + "cloudinit_version=%s, kernel_version=%s, variant=%s, " + "distro_name=%s, 
distro_version=%s, flavor=%s, " + "python_version=%s" % + (version.version_string(), info['release'], info['variant'], + info['dist'][0], info['dist'][1], info['dist'][2], + info['python']), events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + +def report_diagnostic_event(str): + """Report a diagnostic event""" + evt = events.ReportingEvent( + DIAGNOSTIC_EVENT_TYPE, 'diagnostic message', + str, events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + @contextmanager def cd(newdir): prevdir = os.getcwd() @@ -360,16 +467,19 @@ class WALinuxAgentShim(object): value = dhcp245 LOG.debug("Using Azure Endpoint from dhcp options") if value is None: + report_diagnostic_event("No Azure endpoint from dhcp options") LOG.debug('Finding Azure endpoint from networkd...') value = WALinuxAgentShim._networkd_get_value_from_leases() if value is None: # Option-245 stored in /run/cloud-init/dhclient.hooks/.json # a dhclient exit hook that calls cloud-init-dhclient-hook + report_diagnostic_event("No Azure endpoint from networkd") LOG.debug('Finding Azure endpoint from hook json...') dhcp_options = WALinuxAgentShim._load_dhclient_json() value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options) if value is None: # Fallback and check the leases file if unsuccessful + report_diagnostic_event("No Azure endpoint from dhclient logs") LOG.debug("Unable to find endpoint in dhclient logs. " " Falling back to check lease files") if fallback_lease_file is None: @@ -381,11 +491,15 @@ class WALinuxAgentShim(object): value = WALinuxAgentShim._get_value_from_leases_file( fallback_lease_file) if value is None: - LOG.warning("No lease found; using default endpoint") + msg = "No lease found; using default endpoint" + report_diagnostic_event(msg) + LOG.warning(msg) value = DEFAULT_WIRESERVER_ENDPOINT endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) - LOG.debug('Azure endpoint found at %s', endpoint_ip_address) + msg = 'Azure endpoint found at %s' % endpoint_ip_address + report_diagnostic_event(msg) + LOG.debug(msg) return endpoint_ip_address @azure_ds_telemetry_reporter @@ -399,16 +513,19 @@ class WALinuxAgentShim(object): try: response = http_client.get( 'http://{0}/machine/?comp=goalstate'.format(self.endpoint)) - except Exception: + except Exception as e: if attempts < 10: time.sleep(attempts + 1) else: + report_diagnostic_event( + "failed to register with Azure: %s" % e) raise else: break attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) + report_diagnostic_event("container_id %s" % goal_state.container_id) ssh_keys = [] if goal_state.certificates_xml is not None and pubkey_info is not None: LOG.debug('Certificate XML found; parsing out public keys.') @@ -449,11 +566,20 @@ class WALinuxAgentShim(object): container_id=goal_state.container_id, instance_id=goal_state.instance_id, ) - http_client.post( - "http://{0}/machine?comp=health".format(self.endpoint), - data=document, - extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, - ) + # Host will collect kvps when cloud-init reports ready. + # some kvps might still be in the queue. We yield the scheduler + # to make sure we process all kvps up till this point. 
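    # Aside: azure_ds_telemetry_reporter, applied to the functions above,
    # is the classic wrap-and-report decorator. A toy equivalent with an
    # injected 'report' callback standing in for the events machinery
    # (assumed names only, not the cloud-init API):
    import functools

    def with_reporting(report):
        def decorator(func):
            @functools.wraps(func)
            def impl(*args, **kwargs):
                report('start', func.__name__)
                try:
                    result = func(*args, **kwargs)
                    report('success', func.__name__)
                    return result
                except Exception:
                    report('failure', func.__name__)
                    raise
            return impl
        return decorator

    log = []

    @with_reporting(lambda status, name: log.append((status, name)))
    def sample():
        return 42

    assert sample() == 42
    assert log == [('start', 'sample'), ('success', 'sample')]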
+ time.sleep(0) + try: + http_client.post( + "http://{0}/machine?comp=health".format(self.endpoint), + data=document, + extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, + ) + except Exception as e: + report_diagnostic_event("exception while reporting ready: %s" % e) + raise + LOG.info('Reported ready to Azure fabric.') @@ -467,4 +593,22 @@ def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, finally: shim.clean_up() + +class EphemeralDHCPv4WithReporting(object): + def __init__(self, reporter, nic=None): + self.reporter = reporter + self.ephemeralDHCPv4 = EphemeralDHCPv4(iface=nic) + + def __enter__(self): + with events.ReportEventStack( + name="obtain-dhcp-lease", + description="obtain dhcp lease", + parent=self.reporter): + return self.ephemeralDHCPv4.__enter__() + + def __exit__(self, excp_type, excp_value, excp_traceback): + self.ephemeralDHCPv4.__exit__( + excp_type, excp_value, excp_traceback) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 4d57cebc..3547dd94 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -181,7 +181,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): self.logs.getvalue()) @mock.patch(MOCKPATH + 'readurl') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') @mock.patch(MOCKPATH + 'net.is_up') def test_get_metadata_performs_dhcp_when_network_is_down( self, m_net_is_up, m_dhcp, m_readurl): @@ -195,7 +195,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): dsaz.get_metadata_from_imds('eth9', retries=2)) m_net_is_up.assert_called_with('eth9') - m_dhcp.assert_called_with('eth9') + m_dhcp.assert_called_with(mock.ANY, 'eth9') self.assertIn( "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time self.logs.getvalue()) @@ -552,7 +552,8 @@ scbus-1 on xpt0 bus 0 dsrc.crawl_metadata() self.assertEqual(str(cm.exception), error_msg) - @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') + @mock.patch( + 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') @mock.patch( 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') @@ -1308,7 +1309,9 @@ class TestAzureBounce(CiTestCase): self.assertEqual(initial_host_name, self.set_hostname.call_args_list[-1][0][0]) - def test_environment_correct_for_bounce_command(self): + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_environment_correct_for_bounce_command( + self, mock_get_boot_telemetry): interface = 'int0' hostname = 'my-new-host' old_hostname = 'my-old-host' @@ -1324,7 +1327,9 @@ class TestAzureBounce(CiTestCase): self.assertEqual(hostname, bounce_env['hostname']) self.assertEqual(old_hostname, bounce_env['old_hostname']) - def test_default_bounce_command_ifup_used_by_default(self): + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_default_bounce_command_ifup_used_by_default( + self, mock_get_boot_telemetry): cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py old mode 100755 new mode 100644 index d01ed5b3..640895a4 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -7,9 +7,12 @@ import json import os import struct 
import time +import re +import mock from cloudinit import util from cloudinit.tests.helpers import CiTestCase +from cloudinit.sources.helpers import azure class TestKvpEncoding(CiTestCase): @@ -126,3 +129,65 @@ class TextKvpReporter(CiTestCase): reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) kvps = list(reporter._iterate_kvps(0)) self.assertEqual(0, len(kvps)) + + @mock.patch('cloudinit.distros.uses_systemd') + @mock.patch('cloudinit.util.subp') + def test_get_boot_telemetry(self, m_subp, m_sysd): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + datetime_pattern = (r"\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]" + r"\d:[0-5]\d\.\d+([+-][0-2]\d:[0-5]\d|Z)") + + # get_boot_telemetry makes two subp calls to systemctl. We provide + # a list of values that the subp calls should return + m_subp.side_effect = [ + ('UserspaceTimestampMonotonic=1844838', ''), + ('InactiveExitTimestampMonotonic=3068203', '')] + m_sysd.return_value = True + + reporter.publish_event(azure.get_boot_telemetry()) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + + evt_msg = kvps[0]['value'] + if not re.search("kernel_start=" + datetime_pattern, evt_msg): + raise AssertionError("missing kernel_start timestamp") + if not re.search("user_start=" + datetime_pattern, evt_msg): + raise AssertionError("missing user_start timestamp") + if not re.search("cloudinit_activation=" + datetime_pattern, + evt_msg): + raise AssertionError( + "missing cloudinit_activation timestamp") + + def test_get_system_info(self): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + pattern = r"[^=\s]+" + + reporter.publish_event(azure.get_system_info()) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + evt_msg = kvps[0]['value'] + + # the most important information is cloudinit version, + # kernel_version, and the distro variant.
It is ok if + # the rest is not available + if not re.search("cloudinit_version=" + pattern, evt_msg): + raise AssertionError("missing cloudinit_version string") + if not re.search("kernel_version=" + pattern, evt_msg): + raise AssertionError("missing kernel_version string") + if not re.search("variant=" + pattern, evt_msg): + raise AssertionError("missing distro variant string") + + def test_report_diagnostic_event(self): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + + reporter.publish_event( + azure.report_diagnostic_event("test_diagnostic")) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + evt_msg = kvps[0]['value'] + + if "test_diagnostic" not in evt_msg: + raise AssertionError("missing expected diagnostic message") -- cgit v1.2.3 From d1b022217a652c7a84d5430c9e571987864d3982 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 28 Aug 2019 00:58:16 +0000 Subject: exoscale: fix sysconfig cloud_config_modules overrides Make sure Exoscale supplements or overrides existing system config setting cloud_config_modules instead of replacing it with a one-item list, set-passwords. LP: #1841454 --- cloudinit/sources/DataSourceExoscale.py | 26 ++++++++++++++++-------- tests/unittests/test_datasource/test_exoscale.py | 24 ++++++++++++++-------- 2 files changed, 33 insertions(+), 17 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py index 52e7f6f6..fdfb4ed3 100644 --- a/cloudinit/sources/DataSourceExoscale.py +++ b/cloudinit/sources/DataSourceExoscale.py @@ -6,6 +6,7 @@ from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import sources +from cloudinit import helpers from cloudinit import url_helper from cloudinit import util @@ -20,13 +21,6 @@ URL_RETRIES = 6 EXOSCALE_DMI_NAME = "Exoscale" -BUILTIN_DS_CONFIG = { - # We run the set password config module on every boot in order to enable - # resetting the instance's password via the exoscale console (and a - # subsequent instance reboot). - 'cloud_config_modules': [["set-passwords", "always"]] -} - class DataSourceExoscale(sources.DataSource): @@ -42,8 +36,22 @@ class DataSourceExoscale(sources.DataSource): self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT)) self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT) self.url_retries = self.ds_cfg.get('retries', URL_RETRIES) - - self.extra_config = BUILTIN_DS_CONFIG + self.extra_config = {} + + def activate(self, cfg, is_new_instance): + """Adjust set-passwords module to run 'always' during each boot""" + # We run the set password config module on every boot in order to + # enable resetting the instance's password via the exoscale console + # (and a subsequent instance reboot). + # Exoscale password server only provides set-passwords user-data if + # a user has triggered a password reset. So calling that password + # service generally results in no additional cloud-config.
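    # Aside: the activate() implementation that follows clears a
    # cloud-init "semaphore" file so the module is eligible to run again
    # on the next boot. A standalone sketch of that mechanism
    # (hypothetical layout, not the helpers.FileSemaphores API):
    import os
    import tempfile

    def clear_semaphore(sem_dir, module):
        # Presence of the file means the module already ran for this
        # instance; removing it re-arms the module.
        path = os.path.join(sem_dir, 'config_' + module)
        if os.path.exists(path):
            os.unlink(path)
            return True
        return False

    d = tempfile.mkdtemp()
    open(os.path.join(d, 'config_set_passwords'), 'w').close()
    assert clear_semaphore(d, 'set_passwords') is True   # re-armed
    assert clear_semaphore(d, 'set_passwords') is False  # nothing to clear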
+ # TODO(Create util functions for overriding merged sys_cfg module freq) + mod = 'set_passwords' + sem_path = self.paths.get_ipath_cur('sem') + sem_helper = helpers.FileSemaphores(sem_path) + if sem_helper.clear('config_' + mod, None): + LOG.debug('Overriding module set-passwords with frequency always') def wait_for_metadata_service(self): """Wait for the metadata service to be reachable.""" diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py index 350c3304..f0061199 100644 --- a/tests/unittests/test_datasource/test_exoscale.py +++ b/tests/unittests/test_datasource/test_exoscale.py @@ -11,8 +11,10 @@ from cloudinit.sources.DataSourceExoscale import ( PASSWORD_SERVER_PORT, read_metadata) from cloudinit.tests.helpers import HttprettyTestCase, mock +from cloudinit import util import httpretty +import os import requests @@ -63,6 +65,18 @@ class TestDatasourceExoscale(HttprettyTestCase): password = get_password() self.assertEqual(expected_password, password) + def test_activate_removes_set_passwords_semaphore(self): + """Allow set_passwords to run every boot by removing the semaphore.""" + path = helpers.Paths({'cloud_dir': self.tmp}) + sem_dir = self.tmp_path('instance/sem', dir=self.tmp) + util.ensure_dir(sem_dir) + sem_file = os.path.join(sem_dir, 'config_set_passwords') + with open(sem_file, 'w') as stream: + stream.write('') + ds = DataSourceExoscale({}, None, path) + ds.activate(None, None) + self.assertFalse(os.path.exists(sem_file)) + def test_get_data(self): """The datasource conforms to expected behavior when supplied full test data.""" @@ -95,8 +109,6 @@ class TestDatasourceExoscale(HttprettyTestCase): self.assertEqual(ds.get_config_obj(), {'ssh_pwauth': True, 'password': expected_password, - 'cloud_config_modules': [ - ["set-passwords", "always"]], 'chpasswd': { 'expire': False, }}) @@ -130,9 +142,7 @@ class TestDatasourceExoscale(HttprettyTestCase): self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") self.assertEqual(ds.metadata, {"instance-id": expected_id, "local-hostname": expected_hostname}) - self.assertEqual(ds.get_config_obj(), - {'cloud_config_modules': [ - ["set-passwords", "always"]]}) + self.assertEqual(ds.get_config_obj(), {}) def test_get_data_no_password(self): """The datasource conforms to expected behavior when no password is provided. @@ -163,9 +173,7 @@ class TestDatasourceExoscale(HttprettyTestCase): self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") self.assertEqual(ds.metadata, {"instance-id": expected_id, "local-hostname": expected_hostname}) - self.assertEqual(ds.get_config_obj(), - {'cloud_config_modules': [ - ["set-passwords", "always"]]}) + self.assertEqual(ds.get_config_obj(), {}) @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') def test_read_metadata_when_password_server_unreachable(self, m_password): -- cgit v1.2.3 From 45426d8d38a7224962867ba71f390cce653e0d17 Mon Sep 17 00:00:00 2001 From: Xiaofeng Wang Date: Wed, 11 Sep 2019 18:53:01 +0000 Subject: VMware: add option into VMTools config to enable/disable custom script. VMware customization already has support to run a custom script during the VM customization. Adding this option allows a VM administrator to disable the execution of customization scripts. If set, the script will not execute and the customization status is set to GUESTCUST_ERROR_SCRIPT_DISABLED.
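The gating behaviour described above is small enough to state as a sketch. Illustrative only -- assumed names and a simplified contract, not the datasource code in the diff below:

    def should_run_custom_script(enable_flag, script_name):
        # A configured script plus an explicit 'false' flag is an
        # administrative denial, not a silent skip.
        if script_name and enable_flag.lower() == 'false':
            raise RuntimeError('Custom script is disabled by VM Administrator')
        return bool(script_name)

    assert should_run_custom_script('true', 'post-script') is True
    try:
        should_run_custom_script('false', 'post-script')
    except RuntimeError as err:
        assert 'disabled' in str(err)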
--- cloudinit/sources/DataSourceOVF.py | 21 ++++- .../sources/helpers/vmware/imc/guestcust_error.py | 1 + .../sources/helpers/vmware/imc/guestcust_util.py | 37 ++++++++++++ tests/unittests/test_datasource/test_ovf.py | 55 +++++++++++++++--- tests/unittests/test_vmware/test_guestcust_util.py | 65 ++++++++++++++++++++++ 5 files changed, 169 insertions(+), 10 deletions(-) create mode 100644 tests/unittests/test_vmware/test_guestcust_util.py (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index dd941d2e..b1561892 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -40,11 +40,15 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_state \ from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( enable_nics, get_nics_to_enable, - set_customization_status + set_customization_status, + get_tools_config ) LOG = logging.getLogger(__name__) +CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg" +GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts" + class DataSourceOVF(sources.DataSource): @@ -148,6 +152,21 @@ class DataSourceOVF(sources.DataSource): product_marker, os.path.join(self.paths.cloud_dir, 'data')) special_customization = product_marker and not hasmarkerfile customscript = self._vmware_cust_conf.custom_script_name + custScriptConfig = get_tools_config( + CONFGROUPNAME_GUESTCUSTOMIZATION, + GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS, + "true") + if custScriptConfig.lower() == "false": + # Update the customization status if the + # custom script is disabled + if special_customization and customscript: + msg = "Custom script is disabled by VM Administrator" + LOG.debug(msg) + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED) + raise RuntimeError(msg) + ccScriptsDir = os.path.join( self.paths.get_cpath("scripts"), "per-instance") diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py index db5a00dc..65ae7390 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py @@ -10,5 +10,6 @@ class GuestCustErrorEnum(object): """Specifies different errors of Guest Customization engine""" GUESTCUST_ERROR_SUCCESS = 0 + GUESTCUST_ERROR_SCRIPT_DISABLED = 6 # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index a590f323..eb78172e 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -7,6 +7,7 @@ import logging import os +import re import time from cloudinit import util @@ -117,4 +118,40 @@ def enable_nics(nics): logger.warning("Can't connect network interfaces after %d attempts", enableNicsWaitRetries) + +def get_tools_config(section, key, defaultVal): + """ Return the value of [section] key from VMTools configuration. + + @param section: String of section to read from VMTools config + @returns: String value from key in [section] or defaultVal if + [section] is not present or vmware-toolbox-cmd is + not installed.
+ """ + + if not util.which('vmware-toolbox-cmd'): + logger.debug( + 'vmware-toolbox-cmd not installed, returning default value') + return defaultVal + + retValue = defaultVal + cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key] + + try: + (outText, _) = util.subp(cmd) + m = re.match(r'([a-zA-Z0-9 ]+)=(.*)', outText) + if m: + retValue = m.group(2).strip() + logger.debug("Get tools config: [%s] %s = %s", + section, key, retValue) + else: + logger.debug( + "Tools config: [%s] %s is not found, return default value: %s", + section, key, retValue) + except util.ProcessExecutionError as e: + logger.error("Failed running %s[%s]", cmd, e.exit_code) + logger.exception(e) + + return retValue + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index 349d54cc..a615470a 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -169,19 +169,56 @@ class TestDatasourceOVF(CiTestCase): MARKER-ID = 12345345 """) util.write_file(conf_file, conf_content) - with self.assertRaises(CustomScriptNotFound) as context: - wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'util.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) + with mock.patch(MPATH + 'get_tools_config', return_value='true'): + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'util.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) customscript = self.tmp_path('test-script', self.tdir) self.assertIn('Script %s not found!!' % customscript, str(context.exception)) + def test_get_data_cust_script_disabled(self): + """If custom script is disabled by VMware tools configuration, + raise a RuntimeError. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + """) + util.write_file(conf_file, conf_content) + # Prepare the custom sript + customscript = self.tmp_path('test-script', self.tdir) + util.write_file(customscript, "This is the post cust script") + + with mock.patch(MPATH + 'get_tools_config', return_value='false'): + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(RuntimeError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'util.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + self.assertIn('Custom script is disabled by VM Administrator', + str(context.exception)) + def test_get_data_non_vmware_seed_platform_info(self): """Platform info properly reports when on non-vmware platforms.""" paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py new file mode 100644 index 00000000..b8fa9942 --- /dev/null +++ b/tests/unittests/test_vmware/test_guestcust_util.py @@ -0,0 +1,65 @@ +# Copyright (C) 2019 Canonical Ltd. +# Copyright (C) 2019 VMware INC. 
+# +# Author: Xiaofeng Wang +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import util +from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( + get_tools_config, +) +from cloudinit.tests.helpers import CiTestCase, mock + + +class TestGuestCustUtil(CiTestCase): + def test_get_tools_config_not_installed(self): + """ + This test is designed to verify the behavior if vmware-toolbox-cmd + is not installed. + """ + with mock.patch.object(util, 'which', return_value=None): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), 'defaultVal') + + def test_get_tools_config_internal_exception(self): + """ + This test is designed to verify the behavior if internal exception + is raised. + """ + with mock.patch.object(util, 'which', return_value='/dummy/path'): + with mock.patch.object(util, 'subp', + return_value=('key=value', b''), + side_effect=util.ProcessExecutionError( + "subp failed", exit_code=99)): + # verify return value is 'defaultVal', not 'value'. + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'defaultVal') + + def test_get_tools_config_normal(self): + """ + This test is designed to verify the value could be parsed from + key = value of the given [section] + """ + with mock.patch.object(util, 'which', return_value='/dummy/path'): + # value is not blank + with mock.patch.object(util, 'subp', + return_value=('key = value ', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'value') + # value is blank + with mock.patch.object(util, 'subp', + return_value=('key = ', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + '') + # value contains = + with mock.patch.object(util, 'subp', + return_value=('key=Bar=Wark', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'Bar=Wark') + +# vi: ts=4 expandtab -- cgit v1.2.3 From 571f7c36e89f67f4c2d1cacfd8f9269bf864d560 Mon Sep 17 00:00:00 2001 From: Shixin Ruan Date: Wed, 18 Sep 2019 13:15:25 +0000 Subject: Add datasource for ZStack platform. Zstack platform provides a AWS Ec2 metadata service, and identifies their platform to the guest by setting the 'chassis asset tag' to a string that ends with '.zstack.io'. LP: #1841181 --- cloudinit/apport.py | 1 + cloudinit/sources/DataSourceEc2.py | 16 ++++++++++++- doc/rtd/topics/datasources.rst | 1 + doc/rtd/topics/datasources/zstack.rst | 36 +++++++++++++++++++++++++++++ tests/unittests/test_datasource/test_ec2.py | 28 ++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 9 +++++++- tools/ds-identify | 5 ++++ 7 files changed, 94 insertions(+), 2 deletions(-) create mode 100644 doc/rtd/topics/datasources/zstack.rst (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 003ff1ff..fde1f75b 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -37,6 +37,7 @@ KNOWN_CLOUD_NAMES = [ 'Scaleway', 'SmartOS', 'VMware', + 'ZStack', 'Other'] # Potentially clear text collected logs diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 10107456..6c72ace2 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -33,6 +33,7 @@ class CloudNames(object): ALIYUN = "aliyun" AWS = "aws" BRIGHTBOX = "brightbox" + ZSTACK = "zstack" # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', # then an attempt at the Ec2 Metadata service will be made. 
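    # Aside: matching on the leading dot is what makes the check below a
    # full-domain-boundary test. A toy illustration (not the patch code):
    def is_zstack(asset_tag):
        return asset_tag.lower().endswith('.zstack.io')

    assert is_zstack('123456.zstack.io')
    assert not is_zstack('123456.buzzstack.io')  # substring, not a subdomain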
UNKNOWN = "unknown" @@ -477,10 +478,16 @@ def identify_brightbox(data): return CloudNames.BRIGHTBOX +def identify_zstack(data): + if data['asset_tag'].endswith('.zstack.io'): + return CloudNames.ZSTACK + + def identify_platform(): # identify the platform and return an entry in CloudNames. data = _collect_platform_data() - checks = (identify_aws, identify_brightbox, lambda x: CloudNames.UNKNOWN) + checks = (identify_aws, identify_brightbox, identify_zstack, + lambda x: CloudNames.UNKNOWN) for checker in checks: try: result = checker(data) @@ -498,6 +505,7 @@ def _collect_platform_data(): uuid: system-uuid from dmi or /sys/hypervisor uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' serial: dmi 'system-serial-number' (/sys/.../product_serial) + asset_tag: 'dmidecode -s chassis-asset-tag' On Ec2 instances experimentation is that product_serial is upper case, and product_uuid is lower case. This returns lower case values for both. @@ -520,6 +528,12 @@ def _collect_platform_data(): data['serial'] = serial.lower() + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag is None: + asset_tag = '' + + data['asset_tag'] = asset_tag.lower() + return data diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 8e58be97..a337c08c 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -45,6 +45,7 @@ The following is a list of documents for each supported datasource: datasources/oracle.rst datasources/ovf.rst datasources/smartos.rst + datasources/zstack.rst Creation diff --git a/doc/rtd/topics/datasources/zstack.rst b/doc/rtd/topics/datasources/zstack.rst new file mode 100644 index 00000000..36e60ffb --- /dev/null +++ b/doc/rtd/topics/datasources/zstack.rst @@ -0,0 +1,36 @@ +.. _datasource_zstack: + +ZStack +====== +ZStack platform provides a AWS Ec2 metadata service, but with different datasource identity. +More information about ZStack can be found at `ZStack `__. + +Discovery +--------- +To determine whether a vm running on ZStack platform, cloud-init checks DMI information +by 'dmidecode -s chassis-asset-tag', if the output ends with '.zstack.io', it's running +on ZStack platform: + + +Metadata +^^^^^^^^ +Same as EC2, instance metadata can be queried at + +:: + + GET http://169.254.169.254/2009-04-04/meta-data/ + instance-id + local-hostname + +Userdata +^^^^^^^^ +Same as EC2, instance userdata can be queried at + +:: + + GET http://169.254.169.254/2009-04-04/user-data/ + meta_data.json + user_data + password + +.. 
vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 1ec8e009..6fabf258 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -662,4 +662,32 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): expected, ec2.convert_ec2_metadata_network_config(self.network_metadata)) + +class TesIdentifyPlatform(test_helpers.CiTestCase): + + def collmock(self, **kwargs): + """return non-special _collect_platform_data updated with changes.""" + unspecial = { + 'asset_tag': '3857-0037-2746-7462-1818-3997-77', + 'serial': 'H23-C4J3JV-R6', + 'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2', + 'uuid_source': 'dmi', + } + unspecial.update(**kwargs) + return unspecial + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_zstack(self, m_collect): + """zstack should be identified if cassis-asset-tag ends in .zstack.io + """ + m_collect.return_value = self.collmock(asset_tag='123456.zstack.io') + self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform()) + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_zstack_full_domain_only(self, m_collect): + """zstack asset-tag matching should match only on full domain boundary. + """ + m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io') + self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index de87be29..7aeeb91c 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -609,6 +609,10 @@ class TestDsIdentify(DsIdentifyBase): self.assertEqual(expected, [p for p in expected if p in toks], "path did not have expected tokens") + def test_zstack_is_ec2(self): + """EC2: chassis asset tag ends with 'zstack.io'""" + self._test_ds_found('Ec2-ZStack') + class TestIsIBMProvisioning(DsIdentifyBase): """Test the is_ibm_provisioning method in ds-identify.""" @@ -971,8 +975,11 @@ VALID_CFG = { {'name': 'blkid', 'ret': 2, 'out': ''}, ], 'files': {ds_smartos.METADATA_SOCKFILE: 'would be a socket\n'}, + }, + 'Ec2-ZStack': { + 'ds': 'Ec2', + 'files': {P_CHASSIS_ASSET_TAG: '123456.zstack.io\n'}, } - } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index 2447d14f..f76f2a6e 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -895,6 +895,11 @@ ec2_identify_platform() { *.brightbox.com) _RET="Brightbox"; return 0;; esac + local asset_tag="${DI_DMI_CHASSIS_ASSET_TAG}" + case "$asset_tag" in + *.zstack.io) _RET="ZStack"; return 0;; + esac + # AWS http://docs.aws.amazon.com/AWSEC2/ # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="${PATH_SYS_HYPERVISOR}/uuid" -- cgit v1.2.3 From d3b1c4ae6bd237a04ba5df4306ff38f752f72132 Mon Sep 17 00:00:00 2001 From: Adam Dobrawy Date: Fri, 4 Oct 2019 23:15:10 +0000 Subject: Add RbxCloud datasource --- cloudinit/sources/DataSourceRbxCloud.py | 250 ++++++++++++++++++++++++++++ doc/rtd/topics/datasources/rbxcloud.rst | 25 +++ tests/unittests/test_datasource/test_rbx.py | 208 +++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 17 +- tools/ds-identify | 7 +- 5 files changed, 505 insertions(+), 2 deletions(-) create mode 100644 cloudinit/sources/DataSourceRbxCloud.py create mode 100644 doc/rtd/topics/datasources/rbxcloud.rst create mode 100644 tests/unittests/test_datasource/test_rbx.py (limited to 
'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py new file mode 100644 index 00000000..9a8c3d5c --- /dev/null +++ b/cloudinit/sources/DataSourceRbxCloud.py @@ -0,0 +1,250 @@ +# Copyright (C) 2018 Warsaw Data Center +# +# Author: Malwina Leis +# Author: Grzegorz Brzeski +# Author: Adam Dobrawy +# +# This file is part of cloud-init. See LICENSE file for license information. +""" +This file contains code used to gather the user data passed to an +instance on rootbox / hyperone cloud platforms +""" +import errno +import os +import os.path + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util +from cloudinit.event import EventType + +LOG = logging.getLogger(__name__) +ETC_HOSTS = '/etc/hosts' + + +def get_manage_etc_hosts(): + hosts = util.load_file(ETC_HOSTS, quiet=True) + if hosts: + LOG.debug('/etc/hosts exists - setting manage_etc_hosts to False') + return False + LOG.debug('/etc/hosts does not exist - setting manage_etc_hosts to True') + return True + + +def ip2int(addr): + parts = addr.split('.') + return (int(parts[0]) << 24) + (int(parts[1]) << 16) + \ + (int(parts[2]) << 8) + int(parts[3]) + + +def int2ip(addr): + return '.'.join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]]) + + +def _sub_arp(cmd): + """ + Uses the preferred cloud-init subprocess def of util.subp + and runs arping. Breaking this out into a separate function + for later use in mocking and unittests + """ + return util.subp(['arping'] + cmd) + + +def gratuitous_arp(items, distro): + source_param = '-S' + if distro.name in ['fedora', 'centos', 'rhel']: + source_param = '-s' + for item in items: + _sub_arp([ + '-c', '2', + source_param, item['source'], + item['destination'] + ]) + + +def get_md(): + rbx_data = None + devices = [ + dev + for dev, bdata in util.blkid().items() + if bdata.get('LABEL', '').upper() == 'CLOUDMD' + ] + for device in devices: + try: + rbx_data = util.mount_cb( + device=device, + callback=read_user_data_callback, + mtype=['vfat', 'fat'] + ) + if rbx_data: + break + except OSError as err: + if err.errno != errno.ENOENT: + raise + except util.MountFailedError: + util.logexc(LOG, "Failed to mount %s when looking for user " + "data", device) + if not rbx_data: + util.logexc(LOG, "Failed to load metadata and userdata") + return False + return rbx_data + + +def generate_network_config(netadps): + """Generate network configuration + + @param netadps: A list of network adapter settings + + @returns: A dict containing network config + """ + return { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'eth{}'.format(str(i)), + 'mac_address': netadp['macaddress'].lower(), + 'subnets': [ + { + 'type': 'static', + 'address': ip['address'], + 'netmask': netadp['network']['netmask'], + 'control': 'auto', + 'gateway': netadp['network']['gateway'], + 'dns_nameservers': netadp['network']['dns'][ + 'nameservers'] + } for ip in netadp['ip'] + ], + } for i, netadp in enumerate(netadps) + ] + } + + +def read_user_data_callback(mount_dir): + """This callback will be applied by util.mount_cb() on the mounted + drive. + + @param mount_dir: String representing path of directory where mounted drive + is available + + @returns: A dict containing userdata, metadata and cfg based on metadata.
+ """ + meta_data = util.load_json( + text=util.load_file( + fname=os.path.join(mount_dir, 'cloud.json'), + decode=False + ) + ) + user_data = util.load_file( + fname=os.path.join(mount_dir, 'user.data'), + quiet=True + ) + if 'vm' not in meta_data or 'netadp' not in meta_data: + util.logexc(LOG, "Failed to load metadata. Invalid format.") + return None + username = meta_data.get('additionalMetadata', {}).get('username') + ssh_keys = meta_data.get('additionalMetadata', {}).get('sshKeys', []) + + hash = None + if meta_data.get('additionalMetadata', {}).get('password'): + hash = meta_data['additionalMetadata']['password']['sha512'] + + network = generate_network_config(meta_data['netadp']) + + data = { + 'userdata': user_data, + 'metadata': { + 'instance-id': meta_data['vm']['_id'], + 'local-hostname': meta_data['vm']['name'], + 'public-keys': [] + }, + 'gratuitous_arp': [ + { + "source": ip["address"], + "destination": target + } + for netadp in meta_data['netadp'] + for ip in netadp['ip'] + for target in [ + netadp['network']["gateway"], + int2ip(ip2int(netadp['network']["gateway"]) + 2), + int2ip(ip2int(netadp['network']["gateway"]) + 3) + ] + ], + 'cfg': { + 'ssh_pwauth': True, + 'disable_root': True, + 'system_info': { + 'default_user': { + 'name': username, + 'gecos': username, + 'sudo': ['ALL=(ALL) NOPASSWD:ALL'], + 'passwd': hash, + 'lock_passwd': False, + 'ssh_authorized_keys': ssh_keys, + 'shell': '/bin/bash' + } + }, + 'network_config': network, + 'manage_etc_hosts': get_manage_etc_hosts(), + }, + } + + LOG.debug('returning DATA object:') + LOG.debug(data) + + return data + + +class DataSourceRbxCloud(sources.DataSource): + update_events = {'network': [ + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT + ]} + + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.seed = None + + def __str__(self): + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) + + def _get_data(self): + """ + Metadata is passed to the launching instance which + is used to perform instance configuration. + """ + rbx_data = get_md() + self.userdata_raw = rbx_data['userdata'] + self.metadata = rbx_data['metadata'] + self.gratuitous_arp = rbx_data['gratuitous_arp'] + self.cfg = rbx_data['cfg'] + return True + + @property + def network_config(self): + return self.cfg['network_config'] + + def get_public_ssh_keys(self): + return self.metadata['public-keys'] + + def get_userdata_raw(self): + return self.userdata_raw + + def get_config_obj(self): + return self.cfg + + def activate(self, cfg, is_new_instance): + gratuitous_arp(self.gratuitous_arp, self.distro) + + +# Used to match classes to dependencies +datasources = [ + (DataSourceRbxCloud, (sources.DEP_FILESYSTEM,)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/doc/rtd/topics/datasources/rbxcloud.rst b/doc/rtd/topics/datasources/rbxcloud.rst new file mode 100644 index 00000000..3d465bed --- /dev/null +++ b/doc/rtd/topics/datasources/rbxcloud.rst @@ -0,0 +1,25 @@ +.. _datasource_config_drive: + +Rbx Cloud +========= + +The Rbx datasource consumes the metadata drive available on platform +`HyperOne`_ and `Rootbox`_ platform. + +Datasource supports, in particular, network configurations, hostname, +user accounts and user metadata. 
+ +Metadata drive +-------------- + +Drive metadata is a `FAT`_-formatted partition with the ``CLOUDMD`` label on +the system disk. Its contents are refreshed each time the virtual machine +is restarted, if the partition exists. For more information see +`HyperOne docs`_. + +.. _HyperOne: http://www.hyperone.com/ +.. _Rootbox: https://rootbox.com/ +.. _HyperOne docs: http://www.hyperone.com/ +.. _FAT: https://en.wikipedia.org/wiki/File_Allocation_Table + +.. vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_rbx.py b/tests/unittests/test_datasource/test_rbx.py new file mode 100644 index 00000000..aabf1f18 --- /dev/null +++ b/tests/unittests/test_datasource/test_rbx.py @@ -0,0 +1,208 @@ +import json + +from cloudinit import helpers +from cloudinit import distros +from cloudinit.sources import DataSourceRbxCloud as ds +from cloudinit.tests.helpers import mock, CiTestCase, populate_dir + +DS_PATH = "cloudinit.sources.DataSourceRbxCloud" + +CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \ + "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \ + "tToyGP41.s1" + +CLOUD_METADATA = { + "vm": { + "memory": 4, + "cpu": 2, + "name": "vm-image-builder", + "_id": "5beab44f680cffd11f0e60fc" + }, + "additionalMetadata": { + "username": "guru", + "sshKeys": ["ssh-rsa ..."], + "password": { + "sha512": CRYPTO_PASS + } + }, + "disk": [ + {"size": 10, "type": "ssd", + "name": "vm-image-builder-os", + "_id": "5beab450680cffd11f0e60fe"}, + {"size": 2, "type": "ssd", + "name": "ubuntu-1804-bionic", + "_id": "5bef002c680cffd11f107590"} + ], + "netadp": [ + { + "ip": [{"address": "62.181.8.174"}], + "network": { + "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, + "routing": [], + "gateway": "62.181.8.1", + "netmask": "255.255.248.0", + "name": "public", + "type": "public", + "_id": "5784e97be2627505227b578c" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:03", + "_id": "5beab450680cffd11f0e6102" + }, + { + "ip": [{"address": "10.209.78.11"}], + "network": { + "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]}, + "routing": [], + "gateway": "10.209.78.1", + "netmask": "255.255.255.0", + "name": "network-determined-bardeen", + "type": "private", + "_id": "5beaec64680cffd11f0e7c31" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:24", + "_id": "5bec18c6680cffd11f0f0d8b" + } + ], + "dvddrive": [{"iso": {}}] +} + + +class TestRbxDataSource(CiTestCase): + parsed_user = None + allowed_subp = ['bash'] + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def setUp(self): + super(TestRbxDataSource, self).setUp() + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp} + ) + + # defaults for a few tests + self.ds = ds.DataSourceRbxCloud + self.seed_dir = self.paths.seed_dir + self.sys_cfg = {'datasource': {'RbxCloud': {'dsmode': 'local'}}} + + def test_seed_read_user_data_callback_empty_file(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, {}) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertIsNone(results) + + def test_seed_read_user_data_callback_valid_disk(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertTrue('metadata' in results) + self.assertTrue('cfg' in
+
+    def test_seed_read_user_data_callback_userdata(self):
+        userdata = "#!/bin/sh\nexit 1"
+        populate_user_metadata(self.seed_dir, userdata)
+        populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
+
+        results = ds.read_user_data_callback(self.seed_dir)
+
+        self.assertNotEqual(results, None)
+        self.assertTrue('userdata' in results)
+        self.assertEqual(results['userdata'], userdata)
+
+    def test_generate_network_config(self):
+        expected = {
+            'version': 1,
+            'config': [
+                {
+                    'subnets': [
+                        {'control': 'auto',
+                         'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
+                         'netmask': '255.255.248.0',
+                         'address': '62.181.8.174',
+                         'type': 'static', 'gateway': '62.181.8.1'}
+                    ],
+                    'type': 'physical',
+                    'name': 'eth0',
+                    'mac_address': '00:15:5d:ff:0f:03'
+                },
+                {
+                    'subnets': [
+                        {'control': 'auto',
+                         'dns_nameservers': ['9.9.9.9', '8.8.8.8'],
+                         'netmask': '255.255.255.0',
+                         'address': '10.209.78.11',
+                         'type': 'static',
+                         'gateway': '10.209.78.1'}
+                    ],
+                    'type': 'physical',
+                    'name': 'eth1',
+                    'mac_address': '00:15:5d:ff:0f:24'
+                }
+            ]
+        }
+        self.assertEqual(
+            expected,
+            ds.generate_network_config(CLOUD_METADATA['netadp'])
+        )
+
+    @mock.patch(DS_PATH + '.util.subp')
+    def test_gratuitous_arp_run_standard_arping(self, m_subp):
+        """Test that gratuitous_arp runs arping with expected parameters."""
+        items = [
+            {
+                'destination': '172.17.0.2',
+                'source': '172.16.6.104'
+            },
+            {
+                'destination': '172.17.0.2',
+                'source': '172.16.6.104',
+            },
+        ]
+        ds.gratuitous_arp(items, self._fetch_distro('ubuntu'))
+        self.assertEqual([
+            mock.call([
+                'arping', '-c', '2', '-S',
+                '172.16.6.104', '172.17.0.2'
+            ]),
+            mock.call([
+                'arping', '-c', '2', '-S',
+                '172.16.6.104', '172.17.0.2'
+            ])
+        ], m_subp.call_args_list
+        )
+
+    @mock.patch(DS_PATH + '.util.subp')
+    def test_handle_rhel_like_arping(self, m_subp):
+        """Test gratuitous_arp on RHEL-like distros (arping uses -s)."""
+        items = [
+            {
+                'source': '172.16.6.104',
+                'destination': '172.17.0.2',
+            }
+        ]
+        ds.gratuitous_arp(items, self._fetch_distro('fedora'))
+        self.assertEqual([
+            mock.call(
+                ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2']
+            )],
+            m_subp.call_args_list
+        )
+
+
+def populate_cloud_metadata(path, data):
+    populate_dir(path, {'cloud.json': json.dumps(data)})
+
+
+def populate_user_metadata(path, data):
+    populate_dir(path, {'user.data': data})
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 7aeeb91c..c5b5c46c 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -267,10 +267,13 @@ class TestDsIdentify(DsIdentifyBase):
         """ConfigDrive datasource has a disk with LABEL=config-2."""
         self._test_ds_found('ConfigDrive')
 
+    def test_rbx_cloud(self):
+        """Rbx datasource has a disk with LABEL=CLOUDMD."""
+        self._test_ds_found('RbxCloud')
+
     def test_config_drive_upper(self):
         """ConfigDrive datasource has a disk with LABEL=CONFIG-2."""
         self._test_ds_found('ConfigDriveUpper')
-        return
 
     def test_config_drive_seed(self):
         """Config Drive seed directory."""
@@ -896,6 +899,18 @@ VALID_CFG = {
         os.path.join(P_SEED_DIR, 'config_drive', 'openstack',
                      'latest', 'meta_data.json'): 'md\n'},
     },
+    'RbxCloud': {
+        'ds': 'RbxCloud',
+        'mocks': [
+            {'name': 'blkid', 'ret': 0,
+             'out': blkid_out(
+                 [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
+                  {'DEVNAME': 'vda2', 'TYPE': 'ext4',
+                   'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
+                  {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CLOUDMD'}]
+             )},
+        ],
+    },
     'Hetzner': {
         'ds': 'Hetzner',
         'files': {P_SYS_VENDOR: 'Hetzner\n'},
diff --git a/tools/ds-identify b/tools/ds-identify
index f76f2a6e..40fc0604 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -124,7 +124,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale" +OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -702,6 +702,11 @@ dscheck_OpenNebula() { return ${DS_NOT_FOUND} } +dscheck_RbxCloud() { + has_fs_with_label "CLOUDMD" "cloudmd" && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + ovf_vmware_guest_customization() { # vmware guest customization -- cgit v1.2.3 From 7d5d34f3643a2108d667759f57a5ab63d0affadd Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 11 Oct 2019 14:54:49 +0000 Subject: Add Support for e24cloud to Ec2 datasource. e24cloud provides an EC2 compatible datasource. This just identifies their platform based on dmi 'system-vendor' having 'e24cloud'. https://www.e24cloud.com/en/ . Updated chassis typo in zstack unit test docstring. LP: #1696476 --- cloudinit/apport.py | 1 + cloudinit/sources/DataSourceEc2.py | 12 +++++++++++- doc/rtd/topics/datasources.rst | 3 ++- doc/rtd/topics/datasources/e24cloud.rst | 9 +++++++++ tests/unittests/test_datasource/test_ec2.py | 15 ++++++++++++++- tests/unittests/test_ds_identify.py | 18 +++++++++++++++++- tools/ds-identify | 5 +++++ 7 files changed, 59 insertions(+), 4 deletions(-) create mode 100644 doc/rtd/topics/datasources/e24cloud.rst (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index fde1f75b..c6797f12 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -22,6 +22,7 @@ KNOWN_CLOUD_NAMES = [ 'CloudSigma', 'CloudStack', 'DigitalOcean', + 'E24Cloud', 'GCE - Google Compute Engine', 'Exoscale', 'Hetzner Cloud', diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 6c72ace2..1d88c9b1 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -34,6 +34,7 @@ class CloudNames(object): AWS = "aws" BRIGHTBOX = "brightbox" ZSTACK = "zstack" + E24CLOUD = "e24cloud" # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', # then an attempt at the Ec2 Metadata service will be made. UNKNOWN = "unknown" @@ -483,11 +484,16 @@ def identify_zstack(data): return CloudNames.ZSTACK +def identify_e24cloud(data): + if data['vendor'] == 'e24cloud': + return CloudNames.E24CLOUD + + def identify_platform(): # identify the platform and return an entry in CloudNames. data = _collect_platform_data() checks = (identify_aws, identify_brightbox, identify_zstack, - lambda x: CloudNames.UNKNOWN) + identify_e24cloud, lambda x: CloudNames.UNKNOWN) for checker in checks: try: result = checker(data) @@ -506,6 +512,7 @@ def _collect_platform_data(): uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' serial: dmi 'system-serial-number' (/sys/.../product_serial) asset_tag: 'dmidecode -s chassis-asset-tag' + vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor) On Ec2 instances experimentation is that product_serial is upper case, and product_uuid is lower case. This returns lower case values for both. 
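
The new 'vendor' field added below comes from dmi 'system-manufacturer',
which the kernel exposes as /sys/class/dmi/id/sys_vendor. As a rough,
assumed stand-in for what util.read_dmi_data() does on Linux (the real
helper supports more keys and falls back to dmidecode; sketch only):

    import os

    def read_sys_vendor():
        # The same value identify_e24cloud() consumes, lower-cased
        # by _collect_platform_data().
        path = '/sys/class/dmi/id/sys_vendor'
        if os.path.isfile(path):
            with open(path) as fh:
                return fh.read().strip()
        return None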
@@ -534,6 +541,9 @@ def _collect_platform_data():
 
     data['asset_tag'] = asset_tag.lower()
 
+    vendor = util.read_dmi_data('system-manufacturer')
+    data['vendor'] = (vendor if vendor else '').lower()
+
     return data
 
 
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index a337c08c..70fbe07d 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -29,8 +29,9 @@ The following is a list of documents for each supported datasource:
 
    datasources/aliyun.rst
    datasources/altcloud.rst
-   datasources/ec2.rst
    datasources/azure.rst
+   datasources/ec2.rst
+   datasources/e24cloud.rst
    datasources/cloudsigma.rst
    datasources/cloudstack.rst
    datasources/configdrive.rst
diff --git a/doc/rtd/topics/datasources/e24cloud.rst b/doc/rtd/topics/datasources/e24cloud.rst
new file mode 100644
index 00000000..de9a4127
--- /dev/null
+++ b/doc/rtd/topics/datasources/e24cloud.rst
@@ -0,0 +1,9 @@
+.. _datasource_e24cloud:
+
+E24Cloud
+========
+`E24Cloud <https://www.e24cloud.com/en/>`_ platform provides an AWS Ec2
+metadata service clone. It identifies itself to guests using the dmi
+system-manufacturer (/sys/class/dmi/id/sys_vendor).
+
+.. vi: textwidth=78
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py
index 6fabf258..5e1dd777 100644
--- a/tests/unittests/test_datasource/test_ec2.py
+++ b/tests/unittests/test_datasource/test_ec2.py
@@ -672,13 +672,14 @@ class TesIdentifyPlatform(test_helpers.CiTestCase):
         'serial': 'H23-C4J3JV-R6',
         'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2',
         'uuid_source': 'dmi',
+        'vendor': 'tothecloud',
     }
     unspecial.update(**kwargs)
     return unspecial
 
     @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
     def test_identify_zstack(self, m_collect):
-        """zstack should be identified if cassis-asset-tag ends in .zstack.io
+        """zstack should be identified if chassis-asset-tag ends in .zstack.io
         """
         m_collect.return_value = self.collmock(asset_tag='123456.zstack.io')
         self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform())
@@ -690,4 +691,16 @@ class TesIdentifyPlatform(test_helpers.CiTestCase):
         m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io')
         self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
 
+    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+    def test_identify_e24cloud(self, m_collect):
+        """e24cloud identified if vendor is e24cloud"""
+        m_collect.return_value = self.collmock(vendor='e24cloud')
+        self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform())
+
+    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+    def test_identify_e24cloud_negative(self, m_collect):
+        """e24cloud not identified if vendor merely contains e24cloud"""
+        m_collect.return_value = self.collmock(vendor='e24cloudyday')
+        self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index c5b5c46c..12c6ae36 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -616,6 +616,14 @@ class TestDsIdentify(DsIdentifyBase):
         """EC2: chassis asset tag ends with 'zstack.io'"""
         self._test_ds_found('Ec2-ZStack')
 
+    def test_e24cloud_is_ec2(self):
+        """EC2: e24cloud identified by sys_vendor"""
+        self._test_ds_found('Ec2-E24Cloud')
+
+    def test_e24cloud_not_active(self):
+        """EC2: e24cloud in sys_vendor with an extra suffix is not e24cloud"""
+        self._test_ds_not_found('Ec2-E24Cloud-negative')
+
 
 class TestIsIBMProvisioning(DsIdentifyBase):
     """Test the
is_ibm_provisioning method in ds-identify.""" @@ -994,7 +1002,15 @@ VALID_CFG = { 'Ec2-ZStack': { 'ds': 'Ec2', 'files': {P_CHASSIS_ASSET_TAG: '123456.zstack.io\n'}, - } + }, + 'Ec2-E24Cloud': { + 'ds': 'Ec2', + 'files': {P_SYS_VENDOR: 'e24cloud\n'}, + }, + 'Ec2-E24Cloud-negative': { + 'ds': 'Ec2', + 'files': {P_SYS_VENDOR: 'e24cloudyday\n'}, + } } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index 40fc0604..20a99ee9 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -905,6 +905,11 @@ ec2_identify_platform() { *.zstack.io) _RET="ZStack"; return 0;; esac + local vendor="${DI_DMI_SYS_VENDOR}" + case "$vendor" in + e24cloud) _RET="E24cloud"; return 0;; + esac + # AWS http://docs.aws.amazon.com/AWSEC2/ # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="${PATH_SYS_HYPERVISOR}/uuid" -- cgit v1.2.3 From fac98983187c0984aa79c569c4b76cab90fd6f47 Mon Sep 17 00:00:00 2001 From: Harald Jensås Date: Wed, 16 Oct 2019 15:30:28 +0000 Subject: net: handle openstack dhcpv6-stateless configuration Openstack subnets can be configured to use SLAAC by setting ipv6_address_mode=dhcpv6-stateless. When this is the case the sysconfig interface configuration should use IPV6_AUTOCONF=yes and not set DHCPV6C=yes. This change sets the subnets type property to the full network['type'] from openstack metadata. cloudinit/net/sysconfig.py and cloudinit/net/eni.py are updated to support new subnet types: - 'ipv6_dhcpv6-stateless' => IPV6_AUTOCONF=yes - 'ipv6_dhcpv6-stateful' => DHCPV6C=yes Type 'dhcp6' in sysconfig is kept for backward compatibility with any implementations that set subnet_type == 'dhcp6'. LP: #1847517 --- cloudinit/net/eni.py | 7 +- cloudinit/net/sysconfig.py | 7 +- cloudinit/sources/helpers/openstack.py | 3 +- .../unittests/test_datasource/test_configdrive.py | 39 ++++++++++ tests/unittests/test_net.py | 88 ++++++++++++++++++++++ 5 files changed, 141 insertions(+), 3 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index b129bb62..530922b5 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -411,8 +411,13 @@ class Renderer(renderer.Renderer): else: ipv4_subnet_mtu = subnet.get('mtu') iface['inet'] = subnet_inet - if subnet['type'].startswith('dhcp'): + if (subnet['type'] == 'dhcp4' or subnet['type'] == 'dhcp6' or + subnet['type'] == 'ipv6_dhcpv6-stateful'): + # Configure network settings using DHCP or DHCPv6 iface['mode'] = 'dhcp' + elif subnet['type'] == 'ipv6_dhcpv6-stateless': + # Configure network settings using SLAAC from RAs + iface['mode'] = 'auto' # do not emit multiple 'auto $IFACE' lines as older (precise) # ifupdown complains diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 87b548e5..4e656768 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -343,10 +343,15 @@ class Renderer(renderer.Renderer): for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): mtu_key = 'MTU' subnet_type = subnet.get('type') - if subnet_type == 'dhcp6': + if subnet_type == 'dhcp6' or subnet_type == 'ipv6_dhcpv6-stateful': # TODO need to set BOOTPROTO to dhcp6 on SUSE iface_cfg['IPV6INIT'] = True + # Configure network settings using DHCPv6 iface_cfg['DHCPV6C'] = True + elif subnet_type == 'ipv6_dhcpv6-stateless': + iface_cfg['IPV6INIT'] = True + # Configure network settings using SLAAC from RAs + iface_cfg['IPV6_AUTOCONF'] = True elif subnet_type in ['dhcp4', 'dhcp']: iface_cfg['BOOTPROTO'] = 'dhcp' elif subnet_type == 'static': 
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 8f069115..d1c4601a 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -585,7 +585,8 @@ def convert_net_json(network_json=None, known_macs=None): subnet = dict((k, v) for k, v in network.items() if k in valid_keys['subnet']) if 'dhcp' in network['type']: - t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4' + t = (network['type'] if network['type'].startswith('ipv6') + else 'dhcp4') subnet.update({ 'type': t, }) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 520c50fe..8c788c1c 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -499,6 +499,45 @@ class TestNetJson(CiTestCase): known_macs=KNOWN_MACS) self.assertEqual(myds.network_config, network_config) + def test_network_config_conversion_dhcp6(self): + """Test some ipv6 input network json and check the expected + conversions.""" + in_data = { + 'links': [ + {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', + 'ethernet_mac_address': 'fa:16:3e:69:b0:58', + 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, + {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', + 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', + 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, + ], + 'networks': [ + {'link': 'tap2ecc7709-b3', 'type': 'ipv6_dhcpv6-stateless', + 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', + 'id': 'network0'}, + {'link': 'tap2f88d109-5b', 'type': 'ipv6_dhcpv6-stateful', + 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', + 'id': 'network1'}, + ] + } + out_data = { + 'version': 1, + 'config': [ + {'mac_address': 'fa:16:3e:69:b0:58', + 'mtu': None, + 'name': 'enp0s1', + 'subnets': [{'type': 'ipv6_dhcpv6-stateless'}], + 'type': 'physical'}, + {'mac_address': 'fa:16:3e:d4:57:ad', + 'mtu': None, + 'name': 'enp0s2', + 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}], + 'type': 'physical'} + ], + } + conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS) + self.assertEqual(out_data, conv_data) + def test_network_config_conversions(self): """Tests a bunch of input network json and checks the expected conversions.""" diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index b6597412..f5a9cae6 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1070,6 +1070,82 @@ NETWORK_CONFIGS = { """), }, }, + 'dhcpv6_stateless': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateless'} + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + IPV6_AUTOCONF=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'dhcpv6_stateful': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ 
+ version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateful'} + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, 'all': { 'expected_eni': ("""\ auto lo @@ -2781,6 +2857,18 @@ USERCTL=no self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + def test_dhcpv6_stateless_config(self): + entry = NETWORK_CONFIGS['dhcpv6_stateless'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_stateful_config(self): + entry = NETWORK_CONFIGS['dhcpv6_stateful'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + def test_check_ifcfg_rh(self): """ifcfg-rh plugin is added NetworkManager.conf if conf present.""" render_dir = self.tmp_dir() -- cgit v1.2.3 From e1b4b8c903fed3b69e57ec08c17ce94097d55901 Mon Sep 17 00:00:00 2001 From: Sam Eiderman Date: Tue, 29 Oct 2019 23:00:36 +0000 Subject: azure: Do not lock user on instance id change After initial boot ovf-env.xml is copied to agent dir (/var/lib/waagent/) with REDACTED password. On subsequent boots DataSourceAzure loads with a configuration where the user specified in /var/lib/waagent/ovf-env.xml is locked. If instance id changes, cc_users_groups action will lock the user. Fix this behavior by not locking the user if its password is REDACTED. LP: #1849677 --- cloudinit/sources/DataSourceAzure.py | 5 +++-- tests/unittests/test_datasource/test_azure.py | 16 ++++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 4984fa84..cdf49d36 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1193,9 +1193,10 @@ def read_azure_ovf(contents): defuser = {} if username: defuser['name'] = username - if password and DEF_PASSWD_REDACTION != password: - defuser['passwd'] = encrypt_pass(password) + if password: defuser['lock_passwd'] = False + if DEF_PASSWD_REDACTION != password: + defuser['passwd'] = encrypt_pass(password) if defuser: cfg['system_info'] = {'default_user': defuser} diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 3547dd94..80c6f019 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -769,6 +769,22 @@ scbus-1 on xpt0 bus 0 crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos])) + def test_user_not_locked_if_password_redacted(self): + odata = {'HostName': "myhost", 'UserName': "myuser", + 'UserPassword': dsaz.DEF_PASSWD_REDACTION} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue('default_user' in dsrc.cfg['system_info']) + defuser = dsrc.cfg['system_info']['default_user'] + + # default user should be updated username and should not be locked. 
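+        # (DEF_PASSWD_REDACTION is the sentinel the Azure platform writes
+        # into the agent copy of ovf-env.xml in place of the real password;
+        # with the fix, read_azure_ovf clears lock_passwd for such users
+        # without setting any 'passwd' value.)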
+        self.assertEqual(defuser['name'], odata['UserName'])
+        self.assertIn('lock_passwd', defuser)
+        self.assertFalse(defuser['lock_passwd'])
+
     def test_userdata_plain(self):
         mydata = "FOOBAR"
         odata = {'UserData': {'text': mydata, 'encoding': 'plain'}}
-- 
cgit v1.2.3


From a61ee02a50eb21954c114e01d2d042916bb2dc14 Mon Sep 17 00:00:00 2001
From: Xiaofeng Wang
Date: Thu, 31 Oct 2019 15:15:51 +0000
Subject: OVF: disable custom script execution by default

For security reasons, we disable custom script execution by default.
If a custom script is provided, customization is stopped unless the
script is explicitly enabled by the tools config.
---
 cloudinit/sources/DataSourceOVF.py          | 4 ++--
 tests/unittests/test_datasource/test_ovf.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'tests/unittests/test_datasource')

diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index e7794aab..896841e3 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -155,8 +155,8 @@ class DataSourceOVF(sources.DataSource):
             custScriptConfig = get_tools_config(
                 CONFGROUPNAME_GUESTCUSTOMIZATION,
                 GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
-                "true")
-            if custScriptConfig.lower() == "false":
+                "false")
+            if custScriptConfig.lower() != "true":
                 # Update the customization status if there is a
                 # custom script is disabled
                 if special_customization and customscript:
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
index a615470a..a19c35c8 100644
--- a/tests/unittests/test_datasource/test_ovf.py
+++ b/tests/unittests/test_datasource/test_ovf.py
@@ -204,7 +204,7 @@ class TestDatasourceOVF(CiTestCase):
         customscript = self.tmp_path('test-script', self.tdir)
         util.write_file(customscript, "This is the post cust script")
 
-        with mock.patch(MPATH + 'get_tools_config', return_value='false'):
+        with mock.patch(MPATH + 'get_tools_config', return_value='invalid'):
             with mock.patch(MPATH + 'set_customization_status',
                             return_value=('msg', b'')):
                 with self.assertRaises(RuntimeError) as context:
-- 
cgit v1.2.3


From e81389592a67bb54b889512928dcdf65f87ad436 Mon Sep 17 00:00:00 2001
From: Mike Gerdts
Date: Thu, 31 Oct 2019 19:45:29 +0000
Subject: DataSourceSmartOS: reconfigure network on each boot

In typical cases, SmartOS does not use DHCP for network configuration.
As such, if the network configuration changes, the change is reflected
in metadata and will be picked up during the next boot.

LP: #1765801
Joyent: OS-6902 reconfigure network on each boot
---
 cloudinit/sources/DataSourceSmartOS.py          | 8 +++++++-
 tests/unittests/test_datasource/test_smartos.py | 9 ++++++++-
 2 files changed, 15 insertions(+), 2 deletions(-)

(limited to 'tests/unittests/test_datasource')

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 32b57cdd..cf676504 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -1,5 +1,5 @@
 # Copyright (C) 2013 Canonical Ltd.
-# Copyright (c) 2018, Joyent, Inc.
+# Copyright 2019 Joyent, Inc.
 #
 # Author: Ben Howard
 #
@@ -34,6 +34,7 @@ from cloudinit import log as logging
 from cloudinit import serial
 from cloudinit import sources
 from cloudinit import util
+from cloudinit.event import EventType
 
 LOG = logging.getLogger(__name__)
 
@@ -178,6 +179,7 @@ class DataSourceSmartOS(sources.DataSource):
         self.metadata = {}
         self.network_data = None
         self._network_config = None
+        self.update_events['network'].add(EventType.BOOT)
 
         self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
 
@@ -319,6 +321,10 @@ class DataSourceSmartOS(sources.DataSource):
 
     @property
     def network_config(self):
+        # sources.clear_cached_data() may set _network_config to '_unset'.
+        if self._network_config == sources.UNSET:
+            self._network_config = None
+
         if self._network_config is None:
             if self.network_data is not None:
                 self._network_config = (
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 42ac6971..d5b1c29c 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -1,5 +1,5 @@
 # Copyright (C) 2013 Canonical Ltd.
-# Copyright (c) 2018, Joyent, Inc.
+# Copyright 2019 Joyent, Inc.
 #
 # Author: Ben Howard
 #
@@ -31,6 +31,7 @@ from cloudinit.sources.DataSourceSmartOS import (
     convert_smartos_network_data as convert_net,
     SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ,
     identify_file)
+from cloudinit.event import EventType
 
 import six
 
@@ -653,6 +654,12 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
             self.assertEqual(dsrc.device_name_to_device('FOO'),
                              mydscfg['disk_aliases']['FOO'])
 
+    def test_reconfig_network_on_boot(self):
+        # Test to ensure that network is configured from metadata on each boot
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        self.assertSetEqual(set([EventType.BOOT_NEW_INSTANCE, EventType.BOOT]),
+                            dsrc.update_events['network'])
+
 
 class TestIdentifyFile(CiTestCase):
     """Test the 'identify_file' utility."""
-- 
cgit v1.2.3


From 15fa154602f281c9239084d7d20a0999c6b09970 Mon Sep 17 00:00:00 2001
From: David Kindred
Date: Mon, 4 Nov 2019 22:00:19 +0000
Subject: configdrive: fix subplatform config-drive for /config-drive source

When ConfigDrive discovers the source path /config-drive, subplatform is
now reported as 'config-drive'.

LP: #1849731
---
 cloudinit/sources/DataSourceConfigDrive.py         |  6 +++---
 .../unittests/test_datasource/test_configdrive.py  | 23 ++++++++++++++++++----
 2 files changed, 22 insertions(+), 7 deletions(-)

(limited to 'tests/unittests/test_datasource')

diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index c3627152..f77923c2 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -163,10 +163,10 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
 
     def _get_subplatform(self):
         """Return the subplatform metadata source details."""
-        if self.seed_dir in self.source:
-            subplatform_type = 'seed-dir'
-        elif self.source.startswith('/dev'):
+        if self.source.startswith('/dev'):
             subplatform_type = 'config-disk'
+        else:
+            subplatform_type = 'seed-dir'
         return '%s (%s)' % (subplatform_type, self.source)
 
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 8c788c1c..cfb3b0a7 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -220,13 +220,15 @@ CFG_DRIVE_FILES_V2 = {
     'openstack/2015-10-15/user_data':
USER_DATA, 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)} +M_PATH = "cloudinit.sources.DataSourceConfigDrive." + class TestConfigDriveDataSource(CiTestCase): def setUp(self): super(TestConfigDriveDataSource, self).setUp() self.add_patch( - "cloudinit.sources.DataSourceConfigDrive.util.find_devs_with", + M_PATH + "util.find_devs_with", "m_find_devs_with", return_value=[]) self.tmp = self.tmp_dir() @@ -468,7 +470,7 @@ class TestConfigDriveDataSource(CiTestCase): util.find_devs_with = orig_find_devs_with util.is_partition = orig_is_partition - @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + @mock.patch(M_PATH + 'on_first_boot') def test_pubkeys_v2(self, on_first_boot): """Verify that public-keys work in config-drive-v2.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) @@ -478,6 +480,19 @@ class TestConfigDriveDataSource(CiTestCase): self.assertEqual('openstack', myds.platform) self.assertEqual('seed-dir (%s/seed)' % self.tmp, myds.subplatform) + def test_subplatform_config_drive_when_starts_with_dev(self): + """subplatform reports config-drive when source starts with /dev/.""" + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + with mock.patch(M_PATH + 'find_candidate_devs') as m_find_devs: + with mock.patch(M_PATH + 'util.is_FreeBSD', return_value=False): + with mock.patch(M_PATH + 'util.mount_cb'): + with mock.patch(M_PATH + 'on_first_boot'): + m_find_devs.return_value = ['/dev/anything'] + self.assertEqual(True, cfg_ds.get_data()) + self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform) + class TestNetJson(CiTestCase): def setUp(self): @@ -485,13 +500,13 @@ class TestNetJson(CiTestCase): self.tmp = self.tmp_dir() self.maxDiff = None - @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + @mock.patch(M_PATH + 'on_first_boot') def test_network_data_is_found(self, on_first_boot): """Verify that network_data is present in ds in config-drive-v2.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) self.assertIsNotNone(myds.network_json) - @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + @mock.patch(M_PATH + 'on_first_boot') def test_network_config_is_converted(self, on_first_boot): """Verify that network_data is converted and present on ds object.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) -- cgit v1.2.3 From 02f07b666adc62d70c4f1a98c2ae80cb6629fa9a Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 4 Nov 2019 22:11:37 +0000 Subject: azure: support matching dhcp route-metrics for dual-stack ipv4 ipv6 Network v2 configuration for Azure will set both dhcp4 and dhcp6 to False by default. When IPv6 privateIpAddresses are present for an interface in Azure's Instance Metadata Service (IMDS), set dhcp6: True and provide a route-metric value that will match the corresponding dhcp4 route-metric. The route-metric value will increase by 100 for each additional interface present to ensure the primary interface has a route to IMDS. Also fix dhcp route-metric rendering for eni and sysconfig distros. 
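
For illustration, mirroring the unit tests added below: a two-nic VM whose
secondary nic also carries an IPv6 privateIpAddress is intended to produce
a netplan v2 config along these lines (mac addresses taken from the test
fixtures; a sketch, not verbatim output):

    expected = {
        'version': 2,
        'ethernets': {
            'eth0': {'dhcp4': True,
                     'dhcp4-overrides': {'route-metric': 100},
                     'dhcp6': False,
                     'match': {'macaddress': '00:0d:3a:04:75:98'},
                     'set-name': 'eth0'},
            # Secondary nic: higher route-metric, with matching values
            # for the dhcp4 and dhcp6 overrides.
            'eth1': {'dhcp4': True,
                     'dhcp4-overrides': {'route-metric': 200},
                     'dhcp6': True,
                     'dhcp6-overrides': {'route-metric': 200},
                     'match': {'macaddress': '22:0d:3a:04:75:98'},
                     'set-name': 'eth1'},
        },
    }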
LP: #1850308 --- cloudinit/net/network_state.py | 17 ++++- cloudinit/net/sysconfig.py | 6 +- cloudinit/sources/DataSourceAzure.py | 10 ++- tests/unittests/test_datasource/test_azure.py | 101 ++++++++++++++++++++++++++ tests/unittests/test_net.py | 54 ++++++++++++++ 5 files changed, 178 insertions(+), 10 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index ba85c69e..20b7716b 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -22,8 +22,9 @@ NETWORK_STATE_REQUIRED_KEYS = { 1: ['version', 'config', 'network_state'], } NETWORK_V2_KEY_FILTER = [ - 'addresses', 'dhcp4', 'dhcp6', 'gateway4', 'gateway6', 'interfaces', - 'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan' + 'addresses', 'dhcp4', 'dhcp4-overrides', 'dhcp6', 'dhcp6-overrides', + 'gateway4', 'gateway6', 'interfaces', 'match', 'mtu', 'nameservers', + 'renderer', 'set-name', 'wakeonlan' ] NET_CONFIG_TO_V2 = { @@ -747,12 +748,20 @@ class NetworkStateInterpreter(object): def _v2_to_v1_ipcfg(self, cfg): """Common ipconfig extraction from v2 to v1 subnets array.""" + def _add_dhcp_overrides(overrides, subnet): + if 'route-metric' in overrides: + subnet['metric'] = overrides['route-metric'] + subnets = [] if cfg.get('dhcp4'): - subnets.append({'type': 'dhcp4'}) + subnet = {'type': 'dhcp4'} + _add_dhcp_overrides(cfg.get('dhcp4-overrides', {}), subnet) + subnets.append(subnet) if cfg.get('dhcp6'): + subnet = {'type': 'dhcp6'} self.use_ipv6 = True - subnets.append({'type': 'dhcp6'}) + _add_dhcp_overrides(cfg.get('dhcp6-overrides', {}), subnet) + subnets.append(subnet) gateway4 = None gateway6 = None diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 6717d924..fe0c67ca 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -395,6 +395,9 @@ class Renderer(renderer.Renderer): ipv6_index = -1 for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): subnet_type = subnet.get('type') + # metric may apply to both dhcp and static config + if 'metric' in subnet: + iface_cfg['METRIC'] = subnet['metric'] if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']: if has_default_route and iface_cfg['BOOTPROTO'] != 'none': iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False @@ -426,9 +429,6 @@ class Renderer(renderer.Renderer): else: iface_cfg['GATEWAY'] = subnet['gateway'] - if 'metric' in subnet: - iface_cfg['METRIC'] = subnet['metric'] - if 'dns_search' in subnet: iface_cfg['DOMAIN'] = ' '.join(subnet['dns_search']) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index cdf49d36..44cca210 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1322,7 +1322,8 @@ def parse_network_config(imds_metadata): network_metadata = imds_metadata['network'] for idx, intf in enumerate(network_metadata['interface']): nicname = 'eth{idx}'.format(idx=idx) - dev_config = {} + dev_config = {'dhcp4': False, 'dhcp6': False} + dhcp_override = {'route-metric': (idx + 1) * 100} for addr4 in intf['ipv4']['ipAddress']: privateIpv4 = addr4['privateIpAddress'] if privateIpv4: @@ -1340,12 +1341,15 @@ def parse_network_config(imds_metadata): # non-primary interfaces should have a higher # route-metric (cost) so default routes prefer # primary nic due to lower route-metric value - dev_config['dhcp4-overrides'] = { - 'route-metric': (idx + 1) * 100} + dev_config['dhcp4-overrides'] = dhcp_override for addr6 in intf['ipv6']['ipAddress']: 
privateIpv6 = addr6['privateIpAddress'] if privateIpv6: dev_config['dhcp6'] = True + # non-primary interfaces should have a higher + # route-metric (cost) so default routes prefer + # primary nic due to lower route-metric value + dev_config['dhcp6-overrides'] = dhcp_override break if dev_config: mac = ':'.join(re.findall(r'..', intf['macAddress'])) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 80c6f019..d92d7b2f 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -153,6 +153,102 @@ SECONDARY_INTERFACE = { MOCKPATH = 'cloudinit.sources.DataSourceAzure.' +class TestParseNetworkConfig(CiTestCase): + + maxDiff = None + + def test_single_ipv4_nic_configuration(self): + """parse_network_config emits dhcp on single nic with ipv4""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) + + def test_increases_route_metric_for_non_primary_nics(self): + """parse_network_config increases route-metric for each nic""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + imds_data['network']['interface'].append(third_intf) + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv4_and_ipv6_route_metrics_match_for_nics(self): + """parse_network_config emits matching ipv4 and ipv6 route-metrics.""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp6': False, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + third_intf['ipv6'] = { + "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + } + imds_data['network']['interface'].append(third_intf) + 
self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv4_secondary_ips_will_be_static_addrs(self): + """parse_network_config emits primary ipv4 as dhcp others are static""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + # Secondary ipv6 addresses currently ignored/unconfigured + nic1['ipv6'] = { + "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}] + } + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + class TestGetMetadataFromIMDS(HttprettyTestCase): with_logs = True @@ -641,6 +737,7 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'set-name': 'eth0', 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}}}, 'version': 2} @@ -658,14 +755,17 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'set-name': 'eth0', 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}}, 'eth1': {'set-name': 'eth1', 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200}}, 'eth2': {'set-name': 'eth2', 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} @@ -999,6 +1099,7 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, 'match': {'macaddress': '00:0d:3a:04:75:98'}, 'set-name': 'eth0'}}, 'version': 2} diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 6f83ad73..35ce55d2 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3101,6 +3101,36 @@ USERCTL=no self._compare_files_to_expected( expected, self._render_and_read(network_config=v2data)) + def test_from_v2_route_metric(self): + """verify route-metric gets rendered on nic when source is netplan.""" + overrides = {'route-metric': 100} + v2base = { + 'version': 2, + 'ethernets': { + 'eno1': {'dhcp4': True, + 'match': {'macaddress': '07-1c-c6-75-a4-be'}}}} + expected = { + 'ifcfg-eno1': textwrap.dedent("""\ + BOOTPROTO=dhcp + DEVICE=eno1 + HWADDR=07-1c-c6-75-a4-be + METRIC=100 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + } + for dhcp_ver in ('dhcp4', 'dhcp6'): + v2data = copy.deepcopy(v2base) + if dhcp_ver == 'dhcp6': + expected['ifcfg-eno1'] += "IPV6INIT=yes\nDHCPV6C=yes\n" + v2data['ethernets']['eno1'].update( + {dhcp_ver: True, '{0}-overrides'.format(dhcp_ver): overrides}) + self._compare_files_to_expected( + expected, self._render_and_read(network_config=v2data)) + class TestOpenSuseSysConfigRendering(CiTestCase): @@ -3466,6 +3496,30 @@ iface eth0 inet dhcp self.assertEqual( expected, dir2dict(tmp_dir)['/etc/network/interfaces']) + def test_v2_route_metric_to_eni(self): + """Network v2 route-metric overrides are preserved in eni output""" + tmp_dir = self.tmp_dir() + renderer = eni.Renderer() + expected_tmpl = textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto 
eth0 + iface eth0 inet{suffix} dhcp + metric 100 + """) + for dhcp_ver in ('dhcp4', 'dhcp6'): + suffix = '6' if dhcp_ver == 'dhcp6' else '' + dhcp_cfg = { + dhcp_ver: True, + '{ver}-overrides'.format(ver=dhcp_ver): {'route-metric': 100}} + v2_input = {'version': 2, 'ethernets': {'eth0': dhcp_cfg}} + ns = network_state.parse_net_config_data(v2_input) + renderer.render_network_state(ns, target=tmp_dir) + self.assertEqual( + expected_tmpl.format(suffix=suffix), + dir2dict(tmp_dir)['/etc/network/interfaces']) + class TestNetplanNetRendering(CiTestCase): -- cgit v1.2.3 From 9478f0f2fa6935d685092f344b23f34b883149a5 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 13 Nov 2019 13:00:12 -0700 Subject: azure: support secondary ipv6 addresses (#33) Azure's Instance Metadata Service (IMDS) reports multiple IPv6 addresses, via the http://169.254.169.254/metadata/instance/network route. Any additional values after the first in 'ipAddresses' under the 'ipv6' interface key are extracted and configured as static IPs on the interface. --- cloudinit/sources/DataSourceAzure.py | 49 +++++++++++++-------------- tests/unittests/test_datasource/test_azure.py | 34 +++++++++++++++++-- 2 files changed, 56 insertions(+), 27 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 44cca210..87a848ce 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1321,36 +1321,35 @@ def parse_network_config(imds_metadata): LOG.debug('Azure: generating network configuration from IMDS') network_metadata = imds_metadata['network'] for idx, intf in enumerate(network_metadata['interface']): + # First IPv4 and/or IPv6 address will be obtained via DHCP. + # Any additional IPs of each type will be set as static + # addresses. 
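+        # For example (see the tests below): appending a second
+        # privateIpAddress '10.0.0.5' to an interface's ipv4 ipAddress
+        # list, with subnet prefix '24', keeps dhcp4: True and adds a
+        # static 'addresses' entry of '10.0.0.5/24'.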
nicname = 'eth{idx}'.format(idx=idx) - dev_config = {'dhcp4': False, 'dhcp6': False} dhcp_override = {'route-metric': (idx + 1) * 100} - for addr4 in intf['ipv4']['ipAddress']: - privateIpv4 = addr4['privateIpAddress'] - if privateIpv4: - if dev_config.get('dhcp4', False): - # Append static address config for ip > 1 - netPrefix = intf['ipv4']['subnet'][0].get( - 'prefix', '24') - if not dev_config.get('addresses'): - dev_config['addresses'] = [] - dev_config['addresses'].append( - '{ip}/{prefix}'.format( - ip=privateIpv4, prefix=netPrefix)) - else: - dev_config['dhcp4'] = True + dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override, + 'dhcp6': False} + for addr_type in ('ipv4', 'ipv6'): + addresses = intf.get(addr_type, {}).get('ipAddress', []) + if addr_type == 'ipv4': + default_prefix = '24' + else: + default_prefix = '128' + if addresses: + dev_config['dhcp6'] = True # non-primary interfaces should have a higher # route-metric (cost) so default routes prefer # primary nic due to lower route-metric value - dev_config['dhcp4-overrides'] = dhcp_override - for addr6 in intf['ipv6']['ipAddress']: - privateIpv6 = addr6['privateIpAddress'] - if privateIpv6: - dev_config['dhcp6'] = True - # non-primary interfaces should have a higher - # route-metric (cost) so default routes prefer - # primary nic due to lower route-metric value - dev_config['dhcp6-overrides'] = dhcp_override - break + dev_config['dhcp6-overrides'] = dhcp_override + for addr in addresses[1:]: + # Append static address config for ip > 1 + netPrefix = intf[addr_type]['subnet'][0].get( + 'prefix', default_prefix) + privateIp = addr['privateIpAddress'] + if not dev_config.get('addresses'): + dev_config['addresses'] = [] + dev_config['addresses'].append( + '{ip}/{prefix}'.format( + ip=privateIp, prefix=netPrefix)) if dev_config: mac = ':'.join(re.findall(r'..', intf['macAddress'])) dev_config.update( diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index d92d7b2f..59e351de 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -197,9 +197,11 @@ class TestParseNetworkConfig(CiTestCase): def test_ipv4_and_ipv6_route_metrics_match_for_nics(self): """parse_network_config emits matching ipv4 and ipv6 route-metrics.""" expected = {'ethernets': { - 'eth0': {'dhcp4': True, + 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/128'], + 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, 'match': {'macaddress': '00:0d:3a:04:75:98'}, 'set-name': 'eth0'}, 'eth1': {'set-name': 'eth1', @@ -214,6 +216,14 @@ class TestParseNetworkConfig(CiTestCase): 'dhcp6': True, 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2} imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + nic1['ipv6'] = { + "subnet": [{"address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}] + } imds_data['network']['interface'].append(SECONDARY_INTERFACE) third_intf = copy.deepcopy(SECONDARY_INTERFACE) third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') @@ -240,6 +250,26 @@ class TestParseNetworkConfig(CiTestCase): nic1 = imds_data['network']['interface'][0] nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + nic1['ipv6'] = { + "subnet": 
[{"prefix": "10", "address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + } + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv6_secondary_ips_will_be_static_cidrs(self): + """parse_network_config emits primary ipv6 as dhcp others are static""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/10'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + # Secondary ipv6 addresses currently ignored/unconfigured nic1['ipv6'] = { "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], -- cgit v1.2.3 From 62bbc262c3c7f633eac1d09ec78c055eef05166a Mon Sep 17 00:00:00 2001 From: Harald Date: Wed, 20 Nov 2019 18:55:27 +0100 Subject: net: IPv6, accept_ra, slaac, stateless (#51) Router advertisements are required for the default route to be set up, thus accept_ra should be enabled for dhcpv6-stateful. sysconf: IPV6_FORCE_ACCEPT_RA controls accept_ra sysctl. eni: mode static and mode dhcp 'accept_ra' controls sysctl. Add 'accept-ra: true|false' parameter to config v1 and v2. When True: accept_ra is set to '1'. When False: accept_ra is set to '0'. When not defined in config the value is left to the operating system default. This change also extend the IPv6 support to distinguish between slaac and dhcpv6-stateless. SLAAC is autoconfig without any options from DHCP, while stateless auto-configures the address and the uses DHCP for other options. 
LP: #1806014 LP: #1808647 --- cloudinit/net/eni.py | 15 ++ cloudinit/net/netplan.py | 9 +- cloudinit/net/network_state.py | 21 +- cloudinit/net/sysconfig.py | 34 ++- cloudinit/sources/helpers/openstack.py | 21 +- .../unittests/test_datasource/test_configdrive.py | 3 +- tests/unittests/test_net.py | 238 ++++++++++++++++++++- 7 files changed, 320 insertions(+), 21 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index a9a80c95..70771060 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -399,6 +399,7 @@ class Renderer(renderer.Renderer): def _render_iface(self, iface, render_hwaddress=False): sections = [] subnets = iface.get('subnets', {}) + accept_ra = iface.pop('accept-ra', None) if subnets: for index, subnet in enumerate(subnets): ipv4_subnet_mtu = None @@ -415,9 +416,23 @@ class Renderer(renderer.Renderer): subnet['type'] == 'ipv6_dhcpv6-stateful'): # Configure network settings using DHCP or DHCPv6 iface['mode'] = 'dhcp' + if accept_ra is not None: + # Accept router advertisements (0=off, 1=on) + iface['accept_ra'] = '1' if accept_ra else '0' elif subnet['type'] == 'ipv6_dhcpv6-stateless': # Configure network settings using SLAAC from RAs iface['mode'] = 'auto' + # Use stateless DHCPv6 (0=off, 1=on) + iface['dhcp'] = '1' + elif subnet['type'] == 'ipv6_slaac': + # Configure network settings using SLAAC from RAs + iface['mode'] = 'auto' + # Use stateless DHCPv6 (0=off, 1=on) + iface['dhcp'] = '0' + elif subnet_is_ipv6(subnet) and subnet['type'] == 'static': + if accept_ra is not None: + # Accept router advertisements (0=off, 1=on) + iface['accept_ra'] = '1' if accept_ra else '0' # do not emit multiple 'auto $IFACE' lines as older (precise) # ifupdown complains diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 749d46f8..14d3999f 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -4,7 +4,7 @@ import copy import os from . 
import renderer -from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2 +from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2, IPV6_DYNAMIC_TYPES from cloudinit import log as logging from cloudinit import util @@ -52,7 +52,8 @@ def _extract_addresses(config, entry, ifname, features=None): 'mtu': 1480, 'netmask': 64, 'type': 'static'}], - 'type: physical' + 'type: physical', + 'accept-ra': 'true' } An entry dictionary looks like: @@ -95,6 +96,8 @@ def _extract_addresses(config, entry, ifname, features=None): if sn_type == 'dhcp': sn_type += '4' entry.update({sn_type: True}) + elif sn_type in IPV6_DYNAMIC_TYPES: + entry.update({'dhcp6': True}) elif sn_type in ['static']: addr = "%s" % subnet.get('address') if 'prefix' in subnet: @@ -147,6 +150,8 @@ def _extract_addresses(config, entry, ifname, features=None): ns = entry.get('nameservers', {}) ns.update({'search': searchdomains}) entry.update({'nameservers': ns}) + if 'accept-ra' in config and config['accept-ra'] is not None: + entry.update({'accept-ra': util.is_true(config.get('accept-ra'))}) def _extract_bond_slaves_by_name(interfaces, entry, bond_master): diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 20b7716b..7d206a1a 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -18,13 +18,17 @@ from cloudinit import util LOG = logging.getLogger(__name__) NETWORK_STATE_VERSION = 1 +IPV6_DYNAMIC_TYPES = ['dhcp6', + 'ipv6_slaac', + 'ipv6_dhcpv6-stateless', + 'ipv6_dhcpv6-stateful'] NETWORK_STATE_REQUIRED_KEYS = { 1: ['version', 'config', 'network_state'], } NETWORK_V2_KEY_FILTER = [ 'addresses', 'dhcp4', 'dhcp4-overrides', 'dhcp6', 'dhcp6-overrides', 'gateway4', 'gateway6', 'interfaces', 'match', 'mtu', 'nameservers', - 'renderer', 'set-name', 'wakeonlan' + 'renderer', 'set-name', 'wakeonlan', 'accept-ra' ] NET_CONFIG_TO_V2 = { @@ -342,7 +346,8 @@ class NetworkStateInterpreter(object): 'name': 'eth0', 'subnets': [ {'type': 'dhcp4'} - ] + ], + 'accept-ra': 'true' } ''' @@ -362,6 +367,9 @@ class NetworkStateInterpreter(object): self.use_ipv6 = True break + accept_ra = command.get('accept-ra', None) + if accept_ra is not None: + accept_ra = util.is_true(accept_ra) iface.update({ 'name': command.get('name'), 'type': command.get('type'), @@ -372,6 +380,7 @@ class NetworkStateInterpreter(object): 'address': None, 'gateway': None, 'subnets': subnets, + 'accept-ra': accept_ra }) self._network_state['interfaces'].update({command.get('name'): iface}) self.dump_network_state() @@ -615,6 +624,7 @@ class NetworkStateInterpreter(object): driver: ixgbe set-name: lom1 dhcp6: true + accept-ra: true switchports: match: name: enp2* @@ -643,7 +653,7 @@ class NetworkStateInterpreter(object): driver = match.get('driver', None) if driver: phy_cmd['params'] = {'driver': driver} - for key in ['mtu', 'match', 'wakeonlan']: + for key in ['mtu', 'match', 'wakeonlan', 'accept-ra']: if key in cfg: phy_cmd[key] = cfg[key] @@ -928,8 +938,9 @@ def is_ipv6_addr(address): def subnet_is_ipv6(subnet): """Common helper for checking network_state subnets for ipv6.""" - # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful' or 'ipv6_dhcpv6-stateless' - if subnet['type'].endswith('6') or subnet['type'].startswith('ipv6'): + # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or + # 'ipv6_slaac' + if subnet['type'].endswith('6') or subnet['type'] in IPV6_DYNAMIC_TYPES: # This is a request for DHCPv6. 
return True elif subnet['type'] == 'static' and is_ipv6_addr(subnet.get('address')): diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index fe0c67ca..310cdf01 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -14,7 +14,7 @@ from configobj import ConfigObj from . import renderer from .network_state import ( - is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6) + is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES) LOG = logging.getLogger(__name__) NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" @@ -335,6 +335,9 @@ class Renderer(renderer.Renderer): continue iface_cfg[new_key] = old_value + if iface['accept-ra'] is not None: + iface_cfg['IPV6_FORCE_ACCEPT_RA'] = iface['accept-ra'] + @classmethod def _render_subnets(cls, iface_cfg, subnets, has_default_route): # setting base values @@ -350,6 +353,15 @@ class Renderer(renderer.Renderer): # Configure network settings using DHCPv6 iface_cfg['DHCPV6C'] = True elif subnet_type == 'ipv6_dhcpv6-stateless': + iface_cfg['IPV6INIT'] = True + # Configure network settings using SLAAC from RAs and optional + # info from dhcp server using DHCPv6 + iface_cfg['IPV6_AUTOCONF'] = True + iface_cfg['DHCPV6C'] = True + # Use Information-request to get only stateless configuration + # parameters (i.e., without address). + iface_cfg['DHCPV6C_OPTIONS'] = '-S' + elif subnet_type == 'ipv6_slaac': iface_cfg['IPV6INIT'] = True # Configure network settings using SLAAC from RAs iface_cfg['IPV6_AUTOCONF'] = True @@ -398,10 +410,15 @@ class Renderer(renderer.Renderer): # metric may apply to both dhcp and static config if 'metric' in subnet: iface_cfg['METRIC'] = subnet['metric'] + # TODO(hjensas): Including dhcp6 here is likely incorrect. DHCPv6 + # does not ever provide a default gateway, the default gateway + # come from RA's. (https://github.com/openSUSE/wicked/issues/570) if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']: if has_default_route and iface_cfg['BOOTPROTO'] != 'none': iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False continue + elif subnet_type in IPV6_DYNAMIC_TYPES: + continue elif subnet_type == 'static': if subnet_is_ipv6(subnet): ipv6_index = ipv6_index + 1 @@ -444,10 +461,14 @@ class Renderer(renderer.Renderer): @classmethod def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets): for _, subnet in enumerate(subnets, start=len(iface_cfg.children)): + subnet_type = subnet.get('type') for route in subnet.get('routes', []): is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway']) - if _is_default_route(route): + # Any dynamic configuration method, slaac, dhcpv6-stateful/ + # stateless should get router information from router RA's. + if (_is_default_route(route) and subnet_type not in + IPV6_DYNAMIC_TYPES): if ( (subnet.get('ipv4') and route_cfg.has_set_default_ipv4) or @@ -466,10 +487,17 @@ class Renderer(renderer.Renderer): # TODO(harlowja): add validation that no other iface has # also provided the default route? iface_cfg['DEFROUTE'] = True + # TODO(hjensas): Including dhcp6 here is likely incorrect. + # DHCPv6 does not ever provide a default gateway, the + # default gateway come from RA's. + # (https://github.com/openSUSE/wicked/issues/570) if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4', 'dhcp6'): + # NOTE(hjensas): DHCLIENT_SET_DEFAULT_ROUTE is SuSE + # only. RHEL, CentOS, Fedora does not implement this + # option. 
iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True if 'gateway' in route: - if is_ipv6 or is_ipv6_addr(route['gateway']): + if is_ipv6: iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] route_cfg.has_set_default_ipv6 = True else: diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index d1c4601a..0778f45a 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -584,17 +584,24 @@ def convert_net_json(network_json=None, known_macs=None): if n['link'] == link['id']]: subnet = dict((k, v) for k, v in network.items() if k in valid_keys['subnet']) - if 'dhcp' in network['type']: - t = (network['type'] if network['type'].startswith('ipv6') - else 'dhcp4') - subnet.update({ - 'type': t, - }) - else: + + if network['type'] == 'ipv4_dhcp': + subnet.update({'type': 'dhcp4'}) + elif network['type'] == 'ipv6_dhcp': + subnet.update({'type': 'dhcp6'}) + elif network['type'] in ['ipv6_slaac', 'ipv6_dhcpv6-stateless', + 'ipv6_dhcpv6-stateful']: + subnet.update({'type': network['type']}) + elif network['type'] in ['ipv4', 'ipv6']: subnet.update({ 'type': 'static', 'address': network.get('ip_address'), }) + + # Enable accept_ra for stateful and legacy ipv6_dhcp types + if network['type'] in ['ipv6_dhcpv6-stateful', 'ipv6_dhcp']: + cfg.update({'accept-ra': True}) + if network['type'] == 'ipv4': subnet['ipv4'] = True if network['type'] == 'ipv6': diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index cfb3b0a7..6f830cc6 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -547,7 +547,8 @@ class TestNetJson(CiTestCase): 'mtu': None, 'name': 'enp0s2', 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}], - 'type': 'physical'} + 'type': 'physical', + 'accept-ra': True} ], } conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 35ce55d2..0f45dc38 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1070,6 +1070,143 @@ NETWORK_CONFIGS = { """), }, }, + 'dhcpv6_accept_ra': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept_ra 1 + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + accept-ra: true + dhcp6: true + """).rstrip(' '), + 'yaml_v1': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + accept-ra: true + """).rstrip(' '), + 'yaml_v2': textwrap.dedent("""\ + version: 2 + ethernets: + iface0: + dhcp6: true + accept-ra: true + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'dhcpv6_reject_ra': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept_ra 0 + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + accept-ra: false + dhcp6: true + """).rstrip(' '), + 'yaml_v1': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + accept-ra: false + """).rstrip(' '), + 'yaml_v2': 
textwrap.dedent("""\ + version: 2 + ethernets: + iface0: + dhcp6: true + accept-ra: false + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=no + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'ipv6_slaac': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + dhcp 0 + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_slaac'} + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + IPV6_AUTOCONF=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, 'dhcpv6_stateless': { 'expected_eni': textwrap.dedent("""\ auto lo @@ -1077,6 +1214,7 @@ NETWORK_CONFIGS = { auto iface0 iface iface0 inet6 auto + dhcp 1 """).rstrip(' '), 'expected_netplan': textwrap.dedent(""" network: @@ -1097,6 +1235,8 @@ NETWORK_CONFIGS = { 'ifcfg-iface0': textwrap.dedent("""\ BOOTPROTO=none DEVICE=iface0 + DHCPV6C=yes + DHCPV6C_OPTIONS=-S IPV6_AUTOCONF=yes IPV6INIT=yes DEVICE=iface0 @@ -1121,6 +1261,7 @@ NETWORK_CONFIGS = { version: 2 ethernets: iface0: + accept-ra: true dhcp6: true """).rstrip(' '), 'yaml': textwrap.dedent("""\ @@ -1130,6 +1271,7 @@ NETWORK_CONFIGS = { name: 'iface0' subnets: - {'type': 'ipv6_dhcpv6-stateful'} + accept-ra: true """).rstrip(' '), 'expected_sysconfig': { 'ifcfg-iface0': textwrap.dedent("""\ @@ -1137,6 +1279,7 @@ NETWORK_CONFIGS = { DEVICE=iface0 DHCPV6C=yes IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=yes DEVICE=iface0 NM_CONTROLLED=no ONBOOT=yes @@ -2884,6 +3027,34 @@ USERCTL=no self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + def test_dhcpv6_accept_ra_config_v1(self): + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_accept_ra_config_v2(self): + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v2'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_reject_ra_config_v1(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_reject_ra_config_v2(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v2'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + def test_dhcpv6_stateless_config(self): entry = NETWORK_CONFIGS['dhcpv6_stateless'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) @@ -4022,6 +4193,46 @@ class TestNetplanRoundTrip(CiTestCase): entry['expected_netplan'].splitlines(), files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + def testsimple_render_dhcpv6_accept_ra(self): + entry = 
NETWORK_CONFIGS['dhcpv6_accept_ra']
+        files = self._render_and_read(network_config=yaml.load(
+            entry['yaml_v1']))
+        self.assertEqual(
+            entry['expected_netplan'].splitlines(),
+            files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
+    def testsimple_render_dhcpv6_reject_ra(self):
+        entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
+        files = self._render_and_read(network_config=yaml.load(
+            entry['yaml_v1']))
+        self.assertEqual(
+            entry['expected_netplan'].splitlines(),
+            files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
+    def testsimple_render_ipv6_slaac(self):
+        entry = NETWORK_CONFIGS['ipv6_slaac']
+        files = self._render_and_read(network_config=yaml.load(
+            entry['yaml']))
+        self.assertEqual(
+            entry['expected_netplan'].splitlines(),
+            files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
+    def testsimple_render_dhcpv6_stateless(self):
+        entry = NETWORK_CONFIGS['dhcpv6_stateless']
+        files = self._render_and_read(network_config=yaml.load(
+            entry['yaml']))
+        self.assertEqual(
+            entry['expected_netplan'].splitlines(),
+            files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
+    def testsimple_render_dhcpv6_stateful(self):
+        entry = NETWORK_CONFIGS['dhcpv6_stateful']
+        files = self._render_and_read(network_config=yaml.load(
+            entry['yaml']))
+        self.assertEqual(
+            entry['expected_netplan'].splitlines(),
+            files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
     def testsimple_render_all(self):
         entry = NETWORK_CONFIGS['all']
         files = self._render_and_read(network_config=yaml.load(entry['yaml']))
@@ -4154,16 +4365,37 @@ class TestEniRoundTrip(CiTestCase):

     def testsimple_render_dhcpv6_stateless(self):
         entry = NETWORK_CONFIGS['dhcpv6_stateless']
-        files = self._render_and_read(network_config=yaml.load(
-            entry['yaml']))
+        files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        self.assertEqual(
+            entry['expected_eni'].splitlines(),
+            files['/etc/network/interfaces'].splitlines())
+
+    def testsimple_render_ipv6_slaac(self):
+        entry = NETWORK_CONFIGS['ipv6_slaac']
+        files = self._render_and_read(network_config=yaml.load(entry['yaml']))
         self.assertEqual(
             entry['expected_eni'].splitlines(),
             files['/etc/network/interfaces'].splitlines())

     def testsimple_render_dhcpv6_stateful(self):
         entry = NETWORK_CONFIGS['dhcpv6_stateless']
+        files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        self.assertEqual(
+            entry['expected_eni'].splitlines(),
+            files['/etc/network/interfaces'].splitlines())
+
+    def testsimple_render_dhcpv6_accept_ra(self):
+        entry = NETWORK_CONFIGS['dhcpv6_accept_ra']
         files = self._render_and_read(network_config=yaml.load(
-            entry['yaml']))
+            entry['yaml_v1']))
+        self.assertEqual(
+            entry['expected_eni'].splitlines(),
+            files['/etc/network/interfaces'].splitlines())
+
+    def testsimple_render_dhcpv6_reject_ra(self):
+        entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
+        files = self._render_and_read(network_config=yaml.load(
+            entry['yaml_v1']))
         self.assertEqual(
             entry['expected_eni'].splitlines(),
             files['/etc/network/interfaces'].splitlines())
-- cgit v1.2.3


From 310f8605a5fe62dacf8edc63a809a061085bb907 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 21 Nov 2019 13:38:44 -0500
Subject: tests: Fix cloudsigma tests when no dmidecode data is present. (#57)

The cloudsigma tests had a few test cases that were not getting all the
"mocks" set up correctly. Specifically, is_running_in_cloudsigma was not
being replaced, and calls would leak through to util.read_dmi_data.
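The add_patch helper used below boils down to the stdlib mock-with-cleanup
idiom; a minimal sketch of the same pattern (illustrative test class name,
assuming cloud-init is importable so the patch target resolves):

    import unittest
    from unittest import mock

    DS_PATH = 'cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma'

    class ExampleCloudSigmaTest(unittest.TestCase):
        def setUp(self):
            super(ExampleCloudSigmaTest, self).setUp()
            # Start the patch and register its removal: every test in the
            # class now sees is_running_in_cloudsigma() return True, and no
            # call can leak through to util.read_dmi_data.
            patcher = mock.patch(DS_PATH + '.is_running_in_cloudsigma',
                                 return_value=True)
            self.m_running_in_cloudsigma = patcher.start()
            self.addCleanup(patcher.stop)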
--- tests/unittests/test_datasource/test_cloudsigma.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index 3bf52e69..d62d542b 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -30,6 +30,8 @@ SERVER_CONTEXT = { } } +DS_PATH = 'cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma' + class CepkoMock(Cepko): def __init__(self, mocked_context): @@ -42,17 +44,15 @@ class CepkoMock(Cepko): class DataSourceCloudSigmaTest(test_helpers.CiTestCase): def setUp(self): super(DataSourceCloudSigmaTest, self).setUp() - self.add_patch( - "cloudinit.sources.DataSourceCloudSigma.util.is_container", - "m_is_container", return_value=False) self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) + self.add_patch(DS_PATH + '.is_running_in_cloudsigma', + "m_is_container", return_value=True) self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( "", "", paths=self.paths) - self.datasource.is_running_in_cloudsigma = lambda: True self.datasource.cepko = CepkoMock(SERVER_CONTEXT) - self.datasource.get_data() def test_get_hostname(self): + self.datasource.get_data() self.assertEqual("test_server", self.datasource.get_hostname()) self.datasource.metadata['name'] = '' self.assertEqual("65b2fb23", self.datasource.get_hostname()) @@ -61,23 +61,28 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase): self.assertEqual("65b2fb23", self.datasource.get_hostname()) def test_get_public_ssh_keys(self): + self.datasource.get_data() self.assertEqual([SERVER_CONTEXT['meta']['ssh_public_key']], self.datasource.get_public_ssh_keys()) def test_get_instance_id(self): + self.datasource.get_data() self.assertEqual(SERVER_CONTEXT['uuid'], self.datasource.get_instance_id()) def test_platform(self): """All platform-related attributes are set.""" + self.datasource.get_data() self.assertEqual(self.datasource.cloud_name, 'cloudsigma') self.assertEqual(self.datasource.platform_type, 'cloudsigma') self.assertEqual(self.datasource.subplatform, 'cepko (/dev/ttyS1)') def test_metadata(self): + self.datasource.get_data() self.assertEqual(self.datasource.metadata, SERVER_CONTEXT) def test_user_data(self): + self.datasource.get_data() self.assertEqual(self.datasource.userdata_raw, SERVER_CONTEXT['meta']['cloudinit-user-data']) @@ -91,14 +96,13 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase): self.assertEqual(self.datasource.userdata_raw, b'hi world\n') def test_vendor_data(self): + self.datasource.get_data() self.assertEqual(self.datasource.vendordata_raw, SERVER_CONTEXT['vendor_data']['cloudinit']) def test_lack_of_vendor_data(self): stripped_context = copy.deepcopy(SERVER_CONTEXT) del stripped_context["vendor_data"] - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( - "", "", paths=self.paths) self.datasource.cepko = CepkoMock(stripped_context) self.datasource.get_data() @@ -107,8 +111,6 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase): def test_lack_of_cloudinit_key_in_vendor_data(self): stripped_context = copy.deepcopy(SERVER_CONTEXT) del stripped_context["vendor_data"]["cloudinit"] - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( - "", "", paths=self.paths) self.datasource.cepko = CepkoMock(stripped_context) self.datasource.get_data() -- cgit v1.2.3 From 4bc399e0cd0b7e9177f948aecd49f6b8323ff30b Mon Sep 17 00:00:00 2001 
From: Ryan Harper
Date: Fri, 22 Nov 2019 21:05:44 -0600
Subject: ec2: Add support for AWS IMDS v2 (session-oriented) (#55)

* ec2: Add support for AWS IMDS v2 (session-oriented)

AWS now supports a new version of fetching Instance Metadata[1]. Update
cloud-init's ec2 utility functions and update ec2 derived datasources
accordingly.

For DataSourceEc2 (versus ec2-look-alikes), cloud-init will issue the PUT
request to obtain an API token for the maximum lifetime and then all
subsequent interactions with the IMDS will include the token in the header.

If the API token endpoint is unreachable on the Ec2 platform, log a warning
and fall back to using IMDSv1, which does not use session tokens when
communicating with the instance metadata service.

We handle read errors, typically seen if the IMDS is beyond one network hop
(IMDSv2 responses have a ttl=1), by setting the API token to a disabled
value and then using IMDSv1 paths.

To support token-based headers, ec2_utils functions were updated to support
custom headers_cb and exception_cb callback functions so Ec2 can store or
refresh API tokens in the event of a token becoming stale.

[1] https://docs.aws.amazon.com/AWSEC2/latest/ \
    UserGuide/ec2-instance-metadata.html \
    #instance-metadata-v2-how-it-works
---
 cloudinit/ec2_utils.py                             |  37 +++--
 cloudinit/sources/DataSourceCloudStack.py          |   2 +-
 cloudinit/sources/DataSourceEc2.py                 | 166 ++++++++++++++++++---
 cloudinit/sources/DataSourceExoscale.py            |   2 +-
 cloudinit/sources/DataSourceMAAS.py                |   2 +-
 cloudinit/sources/DataSourceOpenStack.py           |   2 +-
 cloudinit/url_helper.py                            |  15 +-
 tests/unittests/test_datasource/test_cloudstack.py |  21 ++-
 tests/unittests/test_datasource/test_ec2.py        |   6 +-
 9 files changed, 201 insertions(+), 52 deletions(-)

(limited to 'tests/unittests/test_datasource')

diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 3b7b17f1..57708c14 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -134,25 +134,28 @@ class MetadataMaterializer(object):
         return joined


-def _skip_retry_on_codes(status_codes, _request_args, cause):
+def skip_retry_on_codes(status_codes, _request_args, cause):
     """Returns False if cause.code is in status_codes."""
     return cause.code not in status_codes


 def get_instance_userdata(api_version='latest',
                           metadata_address='http://169.254.169.254',
-                          ssl_details=None, timeout=5, retries=5):
+                          ssl_details=None, timeout=5, retries=5,
+                          headers_cb=None, exception_cb=None):
     ud_url = url_helper.combine_url(metadata_address, api_version)
     ud_url = url_helper.combine_url(ud_url, 'user-data')
     user_data = ''
     try:
-        # It is ok for userdata to not exist (thats why we are stopping if
-        # NOT_FOUND occurs) and just in that case returning an empty string.
-        exception_cb = functools.partial(_skip_retry_on_codes,
-                                         SKIP_USERDATA_CODES)
+        if not exception_cb:
+            # It is ok for userdata to not exist (thats why we are stopping if
+            # NOT_FOUND occurs) and just in that case returning an empty
+            # string.
+ exception_cb = functools.partial(skip_retry_on_codes, + SKIP_USERDATA_CODES) response = url_helper.read_file_or_url( ud_url, ssl_details=ssl_details, timeout=timeout, - retries=retries, exception_cb=exception_cb) + retries=retries, exception_cb=exception_cb, headers_cb=headers_cb) user_data = response.contents except url_helper.UrlError as e: if e.code not in SKIP_USERDATA_CODES: @@ -165,11 +168,13 @@ def get_instance_userdata(api_version='latest', def _get_instance_metadata(tree, api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, - leaf_decoder=None): + leaf_decoder=None, headers_cb=None, + exception_cb=None): md_url = url_helper.combine_url(metadata_address, api_version, tree) caller = functools.partial( url_helper.read_file_or_url, ssl_details=ssl_details, - timeout=timeout, retries=retries) + timeout=timeout, retries=retries, headers_cb=headers_cb, + exception_cb=exception_cb) def mcaller(url): return caller(url).contents @@ -191,22 +196,28 @@ def _get_instance_metadata(tree, api_version='latest', def get_instance_metadata(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, - leaf_decoder=None): + leaf_decoder=None, headers_cb=None, + exception_cb=None): # Note, 'meta-data' explicitly has trailing /. # this is required for CloudStack (LP: #1356855) return _get_instance_metadata(tree='meta-data/', api_version=api_version, metadata_address=metadata_address, ssl_details=ssl_details, timeout=timeout, - retries=retries, leaf_decoder=leaf_decoder) + retries=retries, leaf_decoder=leaf_decoder, + headers_cb=headers_cb, + exception_cb=exception_cb) def get_instance_identity(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, - leaf_decoder=None): + leaf_decoder=None, headers_cb=None, + exception_cb=None): return _get_instance_metadata(tree='dynamic/instance-identity', api_version=api_version, metadata_address=metadata_address, ssl_details=ssl_details, timeout=timeout, - retries=retries, leaf_decoder=leaf_decoder) + retries=retries, leaf_decoder=leaf_decoder, + headers_cb=headers_cb, + exception_cb=exception_cb) # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index e333cb50..2013bed7 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -93,7 +93,7 @@ class DataSourceCloudStack(sources.DataSource): urls = [uhelp.combine_url(self.metadata_address, 'latest/meta-data/instance-id')] start_time = time.time() - url = uhelp.wait_for_url( + url, _response = uhelp.wait_for_url( urls=urls, max_wait=url_params.max_wait_seconds, timeout=url_params.timeout_seconds, status_cb=LOG.warning) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 1d88c9b1..b9f346a6 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -28,6 +28,10 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") STRICT_ID_DEFAULT = "warn" +API_TOKEN_ROUTE = 'latest/api/token' +API_TOKEN_DISABLED = '_ec2_disable_api_token' +AWS_TOKEN_TTL_SECONDS = '21600' + class CloudNames(object): ALIYUN = "aliyun" @@ -62,6 +66,7 @@ class DataSourceEc2(sources.DataSource): url_max_wait = 120 url_timeout = 50 + _api_token = None # API token for accessing the metadata service _network_config = sources.UNSET # Used to cache calculated network cfg v1 # 
Whether we want to get network configuration from the metadata service. @@ -148,11 +153,12 @@ class DataSourceEc2(sources.DataSource): min_metadata_version. """ # Assumes metadata service is already up + url_tmpl = '{0}/{1}/meta-data/instance-id' + headers = self._get_headers() for api_ver in self.extended_metadata_versions: - url = '{0}/{1}/meta-data/instance-id'.format( - self.metadata_address, api_ver) + url = url_tmpl.format(self.metadata_address, api_ver) try: - resp = uhelp.readurl(url=url) + resp = uhelp.readurl(url=url, headers=headers) except uhelp.UrlError as e: LOG.debug('url %s raised exception %s', url, e) else: @@ -172,12 +178,39 @@ class DataSourceEc2(sources.DataSource): # setup self.identity. So we need to do that now. api_version = self.get_metadata_api_version() self.identity = ec2.get_instance_identity( - api_version, self.metadata_address).get('document', {}) + api_version, self.metadata_address, + headers_cb=self._get_headers, + exception_cb=self._refresh_stale_aws_token_cb).get( + 'document', {}) return self.identity.get( 'instanceId', self.metadata['instance-id']) else: return self.metadata['instance-id'] + def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None): + if self.cloud_name != CloudNames.AWS: + return + + urls = [] + url2base = {} + url_path = API_TOKEN_ROUTE + request_method = 'PUT' + for url in mdurls: + cur = '{0}/{1}'.format(url, url_path) + urls.append(cur) + url2base[cur] = url + + # use the self._status_cb to check for Read errors, which means + # we can't reach the API token URL, so we should disable IMDSv2 + LOG.debug('Fetching Ec2 IMDSv2 API Token') + url, response = uhelp.wait_for_url( + urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb, + headers_cb=self._get_headers, request_method=request_method) + + if url and response: + self._api_token = response + return url2base[url] + def wait_for_metadata_service(self): mcfg = self.ds_cfg @@ -199,27 +232,39 @@ class DataSourceEc2(sources.DataSource): LOG.warning("Empty metadata url list! 
using default list") mdurls = self.metadata_urls - urls = [] - url2base = {} - for url in mdurls: - cur = '{0}/{1}/meta-data/instance-id'.format( - url, self.min_metadata_version) - urls.append(cur) - url2base[cur] = url - - start_time = time.time() - url = uhelp.wait_for_url( - urls=urls, max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, status_cb=LOG.warning) - - if url: - self.metadata_address = url2base[url] + # try the api token path first + metadata_address = self._maybe_fetch_api_token(mdurls) + if not metadata_address: + if self._api_token == API_TOKEN_DISABLED: + LOG.warning('Retrying with IMDSv1') + # if we can't get a token, use instance-id path + urls = [] + url2base = {} + url_path = '{ver}/meta-data/instance-id'.format( + ver=self.min_metadata_version) + request_method = 'GET' + for url in mdurls: + cur = '{0}/{1}'.format(url, url_path) + urls.append(cur) + url2base[cur] = url + + start_time = time.time() + url, _ = uhelp.wait_for_url( + urls=urls, max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds, status_cb=LOG.warning, + headers_cb=self._get_headers, request_method=request_method) + + if url: + metadata_address = url2base[url] + + if metadata_address: + self.metadata_address = metadata_address LOG.debug("Using metadata source: '%s'", self.metadata_address) else: LOG.critical("Giving up on md from %s after %s seconds", urls, int(time.time() - start_time)) - return bool(url) + return bool(metadata_address) def device_name_to_device(self, name): # Consult metadata service, that has @@ -376,14 +421,22 @@ class DataSourceEc2(sources.DataSource): return {} api_version = self.get_metadata_api_version() crawled_metadata = {} + if self.cloud_name == CloudNames.AWS: + exc_cb = self._refresh_stale_aws_token_cb + exc_cb_ud = self._skip_or_refresh_stale_aws_token_cb + else: + exc_cb = exc_cb_ud = None try: crawled_metadata['user-data'] = ec2.get_instance_userdata( - api_version, self.metadata_address) + api_version, self.metadata_address, + headers_cb=self._get_headers, exception_cb=exc_cb_ud) crawled_metadata['meta-data'] = ec2.get_instance_metadata( - api_version, self.metadata_address) + api_version, self.metadata_address, + headers_cb=self._get_headers, exception_cb=exc_cb) if self.cloud_name == CloudNames.AWS: identity = ec2.get_instance_identity( - api_version, self.metadata_address) + api_version, self.metadata_address, + headers_cb=self._get_headers, exception_cb=exc_cb) crawled_metadata['dynamic'] = {'instance-identity': identity} except Exception: util.logexc( @@ -393,6 +446,73 @@ class DataSourceEc2(sources.DataSource): crawled_metadata['_metadata_api_version'] = api_version return crawled_metadata + def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS): + """Request new metadata API token. + @param seconds: The lifetime of the token in seconds + + @return: The API token or None if unavailable. 
+ """ + if self.cloud_name != CloudNames.AWS: + return None + LOG.debug("Refreshing Ec2 metadata API token") + request_header = {'X-aws-ec2-metadata-token-ttl-seconds': seconds} + token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE) + try: + response = uhelp.readurl( + token_url, headers=request_header, request_method="PUT") + except uhelp.UrlError as e: + LOG.warning( + 'Unable to get API token: %s raised exception %s', + token_url, e) + return None + return response.contents + + def _skip_or_refresh_stale_aws_token_cb(self, msg, exception): + """Callback will not retry on SKIP_USERDATA_CODES or if no token + is available.""" + retry = ec2.skip_retry_on_codes( + ec2.SKIP_USERDATA_CODES, msg, exception) + if not retry: + return False # False raises exception + return self._refresh_stale_aws_token_cb(msg, exception) + + def _refresh_stale_aws_token_cb(self, msg, exception): + """Exception handler for Ec2 to refresh token if token is stale.""" + if isinstance(exception, uhelp.UrlError) and exception.code == 401: + # With _api_token as None, _get_headers will _refresh_api_token. + LOG.debug("Clearing cached Ec2 API token due to expiry") + self._api_token = None + return True # always retry + + def _status_cb(self, msg, exc=None): + LOG.warning(msg) + if 'Read timed out' in msg: + LOG.warning('Cannot use Ec2 IMDSv2 API tokens, using IMDSv1') + self._api_token = API_TOKEN_DISABLED + + def _get_headers(self, url=''): + """Return a dict of headers for accessing a url. + + If _api_token is unset on AWS, attempt to refresh the token via a PUT + and then return the updated token header. + """ + if self.cloud_name != CloudNames.AWS or (self._api_token == + API_TOKEN_DISABLED): + return {} + # Request a 6 hour token if URL is API_TOKEN_ROUTE + request_token_header = { + 'X-aws-ec2-metadata-token-ttl-seconds': AWS_TOKEN_TTL_SECONDS} + if API_TOKEN_ROUTE in url: + return request_token_header + if not self._api_token: + # If we don't yet have an API token, get one via a PUT against + # API_TOKEN_ROUTE. This _api_token may get unset by a 403 due + # to an invalid or expired token + self._api_token = self._refresh_api_token() + if not self._api_token: + return {} + return {'X-aws-ec2-metadata-token': self._api_token} + class DataSourceEc2Local(DataSourceEc2): """Datasource run at init-local which sets up network to query metadata. 
diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py index 4616daa7..d59aefd1 100644 --- a/cloudinit/sources/DataSourceExoscale.py +++ b/cloudinit/sources/DataSourceExoscale.py @@ -61,7 +61,7 @@ class DataSourceExoscale(sources.DataSource): metadata_url = "{}/{}/meta-data/instance-id".format( self.metadata_url, self.api_version) - url = url_helper.wait_for_url( + url, _response = url_helper.wait_for_url( urls=[metadata_url], max_wait=self.url_max_wait, timeout=self.url_timeout, diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 61aa6d7e..517913aa 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -136,7 +136,7 @@ class DataSourceMAAS(sources.DataSource): url = url[:-1] check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION) urls = [check_url] - url = self.oauth_helper.wait_for_url( + url, _response = self.oauth_helper.wait_for_url( urls=urls, max_wait=max_wait, timeout=timeout) if url: diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 4a015240..7a5e71b6 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -76,7 +76,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): url_params = self.get_url_params() start_time = time.time() - avail_url = url_helper.wait_for_url( + avail_url, _response = url_helper.wait_for_url( urls=md_urls, max_wait=url_params.max_wait_seconds, timeout=url_params.timeout_seconds) if avail_url: diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 0f4c36f7..48ddae45 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -101,7 +101,7 @@ def read_file_or_url(url, timeout=5, retries=10, raise UrlError(cause=e, code=code, headers=None, url=url) return FileResponse(file_path, contents=contents) else: - return readurl(url, timeout=timeout, retries=retries, headers=headers, + return readurl(url, timeout=timeout, retries=retries, headers_cb=headers_cb, data=data, sec_between=sec_between, ssl_details=ssl_details, exception_cb=exception_cb) @@ -310,7 +310,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None, headers_cb=None, sleep_time=1, - exception_cb=None, sleep_time_cb=None): + exception_cb=None, sleep_time_cb=None, request_method=None): """ urls: a list of urls to try max_wait: roughly the maximum time to wait before giving up @@ -325,6 +325,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, 'exception', the exception that occurred. sleep_time_cb: call method with 2 arguments (response, loop_n) that generates the next sleep time. + request_method: indicate the type of HTTP request, GET, PUT, or POST + returns: tuple of (url, response contents), on failure, (False, None) the idea of this routine is to wait for the EC2 metadata service to come up. 
On both Eucalyptus and EC2 we have seen the case where @@ -381,8 +383,9 @@ def wait_for_url(urls, max_wait=None, timeout=None, else: headers = {} - response = readurl(url, headers=headers, timeout=timeout, - check_status=False) + response = readurl( + url, headers=headers, timeout=timeout, + check_status=False, request_method=request_method) if not response.contents: reason = "empty response [%s]" % (response.code) url_exc = UrlError(ValueError(reason), code=response.code, @@ -392,7 +395,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, url_exc = UrlError(ValueError(reason), code=response.code, headers=response.headers, url=url) else: - return url + return url, response.contents except UrlError as e: reason = "request error [%s]" % e url_exc = e @@ -421,7 +424,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, sleep_time) time.sleep(sleep_time) - return False + return False, None class OauthUrlHelper(object): diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py index d6d2d6b2..83c2f753 100644 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ b/tests/unittests/test_datasource/test_cloudstack.py @@ -10,6 +10,9 @@ from cloudinit.tests.helpers import CiTestCase, ExitStack, mock import os import time +MOD_PATH = 'cloudinit.sources.DataSourceCloudStack' +DS_PATH = MOD_PATH + '.DataSourceCloudStack' + class TestCloudStackPasswordFetching(CiTestCase): @@ -17,7 +20,7 @@ class TestCloudStackPasswordFetching(CiTestCase): super(TestCloudStackPasswordFetching, self).setUp() self.patches = ExitStack() self.addCleanup(self.patches.close) - mod_name = 'cloudinit.sources.DataSourceCloudStack' + mod_name = MOD_PATH self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) default_gw = "192.201.20.0" @@ -56,7 +59,9 @@ class TestCloudStackPasswordFetching(CiTestCase): ds.get_data() self.assertEqual({}, ds.get_config_obj()) - def test_password_sets_password(self): + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_password_sets_password(self, m_wait): + m_wait.return_value = True password = 'SekritSquirrel' self._set_password_server_response(password) ds = DataSourceCloudStack( @@ -64,7 +69,9 @@ class TestCloudStackPasswordFetching(CiTestCase): ds.get_data() self.assertEqual(password, ds.get_config_obj()['password']) - def test_bad_request_doesnt_stop_ds_from_working(self): + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_bad_request_doesnt_stop_ds_from_working(self, m_wait): + m_wait.return_value = True self._set_password_server_response('bad_request') ds = DataSourceCloudStack( {}, None, helpers.Paths({'run_dir': self.tmp})) @@ -79,7 +86,9 @@ class TestCloudStackPasswordFetching(CiTestCase): request_types.append(arg.split()[1]) self.assertEqual(expected_request_types, request_types) - def test_valid_response_means_password_marked_as_saved(self): + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_valid_response_means_password_marked_as_saved(self, m_wait): + m_wait.return_value = True password = 'SekritSquirrel' subp = self._set_password_server_response(password) ds = DataSourceCloudStack( @@ -92,7 +101,9 @@ class TestCloudStackPasswordFetching(CiTestCase): subp = self._set_password_server_response(response_string) ds = DataSourceCloudStack( {}, None, helpers.Paths({'run_dir': self.tmp})) - ds.get_data() + with mock.patch(DS_PATH + '.wait_for_metadata_service') as m_wait: + m_wait.return_value = True + 
ds.get_data() self.assertRequestTypesSent(subp, ['send_my_password']) def test_password_not_saved_if_empty(self): diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 5e1dd777..34a089f2 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -191,7 +191,9 @@ def register_mock_metaserver(base_url, data): register(base_url, 'not found', status=404) def myreg(*argc, **kwargs): - return httpretty.register_uri(httpretty.GET, *argc, **kwargs) + url = argc[0] + method = httpretty.PUT if ec2.API_TOKEN_ROUTE in url else httpretty.GET + return httpretty.register_uri(method, *argc, **kwargs) register_helper(myreg, base_url, data) @@ -237,6 +239,8 @@ class TestEc2(test_helpers.HttprettyTestCase): if md: all_versions = ( [ds.min_metadata_version] + ds.extended_metadata_versions) + token_url = self.data_url('latest', data_item='api/token') + register_mock_metaserver(token_url, 'API-TOKEN') for version in all_versions: metadata_url = self.data_url(version) + '/' if version == md_version: -- cgit v1.2.3 From f69d33a723b805fec3ee70c3a6127c8cadcb02d8 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 2 Dec 2019 16:24:18 -0700 Subject: url_helper: read_file_or_url should pass headers param into readurl (#66) Headers param was accidentally omitted and no longer passed through to readurl due to a previous commit. To avoid this omission of params in the future, drop positional param definitions from read_file_or_url and pass all kwargs through to readurl when we are not operating on a file. In util:read_seeded, correct the case where invalid positional param file_retries was being passed into read_file_or_url. Also drop duplicated file:// prefix addition from read_seeded because read_file_or_url does that work anyway. 
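A toy illustration (not cloud-init code) of the failure mode this closes:
when a wrapper repeats its target's positional signature, callers silently
bind arguments to the wrong parameter as soon as the two signatures drift.

    def readurl(url, timeout=5, retries=10, headers=None):
        return (timeout, retries, headers)

    def read_file_or_url(url, timeout=5, retries=10, headers=None):
        # Duplicated signature: must be kept in sync with readurl by hand.
        return readurl(url, timeout, retries, headers)

    read_file_or_url('http://x', 5, 10, 0)   # oops: headers == 0

    def read_file_or_url_fixed(url, **kwargs):
        return readurl(url, **kwargs)        # nothing left to drift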
LP: #1854084 --- cloudinit/sources/helpers/azure.py | 6 ++- cloudinit/tests/test_url_helper.py | 52 ++++++++++++++++++++++ cloudinit/url_helper.py | 47 +++++++++++++++---- cloudinit/user_data.py | 2 +- cloudinit/util.py | 15 ++----- .../unittests/test_datasource/test_azure_helper.py | 18 +++++--- 6 files changed, 112 insertions(+), 28 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index f1fba175..f5cdb3fd 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -183,14 +183,16 @@ class AzureEndpointHttpClient(object): if secure: headers = self.headers.copy() headers.update(self.extra_secure_headers) - return url_helper.read_file_or_url(url, headers=headers) + return url_helper.read_file_or_url(url, headers=headers, timeout=5, + retries=10) def post(self, url, data=None, extra_headers=None): headers = self.headers if extra_headers is not None: headers = self.headers.copy() headers.update(extra_headers) - return url_helper.read_file_or_url(url, data=data, headers=headers) + return url_helper.read_file_or_url(url, data=data, headers=headers, + timeout=5, retries=10) class GoalState(object): diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py index aa9f3ec1..e883ddc2 100644 --- a/cloudinit/tests/test_url_helper.py +++ b/cloudinit/tests/test_url_helper.py @@ -4,6 +4,7 @@ from cloudinit.url_helper import ( NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc) from cloudinit.tests.helpers import CiTestCase, mock, skipIf from cloudinit import util +from cloudinit import version import httpretty import requests @@ -17,6 +18,9 @@ except ImportError: _missing_oauthlib_dep = True +M_PATH = 'cloudinit.url_helper.' 
+ + class TestOAuthHeaders(CiTestCase): def test_oauth_headers_raises_not_implemented_when_oathlib_missing(self): @@ -67,6 +71,54 @@ class TestReadFileOrUrl(CiTestCase): self.assertEqual(result.contents, data) self.assertEqual(str(result), data.decode('utf-8')) + @mock.patch(M_PATH + 'readurl') + def test_read_file_or_url_passes_params_to_readurl(self, m_readurl): + """read_file_or_url passes all params through to readurl.""" + url = 'http://hostname/path' + response = 'This is my url content\n' + m_readurl.return_value = response + params = {'url': url, 'timeout': 1, 'retries': 2, + 'headers': {'somehdr': 'val'}, + 'data': 'data', 'sec_between': 1, + 'ssl_details': {'cert_file': '/path/cert.pem'}, + 'headers_cb': 'headers_cb', 'exception_cb': 'exception_cb'} + self.assertEqual(response, read_file_or_url(**params)) + params.pop('url') # url is passed in as a positional arg + self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list) + + def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self): + """Readurl param defaults used when unspecified by read_file_or_url + + Param defaults tested are as follows: + retries: 0, additional headers None beyond default, method: GET, + data: None, check_status: True and allow_redirects: True + """ + url = 'http://hostname/path' + + m_response = mock.MagicMock() + + class FakeSession(requests.Session): + def request(cls, **kwargs): + self.assertEqual( + {'url': url, 'allow_redirects': True, 'method': 'GET', + 'headers': { + 'User-Agent': 'Cloud-Init/%s' % ( + version.version_string())}}, + kwargs) + return m_response + + with mock.patch(M_PATH + 'requests.Session') as m_session: + error = requests.exceptions.HTTPError('broke') + m_session.side_effect = [error, FakeSession()] + # assert no retries and check_status == True + with self.assertRaises(UrlError) as context_manager: + response = read_file_or_url(url) + self.assertEqual('broke', str(context_manager.exception)) + # assert default headers, method, url and allow_redirects True + # Success on 2nd call with FakeSession + response = read_file_or_url(url) + self.assertEqual(m_response, response._response) + class TestRetryOnUrlExc(CiTestCase): diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 48ddae45..1496a471 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -81,14 +81,19 @@ def combine_url(base, *add_ons): return url -def read_file_or_url(url, timeout=5, retries=10, - headers=None, data=None, sec_between=1, ssl_details=None, - headers_cb=None, exception_cb=None): +def read_file_or_url(url, **kwargs): + """Wrapper function around readurl to allow passing a file path as url. + + When url is not a local file path, passthrough any kwargs to readurl. + + In the case of parameter passthrough to readurl, default values for some + parameters. See: call-signature of readurl in this module for param docs. 
+ """ url = url.lstrip() if url.startswith("/"): url = "file://%s" % url if url.lower().startswith("file://"): - if data: + if kwargs.get("data"): LOG.warning("Unable to post data to file resource %s", url) file_path = url[len("file://"):] try: @@ -101,10 +106,7 @@ def read_file_or_url(url, timeout=5, retries=10, raise UrlError(cause=e, code=code, headers=None, url=url) return FileResponse(file_path, contents=contents) else: - return readurl(url, timeout=timeout, retries=retries, - headers_cb=headers_cb, data=data, - sec_between=sec_between, ssl_details=ssl_details, - exception_cb=exception_cb) + return readurl(url, **kwargs) # Made to have same accessors as UrlResponse so that the @@ -201,6 +203,35 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, check_status=True, allow_redirects=True, exception_cb=None, session=None, infinite=False, log_req_resp=True, request_method=None): + """Wrapper around requests.Session to read the url and retry if necessary + + :param url: Mandatory url to request. + :param data: Optional form data to post the URL. Will set request_method + to 'POST' if present. + :param timeout: Timeout in seconds to wait for a response + :param retries: Number of times to retry on exception if exception_cb is + None or exception_cb returns True for the exception caught. Default is + to fail with 0 retries on exception. + :param sec_between: Default 1: amount of seconds passed to time.sleep + between retries. None or -1 means don't sleep. + :param headers: Optional dict of headers to send during request + :param headers_cb: Optional callable returning a dict of values to send as + headers during request + :param ssl_details: Optional dict providing key_file, ca_certs, and + cert_file keys for use on in ssl connections. + :param check_status: Optional boolean set True to raise when HTTPError + occurs. Default: True. + :param allow_redirects: Optional boolean passed straight to Session.request + as 'allow_redirects'. Default: True. + :param exception_cb: Optional callable which accepts the params + msg and exception and returns a boolean True if retries are permitted. + :param session: Optional exiting requests.Session instance to reuse. + :param infinite: Bool, set True to retry indefinitely. Default: False. + :param log_req_resp: Set False to turn off verbose debug messages. + :param request_method: String passed as 'method' to Session.request. + Typically GET, or POST. Default: POST if data is provided, GET + otherwise. + """ url = _cleanurl(url) req_args = { 'url': url, diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index ed83d2d8..15af1daf 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -224,7 +224,7 @@ class UserDataProcessor(object): content = util.load_file(include_once_fn) else: try: - resp = read_file_or_url(include_url, + resp = read_file_or_url(include_url, timeout=5, retries=10, ssl_details=self.ssl_details) if include_once_on and resp.ok(): util.write_file(include_once_fn, resp.contents, diff --git a/cloudinit/util.py b/cloudinit/util.py index 78b6a2d0..9d9d5c72 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -986,13 +986,6 @@ def load_yaml(blob, default=None, allowed=(dict,)): def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): - if base.startswith("/"): - base = "file://%s" % base - - # default retries for file is 0. 
for network is 10 - if base.startswith("file://"): - retries = file_retries - if base.find("%s") >= 0: ud_url = base % ("user-data" + ext) md_url = base % ("meta-data" + ext) @@ -1000,14 +993,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): ud_url = "%s%s%s" % (base, "user-data", ext) md_url = "%s%s%s" % (base, "meta-data", ext) - md_resp = url_helper.read_file_or_url(md_url, timeout, retries, - file_retries) + md_resp = url_helper.read_file_or_url(md_url, timeout=timeout, + retries=retries) md = None if md_resp.ok(): md = load_yaml(decode_binary(md_resp.contents), default={}) - ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries, - file_retries) + ud_resp = url_helper.read_file_or_url(ud_url, timeout=timeout, + retries=retries) ud = None if ud_resp.ok(): ud = ud_resp.contents diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index bd006aba..bd17f636 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -212,8 +212,10 @@ class TestAzureEndpointHttpClient(CiTestCase): response = client.get(url, secure=False) self.assertEqual(1, self.read_file_or_url.call_count) self.assertEqual(self.read_file_or_url.return_value, response) - self.assertEqual(mock.call(url, headers=self.regular_headers), - self.read_file_or_url.call_args) + self.assertEqual( + mock.call(url, headers=self.regular_headers, retries=10, + timeout=5), + self.read_file_or_url.call_args) def test_secure_get(self): url = 'MyTestUrl' @@ -227,8 +229,10 @@ class TestAzureEndpointHttpClient(CiTestCase): response = client.get(url, secure=True) self.assertEqual(1, self.read_file_or_url.call_count) self.assertEqual(self.read_file_or_url.return_value, response) - self.assertEqual(mock.call(url, headers=expected_headers), - self.read_file_or_url.call_args) + self.assertEqual( + mock.call(url, headers=expected_headers, retries=10, + timeout=5), + self.read_file_or_url.call_args) def test_post(self): data = mock.MagicMock() @@ -238,7 +242,8 @@ class TestAzureEndpointHttpClient(CiTestCase): self.assertEqual(1, self.read_file_or_url.call_count) self.assertEqual(self.read_file_or_url.return_value, response) self.assertEqual( - mock.call(url, data=data, headers=self.regular_headers), + mock.call(url, data=data, headers=self.regular_headers, retries=10, + timeout=5), self.read_file_or_url.call_args) def test_post_with_extra_headers(self): @@ -250,7 +255,8 @@ class TestAzureEndpointHttpClient(CiTestCase): expected_headers = self.regular_headers.copy() expected_headers.update(extra_headers) self.assertEqual( - mock.call(mock.ANY, data=mock.ANY, headers=expected_headers), + mock.call(mock.ANY, data=mock.ANY, headers=expected_headers, + retries=10, timeout=5), self.read_file_or_url.call_args) -- cgit v1.2.3 From 129b1c4ea250619bd7caed7aaffacc796b0139f2 Mon Sep 17 00:00:00 2001 From: AOhassan <37305877+AOhassan@users.noreply.github.com> Date: Thu, 12 Dec 2019 13:51:42 -0800 Subject: azure: avoid re-running cloud-init when instance-id is byte-swapped (#84) Azure stores the instance ID with an incorrect byte ordering for the first three hyphen delimited parts. This results in invalid is_new_instance checks forcing Azure datasource to recrawl the metadata service. When persisting instance-id from the metadata service, swap the instance-id string byte order such that it is consistent with that returned by dmi information. 
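Concretely, SMBIOS stores the first three hyphen-delimited UUID fields
little-endian, so the same identity has two spellings. The swap reverses
only the bytes of those three fields (the values here are the ones used by
the new unit tests):

    import textwrap

    def swap(field):
        # Reverse the two-hex-digit bytes within one field.
        return ''.join(reversed(textwrap.wrap(field, 2)))

    dmi = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8'.split('-')
    swapped = '-'.join([swap(dmi[0]), swap(dmi[1]), swap(dmi[2])] + dmi[3:])
    assert swapped == '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8'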
Check whether the instance-id string is a byte-swapped match when determining correctly whether the Azure platform instance-id has actually changed. --- cloudinit/sources/DataSourceAzure.py | 16 ++++++++++--- cloudinit/sources/helpers/azure.py | 27 ++++++++++++++++++++++ tests/unittests/test_datasource/test_azure.py | 24 ++++++++++++++++--- .../unittests/test_datasource/test_azure_helper.py | 19 +++++++++++++++ 4 files changed, 80 insertions(+), 6 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 87a848ce..24f448c5 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -33,7 +33,8 @@ from cloudinit.sources.helpers.azure import ( get_boot_telemetry, get_system_info, report_diagnostic_event, - EphemeralDHCPv4WithReporting) + EphemeralDHCPv4WithReporting, + is_byte_swapped) LOG = logging.getLogger(__name__) @@ -471,8 +472,7 @@ class DataSourceAzure(sources.DataSource): seed = _get_random_seed() if seed: crawled_data['metadata']['random_seed'] = seed - crawled_data['metadata']['instance-id'] = util.read_dmi_data( - 'system-uuid') + crawled_data['metadata']['instance-id'] = self._iid() if perform_reprovision: LOG.info("Reporting ready to Azure after getting ReprovisionData") @@ -558,6 +558,16 @@ class DataSourceAzure(sources.DataSource): # quickly (local check only) if self.instance_id is still valid return sources.instance_id_matches_system_uuid(self.get_instance_id()) + def _iid(self, previous=None): + prev_iid_path = os.path.join( + self.paths.get_cpath('data'), 'instance-id') + iid = util.read_dmi_data('system-uuid') + if os.path.exists(prev_iid_path): + previous = util.load_file(prev_iid_path).strip() + if is_byte_swapped(previous, iid): + return previous + return iid + @azure_ds_telemetry_reporter def setup(self, is_new_instance): if self._negotiated is False: diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index f5cdb3fd..fc760581 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -7,6 +7,7 @@ import re import socket import struct import time +import textwrap from cloudinit.net import dhcp from cloudinit import stages @@ -48,6 +49,32 @@ def azure_ds_telemetry_reporter(func): return impl +def is_byte_swapped(previous_id, current_id): + """ + Azure stores the instance ID with an incorrect byte ordering for the + first parts. This corrects the byte order such that it is consistent with + that returned by the metadata service. 
+ """ + if previous_id == current_id: + return False + + def swap_bytestring(s, width=2): + dd = [byte for byte in textwrap.wrap(s, 2)] + dd.reverse() + return ''.join(dd) + + parts = current_id.split('-') + swapped_id = '-'.join([ + swap_bytestring(parts[0]), + swap_bytestring(parts[1]), + swap_bytestring(parts[2]), + parts[3], + parts[4] + ]) + + return previous_id == swapped_id + + @azure_ds_telemetry_reporter def get_boot_telemetry(): """Report timestamps related to kernel initialization and systemd diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 59e351de..a809fd87 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -477,7 +477,7 @@ scbus-1 on xpt0 bus 0 'public-keys': [], }) - self.instance_id = 'test-instance-id' + self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' def _dmi_mocks(key): if key == 'system-uuid': @@ -645,7 +645,7 @@ scbus-1 on xpt0 bus 0 'azure_data': { 'configurationsettype': 'LinuxProvisioningConfiguration'}, 'imds': NETWORK_METADATA, - 'instance-id': 'test-instance-id', + 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', 'local-hostname': u'myhost', 'random_seed': 'wild'} @@ -1091,6 +1091,24 @@ scbus-1 on xpt0 bus 0 self.assertTrue(ret) self.assertEqual('value', dsrc.metadata['test']) + def test_instance_id_endianness(self): + """Return the previous iid when dmi uuid is the byteswapped iid.""" + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + # byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual( + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + # not byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual( + 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + def test_instance_id_from_dmidecode_used(self): ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) ds.get_data() @@ -1292,7 +1310,7 @@ class TestAzureBounce(CiTestCase): def _dmi_mocks(key): if key == 'system-uuid': - return 'test-instance-id' + return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' elif key == 'chassis-asset-tag': return '7783-7084-3265-9085-8269-3286-77' raise RuntimeError('should not get here') diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index bd17f636..007df09f 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -170,6 +170,25 @@ class TestGoalStateParsing(CiTestCase): goal_state = self._get_goal_state(instance_id=instance_id) self.assertEqual(instance_id, goal_state.instance_id) + def test_instance_id_byte_swap(self): + """Return true when previous_iid is byteswapped current_iid""" + previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8" + self.assertTrue( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + + def test_instance_id_no_byte_swap_same_instance_id(self): + previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + self.assertFalse( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + + def test_instance_id_no_byte_swap_diff_instance_id(self): + previous_iid = 
"D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + self.assertFalse( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + def test_certificates_xml_parsed_and_fetched_correctly(self): http_client = mock.MagicMock() certificates_url = 'TestCertificatesUrl' -- cgit v1.2.3 From e03c71aed91cbcbfb37cfadbf63b21a2d7e61a52 Mon Sep 17 00:00:00 2001 From: Adam Dobrawy Date: Tue, 17 Dec 2019 17:09:22 +0100 Subject: tests: Add tests for value of dsname in datasources --- tests/unittests/test_datasource/test_common.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'tests/unittests/test_datasource') diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 61a7a762..4ab5d471 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -4,6 +4,7 @@ from cloudinit import settings from cloudinit import sources from cloudinit import type_utils from cloudinit.sources import ( + DataSource, DataSourceAliYun as AliYun, DataSourceAltCloud as AltCloud, DataSourceAzure as Azure, @@ -23,6 +24,7 @@ from cloudinit.sources import ( DataSourceOpenStack as OpenStack, DataSourceOracle as Oracle, DataSourceOVF as OVF, + DataSourceRbxCloud as RbxCloud, DataSourceScaleway as Scaleway, DataSourceSmartOS as SmartOS, ) @@ -44,6 +46,7 @@ DEFAULT_LOCAL = [ SmartOS.DataSourceSmartOS, Ec2.DataSourceEc2Local, OpenStack.DataSourceOpenStackLocal, + RbxCloud.DataSourceRbxCloud, Scaleway.DataSourceScaleway, ] @@ -86,7 +89,6 @@ class ExpectedDataSources(test_helpers.TestCase): class TestDataSourceInvariants(test_helpers.TestCase): - def test_data_sources_have_valid_network_config_sources(self): for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: for cfg_src in ds.network_config_sources: @@ -95,5 +97,14 @@ class TestDataSourceInvariants(test_helpers.TestCase): self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src), fail_msg) + def test_expected_dsname_defined(self): + for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: + fail_msg = ( + '{} has an invalid / missing dsname property: {}'.format( + str(ds), str(ds.dsname) + ) + ) + self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg) + self.assertIsNotNone(ds.dsname) # vi: ts=4 expandtab -- cgit v1.2.3 From bb71a9d08d25193836eda91c328760305285574e Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 21 Jan 2020 18:02:42 -0500 Subject: Drop most of the remaining use of six (#179) --- cloudinit/config/cc_chef.py | 4 +-- cloudinit/config/cc_mcollective.py | 10 +++---- cloudinit/config/cc_ntp.py | 20 ++++++------- cloudinit/config/cc_power_state_change.py | 9 +++--- cloudinit/config/cc_rsyslog.py | 7 ++--- cloudinit/config/cc_ubuntu_advantage.py | 4 +-- cloudinit/config/cc_write_files.py | 3 +- cloudinit/config/cc_yum_add_repo.py | 12 +++----- cloudinit/distros/__init__.py | 13 ++++----- cloudinit/distros/freebsd.py | 7 ++--- cloudinit/distros/parsers/sys_conf.py | 6 ++-- cloudinit/distros/ug_util.py | 22 +++++++-------- cloudinit/net/network_state.py | 11 +++----- cloudinit/net/renderer.py | 4 +-- cloudinit/net/sysconfig.py | 15 +++++----- cloudinit/sources/tests/test_init.py | 33 +--------------------- cloudinit/sources/tests/test_oracle.py | 3 +- cloudinit/stages.py | 6 ++-- cloudinit/tests/helpers.py | 15 +++++----- tests/unittests/test_cli.py | 16 +++++------ tests/unittests/test_datasource/test_smartos.py | 4 +-- tests/unittests/test_handler/test_handler_chef.py | 3 +- 
.../test_handler/test_handler_write_files.py | 15 +++++----- tests/unittests/test_log.py | 11 ++++---- tests/unittests/test_merging.py | 6 ++-- tests/unittests/test_util.py | 17 ++++++----- 26 files changed, 104 insertions(+), 172 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 0ad6b7f1..01d61fa1 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -79,8 +79,6 @@ from cloudinit import templater from cloudinit import url_helper from cloudinit import util -import six - RUBY_VERSION_DEFAULT = "1.8" CHEF_DIRS = tuple([ @@ -273,7 +271,7 @@ def run_chef(chef_cfg, log): cmd_args = chef_cfg['exec_arguments'] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) - elif isinstance(cmd_args, six.string_types): + elif isinstance(cmd_args, str): cmd.append(cmd_args) else: log.warning("Unknown type %s provided for chef" diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py index d5f63f5f..351183f1 100644 --- a/cloudinit/config/cc_mcollective.py +++ b/cloudinit/config/cc_mcollective.py @@ -49,9 +49,7 @@ private certificates for mcollective. Their values will be written to """ import errno - -import six -from six import BytesIO +import io # Used since this can maintain comments # and doesn't need a top level section @@ -73,7 +71,7 @@ def configure(config, server_cfg=SERVER_CFG, # original file in order to be able to mix the rest up. try: old_contents = util.load_file(server_cfg, quiet=False, decode=False) - mcollective_config = ConfigObj(BytesIO(old_contents)) + mcollective_config = ConfigObj(io.BytesIO(old_contents)) except IOError as e: if e.errno != errno.ENOENT: raise @@ -93,7 +91,7 @@ def configure(config, server_cfg=SERVER_CFG, 'plugin.ssl_server_private'] = pricert_file mcollective_config['securityprovider'] = 'ssl' else: - if isinstance(cfg, six.string_types): + if isinstance(cfg, str): # Just set it in the 'main' section mcollective_config[cfg_name] = cfg elif isinstance(cfg, (dict)): @@ -119,7 +117,7 @@ def configure(config, server_cfg=SERVER_CFG, raise # Now we got the whole (new) file, write to disk... - contents = BytesIO() + contents = io.BytesIO() mcollective_config.write(contents) util.write_file(server_cfg, contents.getvalue(), mode=0o644) diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 9e074bda..5498bbaa 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -6,19 +6,17 @@ """NTP: enable and configure ntp""" -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +import copy +import os +from textwrap import dedent + from cloudinit import log as logging -from cloudinit.settings import PER_INSTANCE from cloudinit import temp_utils from cloudinit import templater from cloudinit import type_utils from cloudinit import util - -import copy -import os -import six -from textwrap import dedent +from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -460,7 +458,7 @@ def supplemental_schema_validation(ntp_config): for key, value in sorted(ntp_config.items()): keypath = 'ntp:config:' + key if key == 'confpath': - if not all([value, isinstance(value, six.string_types)]): + if not all([value, isinstance(value, str)]): errors.append( 'Expected a config file path {keypath}.' 
' Found ({value})'.format(keypath=keypath, value=value)) @@ -472,11 +470,11 @@ def supplemental_schema_validation(ntp_config): elif key in ('template', 'template_name'): if value is None: # Either template or template_name can be none continue - if not isinstance(value, six.string_types): + if not isinstance(value, str): errors.append( 'Expected a string type for {keypath}.' ' Found ({value})'.format(keypath=keypath, value=value)) - elif not isinstance(value, six.string_types): + elif not isinstance(value, str): errors.append( 'Expected a string type for {keypath}.' ' Found ({value})'.format(keypath=keypath, value=value)) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 43a479cf..3e81a3c7 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -49,16 +49,15 @@ key returns 0. condition: """ -from cloudinit.settings import PER_INSTANCE -from cloudinit import util - import errno import os import re -import six import subprocess import time +from cloudinit.settings import PER_INSTANCE +from cloudinit import util + frequency = PER_INSTANCE EXIT_FAIL = 254 @@ -183,7 +182,7 @@ def load_power_state(cfg): pstate['timeout']) condition = pstate.get("condition", True) - if not isinstance(condition, six.string_types + (list, bool)): + if not isinstance(condition, (str, list, bool)): raise TypeError("condition type %s invalid. must be list, bool, str") return (args, timeout, condition) diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index ff211f65..5df0137d 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -180,7 +180,6 @@ config entries. Legacy to new mappings are as follows: import os import re -import six from cloudinit import log as logging from cloudinit import util @@ -233,9 +232,9 @@ def load_config(cfg): fillup = ( (KEYNAME_CONFIGS, [], list), - (KEYNAME_DIR, DEF_DIR, six.string_types), - (KEYNAME_FILENAME, DEF_FILENAME, six.string_types), - (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)), + (KEYNAME_DIR, DEF_DIR, str), + (KEYNAME_FILENAME, DEF_FILENAME, str), + (KEYNAME_RELOAD, DEF_RELOAD, (str, list)), (KEYNAME_REMOTES, DEF_REMOTES, dict)) for key, default, vtypes in fillup: diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index f846e9a5..8b6d2a1a 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -4,8 +4,6 @@ from textwrap import dedent -import six - from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) from cloudinit import log as logging @@ -98,7 +96,7 @@ def configure_ua(token=None, enable=None): if enable is None: enable = [] - elif isinstance(enable, six.string_types): + elif isinstance(enable, str): LOG.warning('ubuntu_advantage: enable should be a list, not' ' a string; treating as a single enable') enable = [enable] diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index 0b6546e2..bd87e9e5 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -57,7 +57,6 @@ binary gzip data can be specified and will be decoded before being written. 
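
# For illustration: once six.string_types collapses to plain str on
# Python 3, the validation pattern in supplemental_schema_validation()
# above reduces to ordinary isinstance() checks. A minimal standalone
# sketch; check_confpath is a hypothetical name, not cloud-init API:
def check_confpath(ntp_config):
    errors = []
    value = ntp_config.get('confpath')
    # six.string_types was simply (str,) on py3, so plain str is enough.
    if not all([value, isinstance(value, str)]):
        errors.append(
            'Expected a config file path ntp:config:confpath.'
            ' Found ({value})'.format(value=value))
    return errors

assert check_confpath({'confpath': '/etc/ntp.conf'}) == []
assert check_confpath({'confpath': 3}) != []
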
import base64 import os -import six from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE @@ -126,7 +125,7 @@ def decode_perms(perm, default): if perm is None: return default try: - if isinstance(perm, six.integer_types + (float,)): + if isinstance(perm, (int, float)): # Just 'downcast' it (if a float) return int(perm) else: diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 3b354a7d..3673166a 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -30,13 +30,9 @@ entry, the config entry will be skipped. # any repository configuration options (see man yum.conf) """ +import io import os - -try: - from configparser import ConfigParser -except ImportError: - from ConfigParser import ConfigParser -import six +from configparser import ConfigParser from cloudinit import util @@ -57,7 +53,7 @@ def _format_repo_value(val): # Can handle 'lists' in certain cases # See: https://linux.die.net/man/5/yum.conf return "\n".join([_format_repo_value(v) for v in val]) - if not isinstance(val, six.string_types): + if not isinstance(val, str): return str(val) return val @@ -72,7 +68,7 @@ def _format_repository_config(repo_id, repo_config): # For now assume that people using this know # the format of yum and don't verify keys/values further to_be.set(repo_id, k, _format_repo_value(v)) - to_be_stream = six.StringIO() + to_be_stream = io.StringIO() to_be.write(to_be_stream) to_be_stream.seek(0) lines = to_be_stream.readlines() diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index cdce26f2..92598a2d 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -9,13 +9,11 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six -from six import StringIO - import abc import os import re import stat +from io import StringIO from cloudinit import importer from cloudinit import log as logging @@ -53,8 +51,7 @@ _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$') PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate'] -@six.add_metaclass(abc.ABCMeta) -class Distro(object): +class Distro(metaclass=abc.ABCMeta): usr_lib_exec = "/usr/lib" hosts_fn = "/etc/hosts" @@ -429,7 +426,7 @@ class Distro(object): # support kwargs having groups=[list] or groups="g1,g2" groups = kwargs.get('groups') if groups: - if isinstance(groups, six.string_types): + if isinstance(groups, str): groups = groups.split(",") # remove any white spaces in group names, most likely @@ -544,7 +541,7 @@ class Distro(object): if 'ssh_authorized_keys' in kwargs: # Try to handle this in a smart manner. keys = kwargs['ssh_authorized_keys'] - if isinstance(keys, six.string_types): + if isinstance(keys, str): keys = [keys] elif isinstance(keys, dict): keys = list(keys.values()) @@ -668,7 +665,7 @@ class Distro(object): if isinstance(rules, (list, tuple)): for rule in rules: lines.append("%s %s" % (user, rule)) - elif isinstance(rules, six.string_types): + elif isinstance(rules, str): lines.append("%s %s" % (user, rules)) else: msg = "Can not create sudoers rule addition with type %r" diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 40e435e7..026d1142 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -5,10 +5,8 @@ # This file is part of cloud-init. See LICENSE file for license information. 
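
# For illustration: the six.add_metaclass decorator dropped above and the
# Python 3 metaclass keyword now used by Distro produce equivalent
# classes. A minimal standalone sketch (Base/Impl are hypothetical names):
import abc

class Base(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def name(self):
        """Subclasses must provide name()."""

class Impl(Base):
    def name(self):
        return 'impl'

# Base() would raise TypeError; concrete subclasses instantiate normally.
assert Impl().name() == 'impl'
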
import os -import six -from six import StringIO - import re +from io import StringIO from cloudinit import distros from cloudinit import helpers @@ -108,8 +106,7 @@ class Distro(distros.Distro): } for key, val in kwargs.items(): - if (key in pw_useradd_opts and val and - isinstance(val, six.string_types)): + if key in pw_useradd_opts and val and isinstance(val, str): pw_useradd_cmd.extend([pw_useradd_opts[key], val]) elif key in pw_useradd_flags and val: diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py index 44df17de..dee4c551 100644 --- a/cloudinit/distros/parsers/sys_conf.py +++ b/cloudinit/distros/parsers/sys_conf.py @@ -4,11 +4,9 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six -from six import StringIO - import pipes import re +from io import StringIO # This library is used to parse/write # out the various sysconfig files edited (best attempt effort) @@ -65,7 +63,7 @@ class SysConf(configobj.ConfigObj): return out_contents.getvalue() def _quote(self, value, multiline=False): - if not isinstance(value, six.string_types): + if not isinstance(value, str): raise ValueError('Value "%s" is not a string' % (value)) if len(value) == 0: return '' diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py index 9378dd78..08446a95 100755 --- a/cloudinit/distros/ug_util.py +++ b/cloudinit/distros/ug_util.py @@ -9,8 +9,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six - from cloudinit import log as logging from cloudinit import type_utils from cloudinit import util @@ -29,7 +27,7 @@ LOG = logging.getLogger(__name__) # is the standard form used in the rest # of cloud-init def _normalize_groups(grp_cfg): - if isinstance(grp_cfg, six.string_types): + if isinstance(grp_cfg, str): grp_cfg = grp_cfg.strip().split(",") if isinstance(grp_cfg, list): c_grp_cfg = {} @@ -39,7 +37,7 @@ def _normalize_groups(grp_cfg): if k not in c_grp_cfg: if isinstance(v, list): c_grp_cfg[k] = list(v) - elif isinstance(v, six.string_types): + elif isinstance(v, str): c_grp_cfg[k] = [v] else: raise TypeError("Bad group member type %s" % @@ -47,12 +45,12 @@ def _normalize_groups(grp_cfg): else: if isinstance(v, list): c_grp_cfg[k].extend(v) - elif isinstance(v, six.string_types): + elif isinstance(v, str): c_grp_cfg[k].append(v) else: raise TypeError("Bad group member type %s" % type_utils.obj_name(v)) - elif isinstance(i, six.string_types): + elif isinstance(i, str): if i not in c_grp_cfg: c_grp_cfg[i] = [] else: @@ -89,7 +87,7 @@ def _normalize_users(u_cfg, def_user_cfg=None): if isinstance(u_cfg, dict): ad_ucfg = [] for (k, v) in u_cfg.items(): - if isinstance(v, (bool, int, float) + six.string_types): + if isinstance(v, (bool, int, float, str)): if util.is_true(v): ad_ucfg.append(str(k)) elif isinstance(v, dict): @@ -99,12 +97,12 @@ def _normalize_users(u_cfg, def_user_cfg=None): raise TypeError(("Unmappable user value type %s" " for key %s") % (type_utils.obj_name(v), k)) u_cfg = ad_ucfg - elif isinstance(u_cfg, six.string_types): + elif isinstance(u_cfg, str): u_cfg = util.uniq_merge_sorted(u_cfg) users = {} for user_config in u_cfg: - if isinstance(user_config, (list,) + six.string_types): + if isinstance(user_config, (list, str)): for u in util.uniq_merge(user_config): if u and u not in users: users[u] = {} @@ -209,7 +207,7 @@ def normalize_users_groups(cfg, distro): old_user = cfg['user'] # Translate it into the format that is more useful # going forward - if 
isinstance(old_user, six.string_types): + if isinstance(old_user, str): old_user = { 'name': old_user, } @@ -238,7 +236,7 @@ def normalize_users_groups(cfg, distro): default_user_config = util.mergemanydict([old_user, distro_user_config]) base_users = cfg.get('users', []) - if not isinstance(base_users, (list, dict) + six.string_types): + if not isinstance(base_users, (list, dict, str)): LOG.warning(("Format for 'users' key must be a comma separated string" " or a dictionary or a list and not %s"), type_utils.obj_name(base_users)) @@ -252,7 +250,7 @@ def normalize_users_groups(cfg, distro): base_users.append({'name': 'default'}) elif isinstance(base_users, dict): base_users['default'] = dict(base_users).get('default', True) - elif isinstance(base_users, six.string_types): + elif isinstance(base_users, str): # Just append it on to be re-parsed later base_users += ",default" diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 9b126100..63d6e291 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -10,8 +10,6 @@ import logging import socket import struct -import six - from cloudinit import safeyaml from cloudinit import util @@ -186,7 +184,7 @@ class NetworkState(object): def iter_interfaces(self, filter_func=None): ifaces = self._network_state.get('interfaces', {}) - for iface in six.itervalues(ifaces): + for iface in ifaces.values(): if filter_func is None: yield iface else: @@ -220,8 +218,7 @@ class NetworkState(object): ) -@six.add_metaclass(CommandHandlerMeta) -class NetworkStateInterpreter(object): +class NetworkStateInterpreter(metaclass=CommandHandlerMeta): initial_network_state = { 'interfaces': {}, @@ -970,7 +967,7 @@ def ipv4_mask_to_net_prefix(mask): """ if isinstance(mask, int): return mask - if isinstance(mask, six.string_types): + if isinstance(mask, str): try: return int(mask) except ValueError: @@ -997,7 +994,7 @@ def ipv6_mask_to_net_prefix(mask): if isinstance(mask, int): return mask - if isinstance(mask, six.string_types): + if isinstance(mask, str): try: return int(mask) except ValueError: diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py index 5f32e90f..2a61a7a8 100644 --- a/cloudinit/net/renderer.py +++ b/cloudinit/net/renderer.py @@ -6,7 +6,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import abc -import six +import io from .network_state import parse_net_config_data from .udev import generate_udev_rule @@ -34,7 +34,7 @@ class Renderer(object): """Given state, emit udev rules to map mac to ifname.""" # TODO(harlowja): this seems shared between eni renderer and # this, so move it to a shared location. - content = six.StringIO() + content = io.StringIO() for iface in network_state.iter_interfaces(filter_by_physical): # for physical interfaces write out a persist net udev rule if 'name' in iface and iface.get('mac_address'): diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 3e06af01..07668d3e 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -1,16 +1,15 @@ # This file is part of cloud-init. See LICENSE file for license information. 
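
# For illustration: on Python 3 a plain dict view replaces
# six.itervalues(), and the mask normalization above needs only int/str
# isinstance() checks. A minimal standalone sketch with made-up data:
ifaces = {'eth0': {'mtu': 1500}, 'eth1': {'mtu': 9000}}
assert sorted(i['mtu'] for i in ifaces.values()) == [1500, 9000]

def to_prefix(mask):
    # Accept an int prefix as-is, or a decimal string such as '24'.
    if isinstance(mask, int):
        return mask
    if isinstance(mask, str):
        try:
            return int(mask)
        except ValueError:
            pass
    raise ValueError('unrecognized mask: %r' % (mask,))

assert to_prefix(24) == 24 and to_prefix('24') == 24
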
+import io import os import re -import six +from configobj import ConfigObj -from cloudinit.distros.parsers import networkmanager_conf -from cloudinit.distros.parsers import resolv_conf from cloudinit import log as logging from cloudinit import util - -from configobj import ConfigObj +from cloudinit.distros.parsers import networkmanager_conf +from cloudinit.distros.parsers import resolv_conf from . import renderer from .network_state import ( @@ -96,7 +95,7 @@ class ConfigMap(object): return len(self._conf) def to_string(self): - buf = six.StringIO() + buf = io.StringIO() buf.write(_make_header()) if self._conf: buf.write("\n") @@ -104,7 +103,7 @@ class ConfigMap(object): value = self._conf[key] if isinstance(value, bool): value = self._bool_map[value] - if not isinstance(value, six.string_types): + if not isinstance(value, str): value = str(value) buf.write("%s=%s\n" % (key, _quote_value(value))) return buf.getvalue() @@ -150,7 +149,7 @@ class Route(ConfigMap): # only accept ipv4 and ipv6 if proto not in ['ipv4', 'ipv6']: raise ValueError("Unknown protocol '%s'" % (str(proto))) - buf = six.StringIO() + buf = io.StringIO() buf.write(_make_header()) if self._conf: buf.write("\n") diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 9698261b..f73b37ed 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -3,7 +3,6 @@ import copy import inspect import os -import six import stat from cloudinit.event import EventType @@ -13,7 +12,7 @@ from cloudinit.sources import ( EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource, canonical_cloud_id, redact_sensitive_keys) -from cloudinit.tests.helpers import CiTestCase, skipIf, mock +from cloudinit.tests.helpers import CiTestCase, mock from cloudinit.user_data import UserDataProcessor from cloudinit import util @@ -422,7 +421,6 @@ class TestDataSource(CiTestCase): {'network_json': 'is good'}, instance_data['ds']['network_json']) - @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes") def test_get_data_base64encodes_unserializable_bytes(self): """On py3, get_data base64encodes any unserializable content.""" tmp = self.tmp_dir() @@ -440,35 +438,6 @@ class TestDataSource(CiTestCase): {'key1': 'val1', 'key2': {'key2.1': 'EjM='}}, instance_json['ds']['meta_data']) - @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes") - def test_get_data_handles_bytes_values(self): - """On py2 get_data handles bytes values without having to b64encode.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) - self.assertTrue(datasource.get_data()) - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - content = util.load_file(json_file) - instance_json = util.load_json(content) - self.assertEqual([], instance_json['base64_encoded_keys']) - self.assertEqual( - {'key1': 'val1', 'key2': {'key2.1': '\x123'}}, - instance_json['ds']['meta_data']) - - @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8") - def test_non_utf8_encoding_gets_b64encoded(self): - """When non-utf-8 values exist in py2 instance-data is b64encoded.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}}) - 
self.assertTrue(datasource.get_data()) - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - instance_json = util.load_json(util.load_file(json_file)) - key21_value = instance_json['ds']['meta_data']['key2']['key2.1'] - self.assertEqual('ci-b64:' + util.b64e(b'ab\xaadef'), key21_value) - def test_get_hostname_subclass_support(self): """Validate get_hostname signature on all subclasses of DataSource.""" # Use inspect.getfullargspec when we drop py2.6 and py2.7 diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index 85b6db97..6c551fcb 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -13,7 +13,6 @@ import httpretty import json import mock import os -import six import uuid DS_PATH = "cloudinit.sources.DataSourceOracle" @@ -334,7 +333,7 @@ class TestReadMetaData(test_helpers.HttprettyTestCase): for k, v in data.items(): httpretty.register_uri( httpretty.GET, self.mdurl + MD_VER + "/" + k, - v if not isinstance(v, six.text_type) else v.encode('utf-8')) + v if not isinstance(v, str) else v.encode('utf-8')) def test_broken_no_sys_uuid(self, m_read_system_uuid): """Datasource requires ability to read system_uuid and true return.""" diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 71f3a49e..db8ba64c 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -6,11 +6,9 @@ import copy import os +import pickle import sys -import six -from six.moves import cPickle as pickle - from cloudinit.settings import ( FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG) @@ -758,7 +756,7 @@ class Modules(object): for item in cfg_mods: if not item: continue - if isinstance(item, six.string_types): + if isinstance(item, str): module_list.append({ 'mod': item.strip(), }) diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 4dad2afd..0220648d 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -4,6 +4,7 @@ from __future__ import print_function import functools import httpretty +import io import logging import os import random @@ -14,7 +15,6 @@ import tempfile import time import mock -import six import unittest2 from unittest2.util import strclass @@ -72,7 +72,7 @@ def retarget_many_wrapper(new_base, am, old_func): # Python 3 some of these now accept file-descriptors (integers). # That breaks rebase_path() so in lieu of a better solution, just # don't rebase if we get a fd. - if isinstance(path, six.string_types): + if isinstance(path, str): n_args[i] = rebase_path(path, new_base) return old_func(*n_args, **kwds) return wrapper @@ -149,7 +149,7 @@ class CiTestCase(TestCase): if self.with_logs: # Create a log handler so unit tests can search expected logs. 
self.logger = logging.getLogger() - self.logs = six.StringIO() + self.logs = io.StringIO() formatter = logging.Formatter('%(levelname)s: %(message)s') handler = logging.StreamHandler(self.logs) handler.setFormatter(formatter) @@ -166,7 +166,7 @@ class CiTestCase(TestCase): else: cmd = args[0] - if not isinstance(cmd, six.string_types): + if not isinstance(cmd, str): cmd = cmd[0] pass_through = False if not isinstance(self.allowed_subp, (list, bool)): @@ -346,8 +346,9 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def patchOpen(self, new_root): trap_func = retarget_many_wrapper(new_root, 1, open) - name = 'builtins.open' if six.PY3 else '__builtin__.open' - self.patched_funcs.enter_context(mock.patch(name, trap_func)) + self.patched_funcs.enter_context( + mock.patch('builtins.open', trap_func) + ) def patchStdoutAndStderr(self, stdout=None, stderr=None): if stdout is not None: @@ -420,7 +421,7 @@ def populate_dir(path, files): p = os.path.sep.join([path, name]) util.ensure_dir(os.path.dirname(p)) with open(p, "wb") as fp: - if isinstance(content, six.binary_type): + if isinstance(content, bytes): fp.write(content) else: fp.write(content.encode('utf-8')) diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index d283f136..e57c15d1 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -1,8 +1,8 @@ # This file is part of cloud-init. See LICENSE file for license information. -from collections import namedtuple import os -import six +import io +from collections import namedtuple from cloudinit.cmd import main as cli from cloudinit.tests import helpers as test_helpers @@ -18,7 +18,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def setUp(self): super(TestCLI, self).setUp() - self.stderr = six.StringIO() + self.stderr = io.StringIO() self.patchStdoutAndStderr(stderr=self.stderr) def _call_main(self, sysv_args=None): @@ -147,7 +147,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_conditional_subcommands_from_entry_point_sys_argv(self): """Subcommands from entry-point are properly parsed from sys.argv.""" - stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) expected_errors = [ @@ -178,7 +178,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_collect_logs_subcommand_parser(self): """The subcommand cloud-init collect-logs calls the subparser.""" # Provide -h param to collect-logs to avoid having to mock behavior. - stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) self._call_main(['cloud-init', 'collect-logs', '-h']) self.assertIn('usage: cloud-init collect-log', stdout.getvalue()) @@ -186,7 +186,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_clean_subcommand_parser(self): """The subcommand cloud-init clean calls the subparser.""" # Provide -h param to clean to avoid having to mock behavior. - stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) self._call_main(['cloud-init', 'clean', '-h']) self.assertIn('usage: cloud-init clean', stdout.getvalue()) @@ -194,7 +194,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_status_subcommand_parser(self): """The subcommand cloud-init status calls the subparser.""" # Provide -h param to clean to avoid having to mock behavior. 
- stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) self._call_main(['cloud-init', 'status', '-h']) self.assertIn('usage: cloud-init status', stdout.getvalue()) @@ -219,7 +219,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_wb_devel_schema_subcommand_doc_content(self): """Validate that doc content is sane from known examples.""" - stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) self._call_main(['cloud-init', 'devel', 'schema', '--doc']) expected_doc_sections = [ diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index d5b1c29c..62084de5 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -33,8 +33,6 @@ from cloudinit.sources.DataSourceSmartOS import ( identify_file) from cloudinit.event import EventType -import six - from cloudinit import helpers as c_helpers from cloudinit.util import ( b64e, subp, ProcessExecutionError, which, write_file) @@ -798,7 +796,7 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): return self.serial.write.call_args[0][0] def test_get_metadata_writes_bytes(self): - self.assertIsInstance(self._get_written_line(), six.binary_type) + self.assertIsInstance(self._get_written_line(), bytes) def test_get_metadata_line_starts_with_v2(self): foo = self._get_written_line() diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py index f4311268..2dab3a54 100644 --- a/tests/unittests/test_handler/test_handler_chef.py +++ b/tests/unittests/test_handler/test_handler_chef.py @@ -4,7 +4,6 @@ import httpretty import json import logging import os -import six from cloudinit import cloud from cloudinit.config import cc_chef @@ -178,7 +177,7 @@ class TestChef(FilesystemMockingTestCase): continue # the value from the cfg overrides that in the default val = cfg['chef'].get(k, v) - if isinstance(val, six.string_types): + if isinstance(val, str): self.assertIn(val, c) c = util.load_file(cc_chef.CHEF_FB_PATH) self.assertEqual({}, json.loads(c)) diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/test_handler/test_handler_write_files.py index bc8756ca..ed0a4da2 100644 --- a/tests/unittests/test_handler/test_handler_write_files.py +++ b/tests/unittests/test_handler/test_handler_write_files.py @@ -1,17 +1,16 @@ # This file is part of cloud-init. See LICENSE file for license information. 
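
# For illustration: io.BytesIO is a drop-in replacement for six.BytesIO,
# e.g. when building a gzip payload in memory as the _gzip_bytes helper
# below does. A minimal standalone sketch:
import gzip
import io

def gzip_bytes(data):
    # Compress bytes entirely in memory, no temp file needed.
    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode='wb') as fp:
        fp.write(data)
    return buf.getvalue()

assert gzip.decompress(gzip_bytes(b'hello')) == b'hello'
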
-from cloudinit.config.cc_write_files import write_files, decode_perms -from cloudinit import log as logging -from cloudinit import util - -from cloudinit.tests.helpers import CiTestCase, FilesystemMockingTestCase - import base64 import gzip +import io import shutil -import six import tempfile +from cloudinit import log as logging +from cloudinit import util +from cloudinit.config.cc_write_files import write_files, decode_perms +from cloudinit.tests.helpers import CiTestCase, FilesystemMockingTestCase + LOG = logging.getLogger(__name__) YAML_TEXT = """ @@ -138,7 +137,7 @@ class TestDecodePerms(CiTestCase): def _gzip_bytes(data): - buf = six.BytesIO() + buf = io.BytesIO() fp = None try: fp = gzip.GzipFile(fileobj=buf, mode="wb") diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py index cd6296d6..e069a487 100644 --- a/tests/unittests/test_log.py +++ b/tests/unittests/test_log.py @@ -2,14 +2,15 @@ """Tests for cloudinit.log """ -from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT -from cloudinit import log as ci_logging -from cloudinit.tests.helpers import CiTestCase import datetime +import io import logging -import six import time +from cloudinit import log as ci_logging +from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT +from cloudinit.tests.helpers import CiTestCase + class TestCloudInitLogger(CiTestCase): @@ -18,7 +19,7 @@ class TestCloudInitLogger(CiTestCase): # of sys.stderr, we'll plug in a StringIO() object so we can see # what gets logged logging.Formatter.converter = time.gmtime - self.ci_logs = six.StringIO() + self.ci_logs = io.StringIO() self.ci_root = logging.getLogger() console = logging.StreamHandler(self.ci_logs) console.setFormatter(logging.Formatter(ci_logging.DEF_CON_FORMAT)) diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index 3a5072c7..10871bcf 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -13,13 +13,11 @@ import glob import os import random import re -import six import string SOURCE_PAT = "source*.*yaml" EXPECTED_PAT = "expected%s.yaml" -TYPES = [dict, str, list, tuple, None] -TYPES.extend(six.integer_types) +TYPES = [dict, str, list, tuple, None, int] def _old_mergedict(src, cand): @@ -85,7 +83,7 @@ def _make_dict(current_depth, max_depth, rand): pass if t in [tuple]: base = tuple(base) - elif t in six.integer_types: + elif t in [int]: base = rand.randint(0, 2 ** 8) elif t in [str]: base = _random_str(rand) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 0e71db82..75a3f0b4 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -2,16 +2,15 @@ from __future__ import print_function +import io +import json import logging import os import re import shutil import stat -import tempfile - -import json -import six import sys +import tempfile import yaml from cloudinit import importer, util @@ -320,7 +319,7 @@ class TestLoadYaml(helpers.CiTestCase): def test_python_unicode(self): # complex type of python/unicode is explicitly allowed - myobj = {'1': six.text_type("FOOBAR")} + myobj = {'1': "FOOBAR"} safe_yaml = yaml.dump(myobj) self.assertEqual(util.load_yaml(blob=safe_yaml, default=self.mydefault), @@ -663,8 +662,8 @@ class TestMultiLog(helpers.FilesystemMockingTestCase): self.patchOS(self.root) self.patchUtils(self.root) self.patchOpen(self.root) - self.stdout = six.StringIO() - self.stderr = six.StringIO() + self.stdout = io.StringIO() + self.stderr = io.StringIO() self.patchStdoutAndStderr(self.stdout, self.stderr) 
     def test_stderr_used_by_default(self):
@@ -879,8 +878,8 @@ class TestSubp(helpers.CiTestCase):
         """Raised exc should have stderr, stdout as string if no decode."""
         with self.assertRaises(util.ProcessExecutionError) as cm:
             util.subp([BOGUS_COMMAND], decode=True)
-        self.assertTrue(isinstance(cm.exception.stdout, six.string_types))
-        self.assertTrue(isinstance(cm.exception.stderr, six.string_types))
+        self.assertTrue(isinstance(cm.exception.stdout, str))
+        self.assertTrue(isinstance(cm.exception.stderr, str))

     def test_bunch_of_slashes_in_path(self):
         self.assertEqual("/target/my/path/",
-- cgit v1.2.3


From 9e3ac98097ed1c7f49ec8975a40aec7229231aae Mon Sep 17 00:00:00 2001
From: Louis Bouchard
Date: Wed, 29 Jan 2020 16:55:09 +0100
Subject: Scaleway: Fix DatasourceScaleway to avoid backtrace (#128)

Make sure network_config is created when self._network_config is unset.

Co-authored-by: Scott Moser
---
 cloudinit/sources/DataSourceScaleway.py          |  9 ++++-
 tests/unittests/test_datasource/test_scaleway.py | 49 ++++++++++++++++++++++++
 2 files changed, 56 insertions(+), 2 deletions(-)

(limited to 'tests/unittests/test_datasource')

diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index b573b382..83c2bf65 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -188,7 +188,7 @@ class DataSourceScaleway(sources.DataSource):
         self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
         self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
         self._fallback_interface = None
-        self._network_config = None
+        self._network_config = sources.UNSET

     def _crawl_metadata(self):
         resp = url_helper.readurl(self.metadata_address,
@@ -227,7 +227,12 @@ class DataSourceScaleway(sources.DataSource):
         Configure networking according to data received from the
         metadata API.
         """
-        if self._network_config:
+        if self._network_config is None:
+            LOG.warning('Found None as cached _network_config. '
+                        'Resetting to %s', sources.UNSET)
+            self._network_config = sources.UNSET
+
+        if self._network_config != sources.UNSET:
             return self._network_config

         if self._fallback_interface is None:
diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
index f96bf0a2..1b4dd0ad 100644
--- a/tests/unittests/test_datasource/test_scaleway.py
+++ b/tests/unittests/test_datasource/test_scaleway.py
@@ -7,6 +7,7 @@ import requests

 from cloudinit import helpers
 from cloudinit import settings
+from cloudinit import sources
 from cloudinit.sources import DataSourceScaleway

 from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase
@@ -403,3 +404,51 @@ class TestDataSourceScaleway(HttprettyTestCase):

         netcfg = self.datasource.network_config
         self.assertEqual(netcfg, '0xdeadbeef')
+
+    @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
+    @mock.patch('cloudinit.util.get_cmdline')
+    def test_network_config_unset(self, m_get_cmdline, fallback_nic):
+        """
+        _network_config will be set to sources.UNSET after the first boot.
+        Make sure it behaves correctly.
+ """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + self.datasource._network_config = sources.UNSET + + resp = {'version': 1, + 'config': [{ + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}]}] + } + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.LOG.warning') + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_cached_none(self, m_get_cmdline, fallback_nic, + logwarning): + """ + network_config() should return config data if cached data is None + rather than sources.UNSET + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + self.datasource._network_config = None + + resp = {'version': 1, + 'config': [{ + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}]}] + } + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, resp) + logwarning.assert_called_with('Found None as cached _network_config. ' + 'Resetting to %s', sources.UNSET) -- cgit v1.2.3 From 5f8f85bb38cc972d3d2c705a1ec73db3f690f323 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 29 Jan 2020 16:55:39 -0500 Subject: Replace mock library with unittest.mock (#186) * cloudinit: replace "import mock" with "from unittest import mock" * test-requirements.txt: drop mock Co-authored-by: Chad Smith --- cloudinit/config/tests/test_set_passwords.py | 2 +- cloudinit/net/tests/test_init.py | 2 +- cloudinit/net/tests/test_network_state.py | 3 ++- cloudinit/sources/tests/test_oracle.py | 2 +- cloudinit/tests/helpers.py | 2 +- cloudinit/tests/test_dhclient_hook.py | 2 +- cloudinit/tests/test_gpg.py | 4 ++-- cloudinit/tests/test_version.py | 4 ++-- test-requirements.txt | 1 - tests/unittests/test_datasource/test_aliyun.py | 2 +- tests/unittests/test_datasource/test_ec2.py | 2 +- tests/unittests/test_datasource/test_gce.py | 2 +- tests/unittests/test_datasource/test_maas.py | 2 +- tests/unittests/test_distros/test_user_data_normalize.py | 3 ++- tests/unittests/test_handler/test_handler_locale.py | 2 +- tests/unittests/test_reporting.py | 4 ++-- tests/unittests/test_reporting_hyperv.py | 2 +- tests/unittests/test_sshutil.py | 2 +- 18 files changed, 22 insertions(+), 21 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py index 85e2f1fe..3b5cdd06 100644 --- a/cloudinit/config/tests/test_set_passwords.py +++ b/cloudinit/config/tests/test_set_passwords.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-import mock +from unittest import mock from cloudinit.config import cc_set_passwords as setpass from cloudinit.tests.helpers import CiTestCase diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index 6db93e26..5081a337 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -3,10 +3,10 @@ import copy import errno import httpretty -import mock import os import requests import textwrap +from unittest import mock import cloudinit.net as net from cloudinit.util import ensure_file, write_file, ProcessExecutionError diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py index fcb4a995..55880852 100644 --- a/cloudinit/net/tests/test_network_state.py +++ b/cloudinit/net/tests/test_network_state.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. -import mock +from unittest import mock + from cloudinit.net import network_state from cloudinit.tests.helpers import CiTestCase diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index 6c551fcb..abf3d359 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -11,9 +11,9 @@ import argparse import copy import httpretty import json -import mock import os import uuid +from unittest import mock DS_PATH = "cloudinit.sources.DataSourceOracle" MD_VER = "2013-10-17" diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 0220648d..70f6bad7 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -13,8 +13,8 @@ import string import sys import tempfile import time +from unittest import mock -import mock import unittest2 from unittest2.util import strclass diff --git a/cloudinit/tests/test_dhclient_hook.py b/cloudinit/tests/test_dhclient_hook.py index 7aab8dd5..eadae81c 100644 --- a/cloudinit/tests/test_dhclient_hook.py +++ b/cloudinit/tests/test_dhclient_hook.py @@ -7,8 +7,8 @@ from cloudinit.tests.helpers import CiTestCase, dir2dict, populate_dir import argparse import json -import mock import os +from unittest import mock class TestDhclientHook(CiTestCase): diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py index 0562b966..8dd57137 100644 --- a/cloudinit/tests/test_gpg.py +++ b/cloudinit/tests/test_gpg.py @@ -1,12 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. """Test gpg module.""" +from unittest import mock + from cloudinit import gpg from cloudinit import util from cloudinit.tests.helpers import CiTestCase -import mock - @mock.patch("cloudinit.gpg.time.sleep") @mock.patch("cloudinit.gpg.util.subp") diff --git a/cloudinit/tests/test_version.py b/cloudinit/tests/test_version.py index a96c2a47..778a762c 100644 --- a/cloudinit/tests/test_version.py +++ b/cloudinit/tests/test_version.py @@ -1,10 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+from unittest import mock + from cloudinit.tests.helpers import CiTestCase from cloudinit import version -import mock - class TestExportsFeatures(CiTestCase): def test_has_network_config_v1(self): diff --git a/test-requirements.txt b/test-requirements.txt index d9d41b57..6fb22b24 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,6 +1,5 @@ # Needed generally in tests httpretty>=0.7.1 -mock nose unittest2 coverage diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py index e9213ca1..1e66fcdb 100644 --- a/tests/unittests/test_datasource/test_aliyun.py +++ b/tests/unittests/test_datasource/test_aliyun.py @@ -2,8 +2,8 @@ import functools import httpretty -import mock import os +from unittest import mock from cloudinit import helpers from cloudinit.sources import DataSourceAliYun as ay diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 34a089f2..19e1af2b 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -3,7 +3,7 @@ import copy import httpretty import json -import mock +from unittest import mock from cloudinit import helpers from cloudinit.sources import DataSourceEc2 as ec2 diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 67744d32..e9dd6e60 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -7,8 +7,8 @@ import datetime import httpretty import json -import mock import re +from unittest import mock from base64 import b64encode, b64decode from six.moves.urllib_parse import urlparse diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index c84d067e..2a81d3f5 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -1,11 +1,11 @@ # This file is part of cloud-init. See LICENSE file for license information. from copy import copy -import mock import os import shutil import tempfile import yaml +from unittest import mock from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index fa4b6cfe..a6faf0ef 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -1,12 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+from unittest import mock + from cloudinit import distros from cloudinit.distros import ug_util from cloudinit import helpers from cloudinit import settings from cloudinit.tests.helpers import TestCase -import mock bcfg = { diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py index e29a06f9..b3deb250 100644 --- a/tests/unittests/test_handler/test_handler_locale.py +++ b/tests/unittests/test_handler/test_handler_locale.py @@ -20,10 +20,10 @@ from configobj import ConfigObj from six import BytesIO import logging -import mock import os import shutil import tempfile +from unittest import mock LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index e15ba6cf..6814030e 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -2,12 +2,12 @@ # # This file is part of cloud-init. See LICENSE file for license information. +from unittest import mock + from cloudinit import reporting from cloudinit.reporting import events from cloudinit.reporting import handlers -import mock - from cloudinit.tests.helpers import TestCase diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py index 3582cf0b..b3e083c6 100644 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -8,7 +8,7 @@ import os import struct import time import re -import mock +from unittest import mock from cloudinit import util from cloudinit.tests.helpers import CiTestCase diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index b227c20b..0be41924 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from mock import patch from collections import namedtuple +from unittest.mock import patch from cloudinit import ssh_util from cloudinit.tests import helpers as test_helpers -- cgit v1.2.3 From 1bb1896ec900622e02c1ffb59db4d3f2df4a964d Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 31 Jan 2020 10:15:31 -0500 Subject: cloudinit: replace "from six import X" imports (except in util.py) (#183) --- cloudinit/cmd/devel/tests/test_logs.py | 2 +- cloudinit/cmd/devel/tests/test_render.py | 2 +- cloudinit/cmd/tests/test_clean.py | 2 +- cloudinit/cmd/tests/test_cloud_id.py | 2 +- cloudinit/cmd/tests/test_main.py | 2 +- cloudinit/cmd/tests/test_query.py | 2 +- cloudinit/cmd/tests/test_status.py | 2 +- cloudinit/config/cc_debug.py | 3 +-- cloudinit/config/cc_landscape.py | 3 +-- cloudinit/config/cc_puppet.py | 3 +-- cloudinit/config/cc_rightscale_userdata.py | 3 +-- cloudinit/config/cc_seed_random.py | 3 +-- cloudinit/config/cc_zypper_add_repo.py | 3 +-- cloudinit/config/tests/test_snap.py | 2 +- cloudinit/distros/parsers/hostname.py | 2 +- cloudinit/distros/parsers/hosts.py | 2 +- cloudinit/distros/parsers/resolv_conf.py | 2 +- cloudinit/helpers.py | 6 ++---- cloudinit/net/dhcp.py | 2 +- cloudinit/signal_handler.py | 3 +-- tests/unittests/test_data.py | 3 +-- tests/unittests/test_datasource/test_gce.py | 2 +- tests/unittests/test_datasource/test_openstack.py | 8 +++----- tests/unittests/test_distros/test_netconfig.py | 2 +- tests/unittests/test_filters/test_launch_index.py | 3 +-- tests/unittests/test_handler/test_handler_locale.py | 3 +-- tests/unittests/test_handler/test_handler_mcollective.py | 2 +- tests/unittests/test_handler/test_handler_seed_random.py | 3 +-- tests/unittests/test_handler/test_handler_set_hostname.py | 2 +- tests/unittests/test_handler/test_handler_timezone.py | 2 +- tests/unittests/test_handler/test_handler_yum_add_repo.py | 2 +- tests/unittests/test_handler/test_handler_zypper_add_repo.py | 2 +- tests/unittests/test_handler/test_schema.py | 2 +- 33 files changed, 36 insertions(+), 51 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py index 4951797b..d2dfa8de 100644 --- a/cloudinit/cmd/devel/tests/test_logs.py +++ b/cloudinit/cmd/devel/tests/test_logs.py @@ -2,7 +2,7 @@ from datetime import datetime import os -from six import StringIO +from io import StringIO from cloudinit.cmd.devel import logs from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py index 988bba03..a7fcf2ce 100644 --- a/cloudinit/cmd/devel/tests/test_render.py +++ b/cloudinit/cmd/devel/tests/test_render.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
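
# For illustration: io.StringIO is the Python 3 class that six merely
# re-exported, so 'from six import StringIO' becomes
# 'from io import StringIO' with no behavior change. A standalone sketch
# of the in-memory capture pattern used throughout these tests:
import io

buf = io.StringIO()
buf.write('hello ')
buf.write('world')
assert buf.getvalue() == 'hello world'
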
-from six import StringIO import os +from io import StringIO from collections import namedtuple from cloudinit.cmd.devel import render diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py index f092ab3d..13a69aa1 100644 --- a/cloudinit/cmd/tests/test_clean.py +++ b/cloudinit/cmd/tests/test_clean.py @@ -5,7 +5,7 @@ from cloudinit.util import ensure_dir, sym_link, write_file from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock from collections import namedtuple import os -from six import StringIO +from io import StringIO mypaths = namedtuple('MyPaths', 'cloud_dir') diff --git a/cloudinit/cmd/tests/test_cloud_id.py b/cloudinit/cmd/tests/test_cloud_id.py index 73738170..3f3727fd 100644 --- a/cloudinit/cmd/tests/test_cloud_id.py +++ b/cloudinit/cmd/tests/test_cloud_id.py @@ -4,7 +4,7 @@ from cloudinit import util from collections import namedtuple -from six import StringIO +from io import StringIO from cloudinit.cmd import cloud_id diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py index 57b8fdf5..384fddc6 100644 --- a/cloudinit/cmd/tests/test_main.py +++ b/cloudinit/cmd/tests/test_main.py @@ -3,7 +3,7 @@ from collections import namedtuple import copy import os -from six import StringIO +from io import StringIO from cloudinit.cmd import main from cloudinit import safeyaml diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py index c48605ad..6d36a4ea 100644 --- a/cloudinit/cmd/tests/test_query.py +++ b/cloudinit/cmd/tests/test_query.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import errno -from six import StringIO +from io import StringIO from textwrap import dedent import os diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py index aded8580..1ed10896 100644 --- a/cloudinit/cmd/tests/test_status.py +++ b/cloudinit/cmd/tests/test_status.py @@ -2,7 +2,7 @@ from collections import namedtuple import os -from six import StringIO +from io import StringIO from textwrap import dedent from cloudinit.atomic_helper import write_json diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 610dbc8b..4d5a6aa2 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -28,8 +28,7 @@ location that this cloud-init has been configured with when running. 
""" import copy - -from six import StringIO +from io import StringIO from cloudinit import type_utils from cloudinit import util diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index eaf1e940..a9c04d86 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -56,8 +56,7 @@ The following default client config is provided, but can be overridden:: """ import os - -from six import BytesIO +from io import BytesIO from configobj import ConfigObj diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index b088db6e..c01f5b8f 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -77,11 +77,10 @@ See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290 """ -from six import StringIO - import os import socket import yaml +from io import StringIO from cloudinit import helpers from cloudinit import util diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py index bd8ee89f..a5aca038 100644 --- a/cloudinit/config/cc_rightscale_userdata.py +++ b/cloudinit/config/cc_rightscale_userdata.py @@ -50,13 +50,12 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``. # import os +from urllib.parse import parse_qs from cloudinit.settings import PER_INSTANCE from cloudinit import url_helper as uhelp from cloudinit import util -from six.moves.urllib_parse import parse_qs - frequency = PER_INSTANCE MY_NAME = "cc_rightscale_userdata" diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index a5d7c73f..b65f3ed9 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -61,8 +61,7 @@ used:: import base64 import os - -from six import BytesIO +from io import BytesIO from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py index aba26952..05855b0c 100644 --- a/cloudinit/config/cc_zypper_add_repo.py +++ b/cloudinit/config/cc_zypper_add_repo.py @@ -7,7 +7,6 @@ import configobj import os -from six import string_types from textwrap import dedent from cloudinit.config.schema import get_schema_doc @@ -110,7 +109,7 @@ def _format_repo_value(val): return 1 if val else 0 if isinstance(val, (list, tuple)): return "\n ".join([_format_repo_value(v) for v in val]) - if not isinstance(val, string_types): + if not isinstance(val, str): return str(val) return val diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py index 3c472891..cbbb173d 100644 --- a/cloudinit/config/tests/test_snap.py +++ b/cloudinit/config/tests/test_snap.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import re -from six import StringIO +from io import StringIO from cloudinit.config.cc_snap import ( ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse, diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py index dd434ac6..e74c083c 100644 --- a/cloudinit/distros/parsers/hostname.py +++ b/cloudinit/distros/parsers/hostname.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-from six import StringIO +from io import StringIO from cloudinit.distros.parsers import chop_comment diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py index 64444581..54e4e934 100644 --- a/cloudinit/distros/parsers/hosts.py +++ b/cloudinit/distros/parsers/hosts.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -from six import StringIO +from io import StringIO from cloudinit.distros.parsers import chop_comment diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index a62055ae..299d54b5 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -from six import StringIO +from io import StringIO from cloudinit.distros.parsers import chop_comment from cloudinit import log as logging diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index dcd2645e..7d2a3305 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -12,10 +12,8 @@ from time import time import contextlib import os - -from six import StringIO -from six.moves.configparser import ( - NoSectionError, NoOptionError, RawConfigParser) +from configparser import NoSectionError, NoOptionError, RawConfigParser +from io import StringIO from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, CFG_ENV_NAME) diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index c033cc8e..19d0199c 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -10,6 +10,7 @@ import os import re import signal import time +from io import StringIO from cloudinit.net import ( EphemeralIPv4Network, find_fallback_nic, get_devicelist, @@ -17,7 +18,6 @@ from cloudinit.net import ( from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip from cloudinit import temp_utils from cloudinit import util -from six import StringIO LOG = logging.getLogger(__name__) diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py index 12fdfe6c..9272d22d 100644 --- a/cloudinit/signal_handler.py +++ b/cloudinit/signal_handler.py @@ -9,8 +9,7 @@ import inspect import signal import sys - -from six import StringIO +from io import StringIO from cloudinit import log as logging from cloudinit import util diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index c59db33d..74cc26ec 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -5,10 +5,9 @@ import gzip import logging import os +from io import BytesIO, StringIO from unittest import mock -from six import BytesIO, StringIO - from email import encoders from email.mime.application import MIMEApplication from email.mime.base import MIMEBase diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index e9dd6e60..4afbccff 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -9,9 +9,9 @@ import httpretty import json import re from unittest import mock +from urllib.parse import urlparse from base64 import b64encode, b64decode -from six.moves.urllib_parse import urlparse from cloudinit import distros from cloudinit import helpers diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index a731f1ed..f754556f 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ 
b/tests/unittests/test_datasource/test_openstack.py @@ -8,12 +8,11 @@ import copy import httpretty as hp import json import re +from io import StringIO +from urllib.parse import urlparse from cloudinit.tests import helpers as test_helpers -from six.moves.urllib.parse import urlparse -from six import StringIO, text_type - from cloudinit import helpers from cloudinit import settings from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET @@ -569,8 +568,7 @@ class TestMetadataReader(test_helpers.HttprettyTestCase): 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} def register(self, path, body=None, status=200): - content = (body if not isinstance(body, text_type) - else body.encode('utf-8')) + content = body if not isinstance(body, str) else body.encode('utf-8') hp.register_uri( hp.GET, self.burl + "openstack" + path, status=status, body=content) diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 5ede4d77..5562e5d5 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -2,7 +2,7 @@ import copy import os -from six import StringIO +from io import StringIO from textwrap import dedent from unittest import mock diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py index e1a5d2c8..1492361e 100644 --- a/tests/unittests/test_filters/test_launch_index.py +++ b/tests/unittests/test_filters/test_launch_index.py @@ -1,11 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. import copy +from itertools import filterfalse from cloudinit.tests import helpers -from six.moves import filterfalse - from cloudinit.filters import launch_index from cloudinit import user_data as ud from cloudinit import util diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py index b3deb250..2b22559f 100644 --- a/tests/unittests/test_handler/test_handler_locale.py +++ b/tests/unittests/test_handler/test_handler_locale.py @@ -17,12 +17,11 @@ from cloudinit.tests import helpers as t_help from configobj import ConfigObj -from six import BytesIO - import logging import os import shutil import tempfile +from io import BytesIO from unittest import mock LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/test_handler/test_handler_mcollective.py index 7eec7352..c013a538 100644 --- a/tests/unittests/test_handler/test_handler_mcollective.py +++ b/tests/unittests/test_handler/test_handler_mcollective.py @@ -10,8 +10,8 @@ import configobj import logging import os import shutil -from six import BytesIO import tempfile +from io import BytesIO LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py index f60dedc2..abecc53b 100644 --- a/tests/unittests/test_handler/test_handler_seed_random.py +++ b/tests/unittests/test_handler/test_handler_seed_random.py @@ -12,8 +12,7 @@ from cloudinit.config import cc_seed_random import gzip import tempfile - -from six import BytesIO +from io import BytesIO from cloudinit import cloud from cloudinit import distros diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py index d09ec23a..58abf51a 100644 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ 
b/tests/unittests/test_handler/test_handler_set_hostname.py @@ -13,8 +13,8 @@ from configobj import ConfigObj import logging import os import shutil -from six import BytesIO import tempfile +from io import BytesIO LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/test_handler/test_handler_timezone.py index 27eedded..50c45363 100644 --- a/tests/unittests/test_handler/test_handler_timezone.py +++ b/tests/unittests/test_handler/test_handler_timezone.py @@ -18,8 +18,8 @@ from cloudinit.tests import helpers as t_help from configobj import ConfigObj import logging import shutil -from six import BytesIO import tempfile +from io import BytesIO LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py index b90a3af3..0675bd8f 100644 --- a/tests/unittests/test_handler/test_handler_yum_add_repo.py +++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py @@ -7,8 +7,8 @@ from cloudinit.tests import helpers import logging import shutil -from six import StringIO import tempfile +from io import StringIO LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_zypper_add_repo.py b/tests/unittests/test_handler/test_handler_zypper_add_repo.py index 72ab6c08..9685ff28 100644 --- a/tests/unittests/test_handler/test_handler_zypper_add_repo.py +++ b/tests/unittests/test_handler/test_handler_zypper_add_repo.py @@ -2,6 +2,7 @@ import glob import os +from io import StringIO from cloudinit.config import cc_zypper_add_repo from cloudinit import util @@ -10,7 +11,6 @@ from cloudinit.tests import helpers from cloudinit.tests.helpers import mock import logging -from six import StringIO LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index e69a47a9..987a89c9 100644 --- a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -10,7 +10,7 @@ from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema from copy import copy import os -from six import StringIO +from io import StringIO from textwrap import dedent from yaml import safe_load -- cgit v1.2.3 From 87cd040ed8fe7195cbb357ed3bbf53cd2a81436c Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 19 Feb 2020 15:01:09 -0600 Subject: ec2: Do not log IMDSv2 token values, instead use REDACTED (#219) Instead of logging the token values used log the headers and replace the actual values with the string 'REDACTED'. This allows users to examine cloud-init.log and see that the IMDSv2 token header is being used but avoids leaving the value used in the log file itself. 
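The commit above applies one mechanical rule across every file: each six
compatibility shim has a direct standard-library equivalent once Python 2
support is dropped. A brief summary of the mapping, as a sketch for
orientation rather than part of the patch itself:

    # Python 2/3 shims previously imported from six:
    #     from six import StringIO, BytesIO
    #     from six.moves.urllib_parse import parse_qs
    #     from six.moves.configparser import RawConfigParser
    #     from six.moves import filterfalse
    # Their Python 3 stdlib replacements:
    from io import StringIO, BytesIO
    from urllib.parse import parse_qs
    from configparser import RawConfigParser
    from itertools import filterfalse

    # six.string_types checks collapse to plain str on Python 3:
    print(isinstance("repo-name", str))  # replaces isinstance(val, string_types)
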
From 87cd040ed8fe7195cbb357ed3bbf53cd2a81436c Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Wed, 19 Feb 2020 15:01:09 -0600
Subject: ec2: Do not log IMDSv2 token values, instead use REDACTED (#219)

Instead of logging the token values, log the headers and replace the
actual values with the string 'REDACTED'. This allows users to examine
cloud-init.log and see that the IMDSv2 token header is being used, but
avoids leaving the token value itself in the log file.

LP: #1863943
---
 cloudinit/ec2_utils.py                      | 12 ++++++--
 cloudinit/sources/DataSourceEc2.py          | 35 +++++++++++++++++++----------
 cloudinit/url_helper.py                     | 27 ++++++++++++++++------
 tests/unittests/test_datasource/test_ec2.py | 17 ++++++++++++++
 4 files changed, 70 insertions(+), 21 deletions(-)

(limited to 'tests/unittests/test_datasource')

diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 57708c14..34acfe84 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -142,7 +142,8 @@ def skip_retry_on_codes(status_codes, _request_args, cause):
 def get_instance_userdata(api_version='latest',
                           metadata_address='http://169.254.169.254',
                           ssl_details=None, timeout=5, retries=5,
-                          headers_cb=None, exception_cb=None):
+                          headers_cb=None, headers_redact=None,
+                          exception_cb=None):
     ud_url = url_helper.combine_url(metadata_address, api_version)
     ud_url = url_helper.combine_url(ud_url, 'user-data')
     user_data = ''
@@ -155,7 +156,8 @@
             SKIP_USERDATA_CODES)
         response = url_helper.read_file_or_url(
             ud_url, ssl_details=ssl_details, timeout=timeout,
-            retries=retries, exception_cb=exception_cb, headers_cb=headers_cb)
+            retries=retries, exception_cb=exception_cb, headers_cb=headers_cb,
+            headers_redact=headers_redact)
         user_data = response.contents
     except url_helper.UrlError as e:
         if e.code not in SKIP_USERDATA_CODES:
@@ -169,11 +171,13 @@ def _get_instance_metadata(tree, api_version='latest',
                            metadata_address='http://169.254.169.254',
                            ssl_details=None, timeout=5, retries=5,
                            leaf_decoder=None, headers_cb=None,
+                           headers_redact=None,
                            exception_cb=None):
     md_url = url_helper.combine_url(metadata_address, api_version, tree)
     caller = functools.partial(
         url_helper.read_file_or_url, ssl_details=ssl_details,
         timeout=timeout, retries=retries, headers_cb=headers_cb,
+        headers_redact=headers_redact,
         exception_cb=exception_cb)

     def mcaller(url):
@@ -197,6 +201,7 @@ def get_instance_metadata(api_version='latest',
                           metadata_address='http://169.254.169.254',
                           ssl_details=None, timeout=5, retries=5,
                           leaf_decoder=None, headers_cb=None,
+                          headers_redact=None,
                           exception_cb=None):
     # Note, 'meta-data' explicitly has trailing /.
     # this is required for CloudStack (LP: #1356855)
@@ -204,6 +209,7 @@ def get_instance_metadata(api_version='latest',
                                   metadata_address=metadata_address,
                                   ssl_details=ssl_details, timeout=timeout,
                                   retries=retries, leaf_decoder=leaf_decoder,
+                                  headers_redact=headers_redact,
                                   headers_cb=headers_cb,
                                   exception_cb=exception_cb)

@@ -212,12 +218,14 @@ def get_instance_identity(api_version='latest',
                           metadata_address='http://169.254.169.254',
                           ssl_details=None, timeout=5, retries=5,
                           leaf_decoder=None, headers_cb=None,
+                          headers_redact=None,
                           exception_cb=None):
     return _get_instance_metadata(tree='dynamic/instance-identity',
                                   api_version=api_version,
                                   metadata_address=metadata_address,
                                   ssl_details=ssl_details, timeout=timeout,
                                   retries=retries, leaf_decoder=leaf_decoder,
+                                  headers_redact=headers_redact,
                                   headers_cb=headers_cb,
                                   exception_cb=exception_cb)

 # vi: ts=4 expandtab

diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index b9f346a6..0f2bfef4 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -31,6 +31,9 @@ STRICT_ID_DEFAULT = "warn"
 API_TOKEN_ROUTE = 'latest/api/token'
 API_TOKEN_DISABLED = '_ec2_disable_api_token'
 AWS_TOKEN_TTL_SECONDS = '21600'
+AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token'
+AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds'
+AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER]


 class CloudNames(object):
@@ -158,7 +161,8 @@ class DataSourceEc2(sources.DataSource):
         for api_ver in self.extended_metadata_versions:
             url = url_tmpl.format(self.metadata_address, api_ver)
             try:
-                resp = uhelp.readurl(url=url, headers=headers)
+                resp = uhelp.readurl(url=url, headers=headers,
+                                     headers_redact=AWS_TOKEN_REDACT)
             except uhelp.UrlError as e:
                 LOG.debug('url %s raised exception %s', url, e)
             else:
@@ -180,6 +184,7 @@ class DataSourceEc2(sources.DataSource):
                 self.identity = ec2.get_instance_identity(
                     api_version, self.metadata_address,
                     headers_cb=self._get_headers,
+                    headers_redact=AWS_TOKEN_REDACT,
                     exception_cb=self._refresh_stale_aws_token_cb).get(
                         'document', {})
             return self.identity.get(
@@ -205,7 +210,8 @@ class DataSourceEc2(sources.DataSource):
         LOG.debug('Fetching Ec2 IMDSv2 API Token')
         url, response = uhelp.wait_for_url(
             urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb,
-            headers_cb=self._get_headers, request_method=request_method)
+            headers_cb=self._get_headers, request_method=request_method,
+            headers_redact=AWS_TOKEN_REDACT)

         if url and response:
             self._api_token = response
@@ -252,7 +258,8 @@ class DataSourceEc2(sources.DataSource):
             url, _ = uhelp.wait_for_url(
                 urls=urls, max_wait=url_params.max_wait_seconds,
                 timeout=url_params.timeout_seconds, status_cb=LOG.warning,
-                headers_cb=self._get_headers, request_method=request_method)
+                headers_redact=AWS_TOKEN_REDACT, headers_cb=self._get_headers,
+                request_method=request_method)

         if url:
             metadata_address = url2base[url]
@@ -420,6 +427,7 @@ class DataSourceEc2(sources.DataSource):
         if not self.wait_for_metadata_service():
             return {}
         api_version = self.get_metadata_api_version()
+        redact = AWS_TOKEN_REDACT
         crawled_metadata = {}
         if self.cloud_name == CloudNames.AWS:
             exc_cb = self._refresh_stale_aws_token_cb
@@ -429,14 +437,17 @@ class DataSourceEc2(sources.DataSource):
         try:
             crawled_metadata['user-data'] = ec2.get_instance_userdata(
                 api_version, self.metadata_address,
-                headers_cb=self._get_headers, exception_cb=exc_cb_ud)
+                headers_cb=self._get_headers, headers_redact=redact,
+                exception_cb=exc_cb_ud)
             crawled_metadata['meta-data'] = ec2.get_instance_metadata(
                 api_version, self.metadata_address,
-                headers_cb=self._get_headers, exception_cb=exc_cb)
+                headers_cb=self._get_headers, headers_redact=redact,
+                exception_cb=exc_cb)
             if self.cloud_name == CloudNames.AWS:
                 identity = ec2.get_instance_identity(
                     api_version, self.metadata_address,
-                    headers_cb=self._get_headers, exception_cb=exc_cb)
+                    headers_cb=self._get_headers, headers_redact=redact,
+                    exception_cb=exc_cb)
                 crawled_metadata['dynamic'] = {'instance-identity': identity}
         except Exception:
             util.logexc(
@@ -455,11 +466,12 @@ class DataSourceEc2(sources.DataSource):
         if self.cloud_name != CloudNames.AWS:
             return None
         LOG.debug("Refreshing Ec2 metadata API token")
-        request_header = {'X-aws-ec2-metadata-token-ttl-seconds': seconds}
+        request_header = {AWS_TOKEN_REQ_HEADER: seconds}
         token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE)
         try:
-            response = uhelp.readurl(
-                token_url, headers=request_header, request_method="PUT")
+            response = uhelp.readurl(token_url, headers=request_header,
+                                     headers_redact=AWS_TOKEN_REDACT,
+                                     request_method="PUT")
         except uhelp.UrlError as e:
             LOG.warning(
                 'Unable to get API token: %s raised exception %s',
@@ -500,8 +512,7 @@ class DataSourceEc2(sources.DataSource):
                 API_TOKEN_DISABLED):
             return {}
         # Request a 6 hour token if URL is API_TOKEN_ROUTE
-        request_token_header = {
-            'X-aws-ec2-metadata-token-ttl-seconds': AWS_TOKEN_TTL_SECONDS}
+        request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS}
        if API_TOKEN_ROUTE in url:
             return request_token_header
         if not self._api_token:
@@ -511,7 +522,7 @@ class DataSourceEc2(sources.DataSource):
             self._api_token = self._refresh_api_token()
             if not self._api_token:
                 return {}
-        return {'X-aws-ec2-metadata-token': self._api_token}
+        return {AWS_TOKEN_PUT_HEADER: self._api_token}


 class DataSourceEc2Local(DataSourceEc2):

diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index f6d68436..eeb27aa8 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -8,6 +8,7 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.

+import copy
 import json
 import os
 import time
@@ -31,6 +32,7 @@ LOG = logging.getLogger(__name__)
 SSL_ENABLED = False
 CONFIG_ENABLED = False  # This was added in 0.7 (but taken out in >=1.0)
 _REQ_VER = None
+REDACTED = 'REDACTED'
 try:
     from distutils.version import LooseVersion
     import pkg_resources
@@ -189,9 +191,9 @@ def _get_ssl_args(url, ssl_details):


 def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
-            headers=None, headers_cb=None, ssl_details=None,
-            check_status=True, allow_redirects=True, exception_cb=None,
-            session=None, infinite=False, log_req_resp=True,
+            headers=None, headers_cb=None, headers_redact=None,
+            ssl_details=None, check_status=True, allow_redirects=True,
+            exception_cb=None, session=None, infinite=False, log_req_resp=True,
             request_method=None):
     """Wrapper around requests.Session to read the url and retry if necessary

@@ -207,6 +209,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
     :param headers: Optional dict of headers to send during request
     :param headers_cb: Optional callable returning a dict of values
         to send as headers during request
+    :param headers_redact: Optional list of header names to redact from the log
     :param ssl_details: Optional dict providing key_file, ca_certs, and
         cert_file keys for use on in ssl connections.
     :param check_status: Optional boolean set True to raise when HTTPError
@@ -233,6 +236,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
         req_args['method'] = request_method
     if timeout is not None:
         req_args['timeout'] = max(float(timeout), 0)
+    if headers_redact is None:
+        headers_redact = []
     # It doesn't seem like config
     # was added in older library versions (or newer ones either), thus we
     # need to manually do the retries if it wasn't...
@@ -277,6 +282,12 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
             if k == 'data':
                 continue
             filtered_req_args[k] = v
+            if k == 'headers':
+                for hkey, _hval in v.items():
+                    if hkey in headers_redact:
+                        filtered_req_args[k][hkey] = (
+                            copy.deepcopy(req_args[k][hkey]))
+                        filtered_req_args[k][hkey] = REDACTED
         try:

             if log_req_resp:
@@ -329,8 +340,8 @@
     return None  # Should throw before this...


-def wait_for_url(urls, max_wait=None, timeout=None,
-                 status_cb=None, headers_cb=None, sleep_time=1,
+def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
+                 headers_cb=None, headers_redact=None, sleep_time=1,
                  exception_cb=None, sleep_time_cb=None, request_method=None):
     """
     urls:      a list of urls to try
@@ -342,6 +353,7 @@
     status_cb: call method with string message when a url is not available
     headers_cb: call method with single argument of url to get headers
                 for request.
+    headers_redact: a list of header names to redact from the log
     exception_cb: call method with 2 arguments 'msg' (per status_cb) and
                   'exception', the exception that occurred.
     sleep_time_cb: call method with 2 arguments (response, loop_n) that
@@ -405,8 +417,9 @@
                     headers = {}

                 response = readurl(
-                    url, headers=headers, timeout=timeout,
-                    check_status=False, request_method=request_method)
+                    url, headers=headers, headers_redact=headers_redact,
+                    timeout=timeout, check_status=False,
+                    request_method=request_method)
                 if not response.contents:
                     reason = "empty response [%s]" % (response.code)
                     url_exc = UrlError(ValueError(reason), code=response.code,

diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py
index 19e1af2b..2a96122f 100644
--- a/tests/unittests/test_datasource/test_ec2.py
+++ b/tests/unittests/test_datasource/test_ec2.py
@@ -429,6 +429,23 @@ class TestEc2(test_helpers.HttprettyTestCase):
         self.assertTrue(ds.get_data())
         self.assertFalse(ds.is_classic_instance())

+    def test_aws_token_redacted(self):
+        """Verify that aws tokens are redacted when logged."""
+        ds = self._setup_ds(
+            platform_data=self.valid_platform_data,
+            sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
+            md={'md': DEFAULT_METADATA})
+        self.assertTrue(ds.get_data())
+        all_logs = self.logs.getvalue().splitlines()
+        REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'"
+        REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'"
+        logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log]
+        logs_with_redacted = [log for log in all_logs if REDACT_TOK in log]
+        logs_with_token = [log for log in all_logs if 'API-TOKEN' in log]
+        self.assertEqual(1, len(logs_with_redacted_ttl))
+        self.assertEqual(79, len(logs_with_redacted))
+        self.assertEqual(0, len(logs_with_token))
+
     @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
     def test_valid_platform_with_strict_true(self, m_dhcp):
         """Valid platform data should return true with strict_id true."""
--
cgit v1.2.3