From 34f54360fcc1e0f805002a0b639d0a84eb2cb8ee Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Fri, 22 Feb 2019 13:26:31 +0000 Subject: azure: Filter list of ssh keys pulled from fabric The Azure data source is expected to expose a list of ssh keys for the user-to-be-provisioned in the crawled metadata. When configured to use the __builtin__ agent this list is built by the WALinuxAgentShim. The shim retrieves the full set of certificates and public keys exposed to the VM from the wireserver, extracts any ssh keys it can, and returns that list. This fix reduces that list of ssh keys to just the ones whose fingerprints appear in the "administrative user" section of the ovf-env.xml file. The Azure control plane exposes other ssh keys to the VM for other reasons, but those should not be added to the authorized_keys file for the provisioned user. --- cloudinit/sources/helpers/azure.py | 109 ++++++++++++++++++++++++++----------- 1 file changed, 78 insertions(+), 31 deletions(-) (limited to 'cloudinit/sources/helpers') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index e5696b1f..2829dd20 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -138,9 +138,36 @@ class OpenSSLManager(object): self.certificate = certificate LOG.debug('New certificate generated.') - def parse_certificates(self, certificates_xml): - tag = ElementTree.fromstring(certificates_xml).find( - './/Data') + @staticmethod + def _run_x509_action(action, cert): + cmd = ['openssl', 'x509', '-noout', action] + result, _ = util.subp(cmd, data=cert) + return result + + def _get_ssh_key_from_cert(self, certificate): + pub_key = self._run_x509_action('-pubkey', certificate) + keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] + ssh_key, _ = util.subp(keygen_cmd, data=pub_key) + return ssh_key + + def _get_fingerprint_from_cert(self, certificate): + """openssl x509 formats fingerprints as so: + 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ + B6:A8:BF:27:D4:73\n' + + Azure control plane passes that fingerprint as so: + '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + """ + raw_fp = self._run_x509_action('-fingerprint', certificate) + eq = raw_fp.find('=') + octets = raw_fp[eq+1:-1].split(':') + return ''.join(octets) + + def _decrypt_certs_from_xml(self, certificates_xml): + """Decrypt the certificates XML document using the our private key; + return the list of certs and private keys contained in the doc. 
+ """ + tag = ElementTree.fromstring(certificates_xml).find('.//Data') certificates_content = tag.text lines = [ b'MIME-Version: 1.0', @@ -151,32 +178,30 @@ class OpenSSLManager(object): certificates_content.encode('utf-8'), ] with cd(self.tmpdir): - with open('Certificates.p7m', 'wb') as f: - f.write(b'\n'.join(lines)) out, _ = util.subp( - 'openssl cms -decrypt -in Certificates.p7m -inkey' + 'openssl cms -decrypt -in /dev/stdin -inkey' ' {private_key} -recip {certificate} | openssl pkcs12 -nodes' ' -password pass:'.format(**self.certificate_names), - shell=True) - private_keys, certificates = [], [] + shell=True, data=b'\n'.join(lines)) + return out + + def parse_certificates(self, certificates_xml): + """Given the Certificates XML document, return a dictionary of + fingerprints and associated SSH keys derived from the certs.""" + out = self._decrypt_certs_from_xml(certificates_xml) current = [] + keys = {} for line in out.splitlines(): current.append(line) if re.match(r'[-]+END .*?KEY[-]+$', line): - private_keys.append('\n'.join(current)) + # ignore private_keys current = [] elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line): - certificates.append('\n'.join(current)) + certificate = '\n'.join(current) + ssh_key = self._get_ssh_key_from_cert(certificate) + fingerprint = self._get_fingerprint_from_cert(certificate) + keys[fingerprint] = ssh_key current = [] - keys = [] - for certificate in certificates: - with cd(self.tmpdir): - public_key, _ = util.subp( - 'openssl x509 -noout -pubkey |' - 'ssh-keygen -i -m PKCS8 -f /dev/stdin', - data=certificate, - shell=True) - keys.append(public_key) return keys @@ -206,7 +231,6 @@ class WALinuxAgentShim(object): self.dhcpoptions = dhcp_options self._endpoint = None self.openssl_manager = None - self.values = {} self.lease_file = fallback_lease_file def clean_up(self): @@ -328,8 +352,9 @@ class WALinuxAgentShim(object): LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return endpoint_ip_address - def register_with_azure_and_fetch_data(self): - self.openssl_manager = OpenSSLManager() + def register_with_azure_and_fetch_data(self, pubkey_info=None): + if self.openssl_manager is None: + self.openssl_manager = OpenSSLManager() http_client = AzureEndpointHttpClient(self.openssl_manager.certificate) LOG.info('Registering with Azure...') attempts = 0 @@ -347,16 +372,37 @@ class WALinuxAgentShim(object): attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) - public_keys = [] - if goal_state.certificates_xml is not None: + ssh_keys = [] + if goal_state.certificates_xml is not None and pubkey_info is not None: LOG.debug('Certificate XML found; parsing out public keys.') - public_keys = self.openssl_manager.parse_certificates( + keys_by_fingerprint = self.openssl_manager.parse_certificates( goal_state.certificates_xml) - data = { - 'public-keys': public_keys, - } + ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info) self._report_ready(goal_state, http_client) - return data + return {'public-keys': ssh_keys} + + def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info): + """cloud-init expects a straightforward array of keys to be dropped + into the user's authorized_keys file. Azure control plane exposes + multiple public keys to the VM via wireserver. Select just the + user's key(s) and return them, ignoring any other certs. 
+ """ + keys = [] + for pubkey in pubkey_info: + if 'value' in pubkey and pubkey['value']: + keys.append(pubkey['value']) + elif 'fingerprint' in pubkey and pubkey['fingerprint']: + fingerprint = pubkey['fingerprint'] + if fingerprint in keys_by_fingerprint: + keys.append(keys_by_fingerprint[fingerprint]) + else: + LOG.warning("ovf-env.xml specified PublicKey fingerprint " + "%s not found in goalstate XML", fingerprint) + else: + LOG.warning("ovf-env.xml specified PublicKey with neither " + "value nor fingerprint: %s", pubkey) + + return keys def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') @@ -373,11 +419,12 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') -def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): +def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, + pubkey_info=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts) try: - return shim.register_with_azure_and_fetch_data() + return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) finally: shim.clean_up() -- cgit v1.2.3 From 5352dd99eb2937b4eaaaf596b40ad7ca69d87f64 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 4 Mar 2019 18:41:05 +0000 Subject: helpers/openstack: Treat unknown link types as physical Some deployments of OpenStack expose link types to the guest which cloud-init doesn't recognise. These will almost always be physical, so we can operate more robustly if we assume that they are (whilst warning the user that we're seeing something unexpected). LP: #1639263 --- cloudinit/sources/helpers/openstack.py | 12 +++++------ .../unittests/test_datasource/test_configdrive.py | 23 ++++++++++++++++++++++ 2 files changed, 29 insertions(+), 6 deletions(-) (limited to 'cloudinit/sources/helpers') diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 9c29ceac..8f069115 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -67,7 +67,7 @@ OS_VERSIONS = ( OS_ROCKY, ) -PHYSICAL_TYPES = ( +KNOWN_PHYSICAL_TYPES = ( None, 'bgpovs', # not present in OpenStack upstream but used on OVH cloud. 
'bridge', @@ -600,9 +600,7 @@ def convert_net_json(network_json=None, known_macs=None): subnet['ipv6'] = True subnets.append(subnet) cfg.update({'subnets': subnets}) - if link['type'] in PHYSICAL_TYPES: - cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) - elif link['type'] in ['bond']: + if link['type'] in ['bond']: params = {} if link_mac_addr: params['mac_address'] = link_mac_addr @@ -641,8 +639,10 @@ def convert_net_json(network_json=None, known_macs=None): curinfo.update({'mac': link['vlan_mac_address'], 'name': name}) else: - raise ValueError( - 'Unknown network_data link type: %s' % link['type']) + if link['type'] not in KNOWN_PHYSICAL_TYPES: + LOG.warning('Unknown network_data link type (%s); treating as' + ' physical', link['type']) + cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) config.append(cfg) link_id_info[curinfo['id']] = curinfo diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 7a6802f6..520c50fe 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -600,6 +600,9 @@ class TestNetJson(CiTestCase): class TestConvertNetworkData(CiTestCase): + + with_logs = True + def setUp(self): super(TestConvertNetworkData, self).setUp() self.tmp = self.tmp_dir() @@ -726,6 +729,26 @@ class TestConvertNetworkData(CiTestCase): 'enp0s2': 'fa:16:3e:d4:57:ad'} self.assertEqual(expected, config_name2mac) + def test_unknown_device_types_accepted(self): + # If we don't recognise a link, we should treat it as physical for a + # best-effort boot + my_netdata = deepcopy(NETWORK_DATA) + my_netdata['links'][0]['type'] = 'my-special-link-type' + + ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) + config_name2mac = {} + for n in ncfg['config']: + if n['type'] == 'physical': + config_name2mac[n['name']] = n['mac_address'] + + expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', + 'enp0s2': 'fa:16:3e:d4:57:ad'} + self.assertEqual(expected, config_name2mac) + + # We should, however, warn the user that we don't recognise the type + self.assertIn('Unknown network_data link type (my-special-link-type)', + self.logs.getvalue()) + def cfg_ds_from_dir(base_d, files=None): run = os.path.join(base_d, "run") -- cgit v1.2.3 From 0d8c88393b51db6454491a379dcc2e691551217a Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Wed, 3 Apr 2019 18:23:18 +0000 Subject: DatasourceAzure: add additional logging for azure datasource Create an Azure logging decorator and use additional ReportEventStack context managers to provide additional logging details. 
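The decorator pattern this patch applies everywhere is worth spelling out: each wrapped function call is enclosed in a ReportEventStack context manager, so function entry and exit are emitted as telemetry events under the datasource's parent stack. A minimal, self-contained sketch of that shape — the print-based stack here is a stand-in for cloudinit.reporting.events.ReportEventStack, which the patch itself uses:

import functools
from contextlib import contextmanager


@contextmanager
def report_event_stack(name, description, parent=None):
    # Stand-in for cloudinit.reporting.events.ReportEventStack: emit a
    # start event on entry and a finish event on exit.
    print("start: %s (%s)" % (name, description))
    try:
        yield
    finally:
        print("finish: %s" % name)


def telemetry_reporter(func):
    # Same shape as the azure_ds_telemetry_reporter decorator added below:
    # every call to the wrapped function becomes a child telemetry event.
    @functools.wraps(func)
    def impl(*args, **kwargs):
        with report_event_stack(func.__name__, func.__name__):
            return func(*args, **kwargs)
    return impl


@telemetry_reporter
def crawl_metadata():
    return {'local-hostname': 'example'}


crawl_metadata()  # prints the start/finish events around the call
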
--- cloudinit/sources/DataSourceAzure.py | 231 ++++++++++++++++++++++------------- cloudinit/sources/helpers/azure.py | 31 +++++ 2 files changed, 179 insertions(+), 83 deletions(-) mode change 100644 => 100755 cloudinit/sources/DataSourceAzure.py mode change 100644 => 100755 cloudinit/sources/helpers/azure.py (limited to 'cloudinit/sources/helpers') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py old mode 100644 new mode 100755 index b4e3f061..d4230b3c --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -21,10 +21,14 @@ from cloudinit import net from cloudinit.event import EventType from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources -from cloudinit.sources.helpers.azure import get_metadata_from_fabric from cloudinit.sources.helpers import netlink from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util +from cloudinit.reporting import events + +from cloudinit.sources.helpers.azure import (azure_ds_reporter, + azure_ds_telemetry_reporter, + get_metadata_from_fabric) LOG = logging.getLogger(__name__) @@ -244,6 +248,7 @@ def set_hostname(hostname, hostname_command='hostname'): util.subp([hostname_command, hostname]) +@azure_ds_telemetry_reporter @contextlib.contextmanager def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): """ @@ -290,6 +295,7 @@ class DataSourceAzure(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) + @azure_ds_telemetry_reporter def bounce_network_with_azure_hostname(self): # When using cloud-init to provision, we have to set the hostname from # the metadata and "bounce" the network to force DDNS to update via @@ -315,6 +321,7 @@ class DataSourceAzure(sources.DataSource): util.logexc(LOG, "handling set_hostname failed") return False + @azure_ds_telemetry_reporter def get_metadata_from_agent(self): temp_hostname = self.metadata.get('local-hostname') agent_cmd = self.ds_cfg['agent_command'] @@ -344,15 +351,18 @@ class DataSourceAzure(sources.DataSource): LOG.debug("ssh authentication: " "using fingerprint from fabirc") - # wait very long for public SSH keys to arrive - # https://bugs.launchpad.net/cloud-init/+bug/1717611 - missing = util.log_time(logfunc=LOG.debug, - msg="waiting for SSH public key files", - func=util.wait_for_files, - args=(fp_files, 900)) - - if len(missing): - LOG.warning("Did not find files, but going on: %s", missing) + with events.ReportEventStack( + name="waiting-for-ssh-public-key", + description="wait for agents to retrieve ssh keys", + parent=azure_ds_reporter): + # wait very long for public SSH keys to arrive + # https://bugs.launchpad.net/cloud-init/+bug/1717611 + missing = util.log_time(logfunc=LOG.debug, + msg="waiting for SSH public key files", + func=util.wait_for_files, + args=(fp_files, 900)) + if len(missing): + LOG.warning("Did not find files, but going on: %s", missing) metadata = {} metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) @@ -366,6 +376,7 @@ class DataSourceAzure(sources.DataSource): subplatform_type = 'seed-dir' return '%s (%s)' % (subplatform_type, self.seed) + @azure_ds_telemetry_reporter def crawl_metadata(self): """Walk all instance metadata sources returning a dict on success. 
@@ -467,6 +478,7 @@ class DataSourceAzure(sources.DataSource): super(DataSourceAzure, self).clear_cached_attrs(attr_defaults) self._metadata_imds = sources.UNSET + @azure_ds_telemetry_reporter def _get_data(self): """Crawl and process datasource metadata caching metadata as attrs. @@ -513,6 +525,7 @@ class DataSourceAzure(sources.DataSource): # quickly (local check only) if self.instance_id is still valid return sources.instance_id_matches_system_uuid(self.get_instance_id()) + @azure_ds_telemetry_reporter def setup(self, is_new_instance): if self._negotiated is False: LOG.debug("negotiating for %s (new_instance=%s)", @@ -580,6 +593,7 @@ class DataSourceAzure(sources.DataSource): if nl_sock: nl_sock.close() + @azure_ds_telemetry_reporter def _report_ready(self, lease): """Tells the fabric provisioning has completed """ try: @@ -617,9 +631,14 @@ class DataSourceAzure(sources.DataSource): def _reprovision(self): """Initiate the reprovisioning workflow.""" contents = self._poll_imds() - md, ud, cfg = read_azure_ovf(contents) - return (md, ud, cfg, {'ovf-env.xml': contents}) - + with events.ReportEventStack( + name="reprovisioning-read-azure-ovf", + description="read azure ovf during reprovisioning", + parent=azure_ds_reporter): + md, ud, cfg = read_azure_ovf(contents) + return (md, ud, cfg, {'ovf-env.xml': contents}) + + @azure_ds_telemetry_reporter def _negotiate(self): """Negotiate with fabric and return data from it. @@ -652,6 +671,7 @@ class DataSourceAzure(sources.DataSource): util.del_file(REPROVISION_MARKER_FILE) return fabric_data + @azure_ds_telemetry_reporter def activate(self, cfg, is_new_instance): address_ephemeral_resize(is_new_instance=is_new_instance, preserve_ntfs=self.ds_cfg.get( @@ -690,12 +710,14 @@ def _partitions_on_device(devpath, maxnum=16): return [] +@azure_ds_telemetry_reporter def _has_ntfs_filesystem(devpath): ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) LOG.debug('ntfs_devices found = %s', ntfs_devices) return os.path.realpath(devpath) in ntfs_devices +@azure_ds_telemetry_reporter def can_dev_be_reformatted(devpath, preserve_ntfs): """Determine if the ephemeral drive at devpath should be reformatted. @@ -744,43 +766,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): (cand_part, cand_path, devpath)) return False, msg + @azure_ds_telemetry_reporter def count_files(mp): ignored = set(['dataloss_warning_readme.txt']) return len([f for f in os.listdir(mp) if f.lower() not in ignored]) bmsg = ('partition %s (%s) on device %s was ntfs formatted' % (cand_part, cand_path, devpath)) - try: - file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", - update_env_for_mount={'LANG': 'C'}) - except util.MountFailedError as e: - if "unknown filesystem type 'ntfs'" in str(e): - return True, (bmsg + ' but this system cannot mount NTFS,' - ' assuming there are no important files.' - ' Formatting allowed.') - return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) - - if file_count != 0: - LOG.warning("it looks like you're using NTFS on the ephemeral disk, " - 'to ensure that filesystem does not get wiped, set ' - '%s.%s in config', '.'.join(DS_CFG_PATH), - DS_CFG_KEY_PRESERVE_NTFS) - return False, bmsg + ' but had %d files on it.' 
% file_count + + with events.ReportEventStack( + name="mount-ntfs-and-count", + description="mount-ntfs-and-count", + parent=azure_ds_reporter) as evt: + try: + file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", + update_env_for_mount={'LANG': 'C'}) + except util.MountFailedError as e: + evt.description = "cannot mount ntfs" + if "unknown filesystem type 'ntfs'" in str(e): + return True, (bmsg + ' but this system cannot mount NTFS,' + ' assuming there are no important files.' + ' Formatting allowed.') + return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) + + if file_count != 0: + evt.description = "mounted and counted %d files" % file_count + LOG.warning("it looks like you're using NTFS on the ephemeral" + " disk, to ensure that filesystem does not get wiped," + " set %s.%s in config", '.'.join(DS_CFG_PATH), + DS_CFG_KEY_PRESERVE_NTFS) + return False, bmsg + ' but had %d files on it.' % file_count return True, bmsg + ' and had no important files. Safe for reformatting.' +@azure_ds_telemetry_reporter def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, is_new_instance=False, preserve_ntfs=False): # wait for ephemeral disk to come up naplen = .2 - missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, - log_pre="Azure ephemeral disk: ") - - if missing: - LOG.warning("ephemeral device '%s' did not appear after %d seconds.", - devpath, maxwait) - return + with events.ReportEventStack( + name="wait-for-ephemeral-disk", + description="wait for ephemeral disk", + parent=azure_ds_reporter): + missing = util.wait_for_files([devpath], + maxwait=maxwait, + naplen=naplen, + log_pre="Azure ephemeral disk: ") + + if missing: + LOG.warning("ephemeral device '%s' did" + " not appear after %d seconds.", + devpath, maxwait) + return result = False msg = None @@ -808,6 +846,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, return +@azure_ds_telemetry_reporter def perform_hostname_bounce(hostname, cfg, prev_hostname): # set the hostname to 'hostname' if it is not already set to that. # then, if policy is not off, bounce the interface using command @@ -843,6 +882,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): return True +@azure_ds_telemetry_reporter def crtfile_to_pubkey(fname, data=None): pipeline = ('openssl x509 -noout -pubkey < "$0" |' 'ssh-keygen -i -m PKCS8 -f /dev/stdin') @@ -851,6 +891,7 @@ def crtfile_to_pubkey(fname, data=None): return out.rstrip() +@azure_ds_telemetry_reporter def pubkeys_from_crt_files(flist): pubkeys = [] errors = [] @@ -866,6 +907,7 @@ def pubkeys_from_crt_files(flist): return pubkeys +@azure_ds_telemetry_reporter def write_files(datadir, files, dirmode=None): def _redact_password(cnt, fname): @@ -893,6 +935,7 @@ def write_files(datadir, files, dirmode=None): util.write_file(filename=fname, content=content, mode=0o600) +@azure_ds_telemetry_reporter def invoke_agent(cmd): # this is a function itself to simplify patching it for test if cmd: @@ -912,6 +955,7 @@ def find_child(node, filter_func): return ret +@azure_ds_telemetry_reporter def load_azure_ovf_pubkeys(sshnode): # This parses a 'SSH' node formatted like below, and returns # an array of dicts. 
@@ -964,6 +1008,7 @@ def load_azure_ovf_pubkeys(sshnode): return found +@azure_ds_telemetry_reporter def read_azure_ovf(contents): try: dom = minidom.parseString(contents) @@ -1064,6 +1109,7 @@ def read_azure_ovf(contents): return (md, ud, cfg) +@azure_ds_telemetry_reporter def _extract_preprovisioned_vm_setting(dom): """Read the preprovision flag from the ovf. It should not exist unless true.""" @@ -1092,6 +1138,7 @@ def encrypt_pass(password, salt_id="$6$"): return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) +@azure_ds_telemetry_reporter def _check_freebsd_cdrom(cdrom_dev): """Return boolean indicating path to cdrom device has content.""" try: @@ -1103,6 +1150,7 @@ def _check_freebsd_cdrom(cdrom_dev): return False +@azure_ds_telemetry_reporter def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): """Return content random seed file if available, otherwise, return None.""" @@ -1126,6 +1174,7 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): return seed +@azure_ds_telemetry_reporter def list_possible_azure_ds_devs(): devlist = [] if util.is_FreeBSD(): @@ -1140,6 +1189,7 @@ def list_possible_azure_ds_devs(): return devlist +@azure_ds_telemetry_reporter def load_azure_ds_dir(source_dir): ovf_file = os.path.join(source_dir, "ovf-env.xml") @@ -1162,47 +1212,54 @@ def parse_network_config(imds_metadata): @param: imds_metadata: Dict of content read from IMDS network service. @return: Dictionary containing network version 2 standard configuration. """ - if imds_metadata != sources.UNSET and imds_metadata: - netconfig = {'version': 2, 'ethernets': {}} - LOG.debug('Azure: generating network configuration from IMDS') - network_metadata = imds_metadata['network'] - for idx, intf in enumerate(network_metadata['interface']): - nicname = 'eth{idx}'.format(idx=idx) - dev_config = {} - for addr4 in intf['ipv4']['ipAddress']: - privateIpv4 = addr4['privateIpAddress'] - if privateIpv4: - if dev_config.get('dhcp4', False): - # Append static address config for nic > 1 - netPrefix = intf['ipv4']['subnet'][0].get( - 'prefix', '24') - if not dev_config.get('addresses'): - dev_config['addresses'] = [] - dev_config['addresses'].append( - '{ip}/{prefix}'.format( - ip=privateIpv4, prefix=netPrefix)) - else: - dev_config['dhcp4'] = True - for addr6 in intf['ipv6']['ipAddress']: - privateIpv6 = addr6['privateIpAddress'] - if privateIpv6: - dev_config['dhcp6'] = True - break - if dev_config: - mac = ':'.join(re.findall(r'..', intf['macAddress'])) - dev_config.update( - {'match': {'macaddress': mac.lower()}, - 'set-name': nicname}) - netconfig['ethernets'][nicname] = dev_config - else: - blacklist = ['mlx4_core'] - LOG.debug('Azure: generating fallback configuration') - # generate a network config, blacklist picking mlx4_core devs - netconfig = net.generate_fallback_config( - blacklist_drivers=blacklist, config_driver=True) - return netconfig + with events.ReportEventStack( + name="parse_network_config", + description="", + parent=azure_ds_reporter) as evt: + if imds_metadata != sources.UNSET and imds_metadata: + netconfig = {'version': 2, 'ethernets': {}} + LOG.debug('Azure: generating network configuration from IMDS') + network_metadata = imds_metadata['network'] + for idx, intf in enumerate(network_metadata['interface']): + nicname = 'eth{idx}'.format(idx=idx) + dev_config = {} + for addr4 in intf['ipv4']['ipAddress']: + privateIpv4 = addr4['privateIpAddress'] + if privateIpv4: + if dev_config.get('dhcp4', False): + # Append static address config for nic > 1 + netPrefix = 
intf['ipv4']['subnet'][0].get( + 'prefix', '24') + if not dev_config.get('addresses'): + dev_config['addresses'] = [] + dev_config['addresses'].append( + '{ip}/{prefix}'.format( + ip=privateIpv4, prefix=netPrefix)) + else: + dev_config['dhcp4'] = True + for addr6 in intf['ipv6']['ipAddress']: + privateIpv6 = addr6['privateIpAddress'] + if privateIpv6: + dev_config['dhcp6'] = True + break + if dev_config: + mac = ':'.join(re.findall(r'..', intf['macAddress'])) + dev_config.update( + {'match': {'macaddress': mac.lower()}, + 'set-name': nicname}) + netconfig['ethernets'][nicname] = dev_config + evt.description = "network config from imds" + else: + blacklist = ['mlx4_core'] + LOG.debug('Azure: generating fallback configuration') + # generate a network config, blacklist picking mlx4_core devs + netconfig = net.generate_fallback_config( + blacklist_drivers=blacklist, config_driver=True) + evt.description = "network config from fallback" + return netconfig +@azure_ds_telemetry_reporter def get_metadata_from_imds(fallback_nic, retries): """Query Azure's network metadata service, returning a dictionary. @@ -1227,6 +1284,7 @@ def get_metadata_from_imds(fallback_nic, retries): return util.log_time(**kwargs) +@azure_ds_telemetry_reporter def _get_metadata_from_imds(retries): url = IMDS_URL + "instance?api-version=2017-12-01" @@ -1246,6 +1304,7 @@ def _get_metadata_from_imds(retries): return {} +@azure_ds_telemetry_reporter def maybe_remove_ubuntu_network_config_scripts(paths=None): """Remove Azure-specific ubuntu network config for non-primary nics. @@ -1283,14 +1342,20 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): def _is_platform_viable(seed_dir): - """Check platform environment to report if this datasource may run.""" - asset_tag = util.read_dmi_data('chassis-asset-tag') - if asset_tag == AZURE_CHASSIS_ASSET_TAG: - return True - LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) - if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): - return True - return False + with events.ReportEventStack( + name="check-platform-viability", + description="found azure asset tag", + parent=azure_ds_reporter) as evt: + + """Check platform environment to report if this datasource may run.""" + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag == AZURE_CHASSIS_ASSET_TAG: + return True + LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) + evt.description = "Non-Azure DMI asset tag '%s' discovered.", asset_tag + if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): + return True + return False class BrokenAzureDataSource(Exception): diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py old mode 100644 new mode 100755 index 2829dd20..d3af05ee --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -16,10 +16,27 @@ from xml.etree import ElementTree from cloudinit import url_helper from cloudinit import util +from cloudinit.reporting import events LOG = logging.getLogger(__name__) +azure_ds_reporter = events.ReportEventStack( + name="azure-ds", + description="initialize reporter for azure ds", + reporting_enabled=True) + + +def azure_ds_telemetry_reporter(func): + def impl(*args, **kwargs): + with events.ReportEventStack( + name=func.__name__, + description=func.__name__, + parent=azure_ds_reporter): + return func(*args, **kwargs) + return impl + + @contextmanager def cd(newdir): prevdir = os.getcwd() @@ -119,6 +136,7 @@ class OpenSSLManager(object): def clean_up(self): 
util.del_dir(self.tmpdir) + @azure_ds_telemetry_reporter def generate_certificate(self): LOG.debug('Generating certificate for communication with fabric...') if self.certificate is not None: @@ -139,17 +157,20 @@ class OpenSSLManager(object): LOG.debug('New certificate generated.') @staticmethod + @azure_ds_telemetry_reporter def _run_x509_action(action, cert): cmd = ['openssl', 'x509', '-noout', action] result, _ = util.subp(cmd, data=cert) return result + @azure_ds_telemetry_reporter def _get_ssh_key_from_cert(self, certificate): pub_key = self._run_x509_action('-pubkey', certificate) keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] ssh_key, _ = util.subp(keygen_cmd, data=pub_key) return ssh_key + @azure_ds_telemetry_reporter def _get_fingerprint_from_cert(self, certificate): """openssl x509 formats fingerprints as so: 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ @@ -163,6 +184,7 @@ class OpenSSLManager(object): octets = raw_fp[eq+1:-1].split(':') return ''.join(octets) + @azure_ds_telemetry_reporter def _decrypt_certs_from_xml(self, certificates_xml): """Decrypt the certificates XML document using the our private key; return the list of certs and private keys contained in the doc. @@ -185,6 +207,7 @@ class OpenSSLManager(object): shell=True, data=b'\n'.join(lines)) return out + @azure_ds_telemetry_reporter def parse_certificates(self, certificates_xml): """Given the Certificates XML document, return a dictionary of fingerprints and associated SSH keys derived from the certs.""" @@ -265,11 +288,13 @@ class WALinuxAgentShim(object): return socket.inet_ntoa(packed_bytes) @staticmethod + @azure_ds_telemetry_reporter def _networkd_get_value_from_leases(leases_d=None): return dhcp.networkd_get_option_from_leases( 'OPTION_245', leases_d=leases_d) @staticmethod + @azure_ds_telemetry_reporter def _get_value_from_leases_file(fallback_lease_file): leases = [] content = util.load_file(fallback_lease_file) @@ -287,6 +312,7 @@ class WALinuxAgentShim(object): return leases[-1] @staticmethod + @azure_ds_telemetry_reporter def _load_dhclient_json(): dhcp_options = {} hooks_dir = WALinuxAgentShim._get_hooks_dir() @@ -305,6 +331,7 @@ class WALinuxAgentShim(object): return dhcp_options @staticmethod + @azure_ds_telemetry_reporter def _get_value_from_dhcpoptions(dhcp_options): if dhcp_options is None: return None @@ -318,6 +345,7 @@ class WALinuxAgentShim(object): return _value @staticmethod + @azure_ds_telemetry_reporter def find_endpoint(fallback_lease_file=None, dhcp245=None): value = None if dhcp245 is not None: @@ -352,6 +380,7 @@ class WALinuxAgentShim(object): LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return endpoint_ip_address + @azure_ds_telemetry_reporter def register_with_azure_and_fetch_data(self, pubkey_info=None): if self.openssl_manager is None: self.openssl_manager = OpenSSLManager() @@ -404,6 +433,7 @@ class WALinuxAgentShim(object): return keys + @azure_ds_telemetry_reporter def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') document = self.REPORT_READY_XML_TEMPLATE.format( @@ -419,6 +449,7 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') +@azure_ds_telemetry_reporter def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, pubkey_info=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, -- cgit v1.2.3 From baa478546d8cac98a706010699d64f8c2f70b5bf Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Fri, 10 May 2019 
18:38:55 +0000 Subject: Azure: Return static fallback address as if failed to find endpoint The Azure data source helper attempts to use information in the dhcp lease to find the Wireserver endpoint (IP address). Under some unusual circumstances, those attempts will fail. This change uses a static address, known to be always correct in the Azure public and sovereign clouds, when the helper fails to locate a valid dhcp lease. This address is not guaranteed to be correct in Azure Stack environments; it's still best to use the information from the lease whenever possible. --- cloudinit/sources/helpers/azure.py | 14 +++++++++++--- tests/unittests/test_datasource/test_azure_helper.py | 9 +++++++-- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'cloudinit/sources/helpers') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index d3af05ee..82c4c8c4 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -20,6 +20,9 @@ from cloudinit.reporting import events LOG = logging.getLogger(__name__) +# This endpoint matches the format as found in dhcp lease files, since this +# value is applied if the endpoint can't be found within a lease file +DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10" azure_ds_reporter = events.ReportEventStack( name="azure-ds", @@ -297,7 +300,12 @@ class WALinuxAgentShim(object): @azure_ds_telemetry_reporter def _get_value_from_leases_file(fallback_lease_file): leases = [] - content = util.load_file(fallback_lease_file) + try: + content = util.load_file(fallback_lease_file) + except IOError as ex: + LOG.error("Failed to read %s: %s", fallback_lease_file, ex) + return None + LOG.debug("content is %s", content) option_name = _get_dhcp_endpoint_option_name() for line in content.splitlines(): @@ -372,9 +380,9 @@ class WALinuxAgentShim(object): fallback_lease_file) value = WALinuxAgentShim._get_value_from_leases_file( fallback_lease_file) - if value is None: - raise ValueError('No endpoint found.') + LOG.warning("No lease found; using default endpoint") + value = DEFAULT_WIRESERVER_ENDPOINT endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) LOG.debug('Azure endpoint found at %s', endpoint_ip_address) diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 02556165..bd006aba 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -67,12 +67,17 @@ class TestFindEndpoint(CiTestCase): self.networkd_leases.return_value = None def test_missing_file(self): - self.assertRaises(ValueError, wa_shim.find_endpoint) + """wa_shim find_endpoint uses default endpoint if leasefile not found + """ + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") def test_missing_special_azure_line(self): + """wa_shim find_endpoint uses default endpoint if leasefile is found + but does not contain DHCP Option 245 (whose value is the endpoint) + """ self.load_file.return_value = '' self.dhcp_options.return_value = {'eth0': {'key': 'value'}} - self.assertRaises(ValueError, wa_shim.find_endpoint) + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") @staticmethod def _build_lease_content(encoded_address): -- cgit v1.2.3 From 9c47c682b7aaa185c32a68f4dea8e23e9a2ef565 Mon Sep 17 00:00:00 2001 From: Xiaofeng Wang Date: Tue, 16 Jul 2019 13:09:38 +0000 Subject: VMWare: Trigger the post customization script via cc_scripts module. 
cloud-init does not trigger reboots of a VM therefore adding custom scripts to rc.local does not execute the post scripts. This patch moves post-scripts into per-instance scripts dir and has cc_scripts module run the post-scripts. Also in this branch: - Remove the sh interpreter and execute the customization script directly. - Update the unit test. LP: #1833192 --- cloudinit/sources/DataSourceOVF.py | 7 +- .../helpers/vmware/imc/config_custom_script.py | 143 ++++++--------------- tests/unittests/test_vmware/test_custom_script.py | 116 +++++++++-------- 3 files changed, 111 insertions(+), 155 deletions(-) (limited to 'cloudinit/sources/helpers') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 70e7a5c0..dd941d2e 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -148,6 +148,9 @@ class DataSourceOVF(sources.DataSource): product_marker, os.path.join(self.paths.cloud_dir, 'data')) special_customization = product_marker and not hasmarkerfile customscript = self._vmware_cust_conf.custom_script_name + ccScriptsDir = os.path.join( + self.paths.get_cpath("scripts"), + "per-instance") except Exception as e: _raise_error_status( "Error parsing the customization Config File", @@ -201,7 +204,9 @@ class DataSourceOVF(sources.DataSource): if customscript: try: - postcust = PostCustomScript(customscript, imcdirpath) + postcust = PostCustomScript(customscript, + imcdirpath, + ccScriptsDir) postcust.execute() except Exception as e: _raise_error_status( diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py index a7d4ad91..9f14770e 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py +++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py @@ -1,5 +1,5 @@ # Copyright (C) 2017 Canonical Ltd. -# Copyright (C) 2017 VMware Inc. +# Copyright (C) 2017-2019 VMware Inc. # # Author: Maitreyee Saikia # @@ -8,7 +8,6 @@ import logging import os import stat -from textwrap import dedent from cloudinit import util @@ -20,12 +19,15 @@ class CustomScriptNotFound(Exception): class CustomScriptConstant(object): - RC_LOCAL = "/etc/rc.local" - POST_CUST_TMP_DIR = "/root/.customization" - POST_CUST_RUN_SCRIPT_NAME = "post-customize-guest.sh" - POST_CUST_RUN_SCRIPT = os.path.join(POST_CUST_TMP_DIR, - POST_CUST_RUN_SCRIPT_NAME) - POST_REBOOT_PENDING_MARKER = "/.guest-customization-post-reboot-pending" + CUSTOM_TMP_DIR = "/root/.customization" + + # The user defined custom script + CUSTOM_SCRIPT_NAME = "customize.sh" + CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR, + CUSTOM_SCRIPT_NAME) + POST_CUSTOM_PENDING_MARKER = "/.guest-customization-post-reboot-pending" + # The cc_scripts_per_instance script to launch custom script + POST_CUSTOM_SCRIPT_NAME = "post-customize-guest.sh" class RunCustomScript(object): @@ -39,10 +41,19 @@ class RunCustomScript(object): raise CustomScriptNotFound("Script %s not found!! " "Cannot execute custom script!" 
% self.scriptpath) + + util.ensure_dir(CustomScriptConstant.CUSTOM_TMP_DIR) + + LOG.debug("Copying custom script to %s", + CustomScriptConstant.CUSTOM_SCRIPT) + util.copy(self.scriptpath, CustomScriptConstant.CUSTOM_SCRIPT) + # Strip any CR characters from the decoded script - util.load_file(self.scriptpath).replace("\r", "") - st = os.stat(self.scriptpath) - os.chmod(self.scriptpath, st.st_mode | stat.S_IEXEC) + content = util.load_file( + CustomScriptConstant.CUSTOM_SCRIPT).replace("\r", "") + util.write_file(CustomScriptConstant.CUSTOM_SCRIPT, + content, + mode=0o544) class PreCustomScript(RunCustomScript): @@ -50,104 +61,34 @@ class PreCustomScript(RunCustomScript): """Executing custom script with precustomization argument.""" LOG.debug("Executing pre-customization script") self.prepare_script() - util.subp(["/bin/sh", self.scriptpath, "precustomization"]) + util.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"]) class PostCustomScript(RunCustomScript): - def __init__(self, scriptname, directory): + def __init__(self, scriptname, directory, ccScriptsDir): super(PostCustomScript, self).__init__(scriptname, directory) - # Determine when to run custom script. When postreboot is True, - # the user uploaded script will run as part of rc.local after - # the machine reboots. This is determined by presence of rclocal. - # When postreboot is False, script will run as part of cloud-init. - self.postreboot = False - - def _install_post_reboot_agent(self, rclocal): - """ - Install post-reboot agent for running custom script after reboot. - As part of this process, we are editing the rclocal file to run a - VMware script, which in turn is resposible for handling the user - script. - @param: path to rc local. - """ - LOG.debug("Installing post-reboot customization from %s to %s", - self.directory, rclocal) - if not self.has_previous_agent(rclocal): - LOG.info("Adding post-reboot customization agent to rc.local") - new_content = dedent(""" - # Run post-reboot guest customization - /bin/sh %s - exit 0 - """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT - existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '') - st = os.stat(rclocal) - # "x" flag should be set - mode = st.st_mode | stat.S_IEXEC - util.write_file(rclocal, existing_rclocal + new_content, mode) - - else: - # We don't need to update rclocal file everytime a customization - # is requested. It just needs to be done for the first time. - LOG.info("Post-reboot guest customization agent is already " - "registered in rc.local") - LOG.debug("Installing post-reboot customization agent finished: %s", - self.postreboot) - - def has_previous_agent(self, rclocal): - searchstring = "# Run post-reboot guest customization" - if searchstring in open(rclocal).read(): - return True - return False - - def find_rc_local(self): - """ - Determine if rc local is present. 
- """ - rclocal = "" - if os.path.exists(CustomScriptConstant.RC_LOCAL): - LOG.debug("rc.local detected.") - # resolving in case of symlink - rclocal = os.path.realpath(CustomScriptConstant.RC_LOCAL) - LOG.debug("rc.local resolved to %s", rclocal) - else: - LOG.warning("Can't find rc.local, post-customization " - "will be run before reboot") - return rclocal - - def install_agent(self): - rclocal = self.find_rc_local() - if rclocal: - self._install_post_reboot_agent(rclocal) - self.postreboot = True + self.ccScriptsDir = ccScriptsDir + self.ccScriptPath = os.path.join( + ccScriptsDir, + CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME) def execute(self): """ - This method executes post-customization script before or after reboot - based on the presence of rc local. + This method copy the post customize run script to + cc_scripts_per_instance directory and let this + module to run post custom script. """ self.prepare_script() - self.install_agent() - if not self.postreboot: - LOG.warning("Executing post-customization script inline") - util.subp(["/bin/sh", self.scriptpath, "postcustomization"]) - else: - LOG.debug("Scheduling custom script to run post reboot") - if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR): - os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR) - # Script "post-customize-guest.sh" and user uploaded script are - # are present in the same directory and needs to copied to a temp - # directory to be executed post reboot. User uploaded script is - # saved as customize.sh in the temp directory. - # post-customize-guest.sh excutes customize.sh after reboot. - LOG.debug("Copying post-customization script") - util.copy(self.scriptpath, - CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh") - LOG.debug("Copying script to run post-customization script") - util.copy( - os.path.join(self.directory, - CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME), - CustomScriptConstant.POST_CUST_RUN_SCRIPT) - LOG.info("Creating post-reboot pending marker") - util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER) + + LOG.debug("Copying post customize run script to %s", + self.ccScriptPath) + util.copy( + os.path.join(self.directory, + CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME), + self.ccScriptPath) + st = os.stat(self.ccScriptPath) + os.chmod(self.ccScriptPath, st.st_mode | stat.S_IEXEC) + LOG.info("Creating post customization pending marker") + util.ensure_file(CustomScriptConstant.POST_CUSTOM_PENDING_MARKER) # vi: ts=4 expandtab diff --git a/tests/unittests/test_vmware/test_custom_script.py b/tests/unittests/test_vmware/test_custom_script.py index 2d9519b0..f89f8157 100644 --- a/tests/unittests/test_vmware/test_custom_script.py +++ b/tests/unittests/test_vmware/test_custom_script.py @@ -1,10 +1,12 @@ # Copyright (C) 2015 Canonical Ltd. -# Copyright (C) 2017 VMware INC. +# Copyright (C) 2017-2019 VMware INC. # # Author: Maitreyee Saikia # # This file is part of cloud-init. See LICENSE file for license information. +import os +import stat from cloudinit import util from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( CustomScriptConstant, @@ -18,6 +20,10 @@ from cloudinit.tests.helpers import CiTestCase, mock class TestVmwareCustomScript(CiTestCase): def setUp(self): self.tmpDir = self.tmp_dir() + # Mock the tmpDir as the root dir in VM. 
+ self.execDir = os.path.join(self.tmpDir, ".customization") + self.execScript = os.path.join(self.execDir, + ".customize.sh") def test_prepare_custom_script(self): """ @@ -37,63 +43,67 @@ class TestVmwareCustomScript(CiTestCase): # Custom script exists. custScript = self.tmp_path("test-cust", self.tmpDir) - util.write_file(custScript, "test-CR-strip/r/r") - postCust = PostCustomScript("test-cust", self.tmpDir) - self.assertEqual("test-cust", postCust.scriptname) - self.assertEqual(self.tmpDir, postCust.directory) - self.assertEqual(custScript, postCust.scriptpath) - self.assertFalse(postCust.postreboot) - postCust.prepare_script() - # Check if all carraige returns are stripped from script. - self.assertFalse("/r" in custScript) + util.write_file(custScript, "test-CR-strip\r\r") + with mock.patch.object(CustomScriptConstant, + "CUSTOM_TMP_DIR", + self.execDir): + with mock.patch.object(CustomScriptConstant, + "CUSTOM_SCRIPT", + self.execScript): + postCust = PostCustomScript("test-cust", + self.tmpDir, + self.tmpDir) + self.assertEqual("test-cust", postCust.scriptname) + self.assertEqual(self.tmpDir, postCust.directory) + self.assertEqual(custScript, postCust.scriptpath) + postCust.prepare_script() - def test_rc_local_exists(self): - """ - This test is designed to verify the different scenarios associated - with the presence of rclocal. - """ - # test when rc local does not exist - postCust = PostCustomScript("test-cust", self.tmpDir) - with mock.patch.object(CustomScriptConstant, "RC_LOCAL", "/no/path"): - rclocal = postCust.find_rc_local() - self.assertEqual("", rclocal) - - # test when rc local exists - rclocalFile = self.tmp_path("vmware-rclocal", self.tmpDir) - util.write_file(rclocalFile, "# Run post-reboot guest customization", - omode="w") - with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalFile): - rclocal = postCust.find_rc_local() - self.assertEqual(rclocalFile, rclocal) - self.assertTrue(postCust.has_previous_agent, rclocal) - - # test when rc local is a symlink - rclocalLink = self.tmp_path("dummy-rclocal-link", self.tmpDir) - util.sym_link(rclocalFile, rclocalLink, True) - with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalLink): - rclocal = postCust.find_rc_local() - self.assertEqual(rclocalFile, rclocal) + # Custom script is copied with exec privilege + self.assertTrue(os.path.exists(self.execScript)) + st = os.stat(self.execScript) + self.assertTrue(st.st_mode & stat.S_IEXEC) + with open(self.execScript, "r") as f: + content = f.read() + self.assertEqual(content, "test-CR-strip") + # Check if all carraige returns are stripped from script. + self.assertFalse("\r" in content) def test_execute_post_cust(self): """ - This test is to identify if rclocal was properly populated to be - run after reboot. + This test is designed to verify the behavior after execute post + customization. """ - customscript = self.tmp_path("vmware-post-cust-script", self.tmpDir) - rclocal = self.tmp_path("vmware-rclocal", self.tmpDir) - # Create a temporary rclocal file - open(customscript, "w") - util.write_file(rclocal, "tests\nexit 0", omode="w") - postCust = PostCustomScript("vmware-post-cust-script", self.tmpDir) - with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocal): - # Test that guest customization agent is not installed initially. 
-            self.assertFalse(postCust.postreboot)
-            self.assertIs(postCust.has_previous_agent(rclocal), False)
-            postCust.install_agent()
+        # Prepare the customize package
+        postCustRun = self.tmp_path("post-customize-guest.sh", self.tmpDir)
+        util.write_file(postCustRun, "This is the script to run post cust")
+        userScript = self.tmp_path("test-cust", self.tmpDir)
+        util.write_file(userScript, "This is the post cust script")

-            # Assert rclocal has been modified to have guest customization
-            # agent.
-            self.assertTrue(postCust.postreboot)
-            self.assertTrue(postCust.has_previous_agent, rclocal)
+        # Mock the cc_scripts_per_instance dir and marker file.
+        # Create another tmp dir for cc_scripts_per_instance.
+        ccScriptDir = self.tmp_dir()
+        ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh")
+        markerFile = os.path.join(self.tmpDir, ".markerFile")
+        with mock.patch.object(CustomScriptConstant,
+                               "CUSTOM_TMP_DIR",
+                               self.execDir):
+            with mock.patch.object(CustomScriptConstant,
+                                   "CUSTOM_SCRIPT",
+                                   self.execScript):
+                with mock.patch.object(CustomScriptConstant,
+                                       "POST_CUSTOM_PENDING_MARKER",
+                                       markerFile):
+                    postCust = PostCustomScript("test-cust",
+                                                self.tmpDir,
+                                                ccScriptDir)
+                    postCust.execute()
+                    # Check cc_scripts_per_instance and marker file
+                    # are created.
+                    self.assertTrue(os.path.exists(ccScript))
+                    with open(ccScript, "r") as f:
+                        content = f.read()
+                    self.assertEqual(content,
+                                     "This is the script to run post cust")
+                    self.assertTrue(os.path.exists(markerFile))

 # vi: ts=4 expandtab
-- cgit v1.2.3

From 2f3bb764626b9065f4102c7c0a67998a9c174444 Mon Sep 17 00:00:00 2001
From: Anh Vo
Date: Wed, 14 Aug 2019 21:03:13 +0000
Subject: Azure: Record boot timestamps, system information, and diagnostic events
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Collect and record the following information through KVP:
 + timestamps related to kernel initialization and systemd activation
   of cloud-init services
 + system information including cloud-init version, kernel version,
   distro version, and python version
 + diagnostic events for the most common provisioning error issues
   such as empty dhcp lease, corrupted ovf-env.xml, etc.
+ increasing the log frequency of polling IMDS during reprovision.
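The boot-telemetry arithmetic in get_boot_telemetry() (added below) is easy to check by hand: systemd's *TimestampMonotonic properties count microseconds since kernel start, and the kernel's wall-clock start is simply the current time minus uptime. A small sketch of that conversion, assuming a Linux /proc/uptime; the monotonic value used at the end is illustrative:

import time
from datetime import datetime

# Wall-clock kernel start = now - uptime (assumes Linux /proc/uptime).
with open('/proc/uptime') as f:
    uptime = float(f.read().split()[0])
kernel_start = time.time() - uptime


def monotonic_to_utc(monotonic_usec):
    # systemd monotonic timestamps are microseconds since kernel start.
    ts = kernel_start + float(monotonic_usec) / 1000000
    return datetime.utcfromtimestamp(ts).isoformat() + 'Z'


# e.g. UserspaceTimestampMonotonic=2500000 means userspace came up 2.5
# seconds after the kernel started.
print(monotonic_to_utc(2500000))
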
--- cloudinit/sources/DataSourceAzure.py | 157 ++++++++++++++++++++----- cloudinit/sources/helpers/azure.py | 160 ++++++++++++++++++++++++-- tests/unittests/test_datasource/test_azure.py | 15 ++- tests/unittests/test_reporting_hyperv.py | 65 +++++++++++ 4 files changed, 353 insertions(+), 44 deletions(-) mode change 100755 => 100644 tests/unittests/test_reporting_hyperv.py (limited to 'cloudinit/sources/helpers') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index e6ed2f3b..4984fa84 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -26,9 +26,14 @@ from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util from cloudinit.reporting import events -from cloudinit.sources.helpers.azure import (azure_ds_reporter, - azure_ds_telemetry_reporter, - get_metadata_from_fabric) +from cloudinit.sources.helpers.azure import ( + azure_ds_reporter, + azure_ds_telemetry_reporter, + get_metadata_from_fabric, + get_boot_telemetry, + get_system_info, + report_diagnostic_event, + EphemeralDHCPv4WithReporting) LOG = logging.getLogger(__name__) @@ -354,7 +359,7 @@ class DataSourceAzure(sources.DataSource): bname = str(pk['fingerprint'] + ".crt") fp_files += [os.path.join(ddir, bname)] LOG.debug("ssh authentication: " - "using fingerprint from fabirc") + "using fingerprint from fabric") with events.ReportEventStack( name="waiting-for-ssh-public-key", @@ -419,12 +424,17 @@ class DataSourceAzure(sources.DataSource): ret = load_azure_ds_dir(cdev) except NonAzureDataSource: + report_diagnostic_event( + "Did not find Azure data source in %s" % cdev) continue except BrokenAzureDataSource as exc: msg = 'BrokenAzureDataSource: %s' % exc + report_diagnostic_event(msg) raise sources.InvalidMetaDataException(msg) except util.MountFailedError: - LOG.warning("%s was not mountable", cdev) + msg = '%s was not mountable' % cdev + report_diagnostic_event(msg) + LOG.warning(msg) continue perform_reprovision = reprovision or self._should_reprovision(ret) @@ -432,6 +442,7 @@ class DataSourceAzure(sources.DataSource): if util.is_FreeBSD(): msg = "Free BSD is not supported for PPS VMs" LOG.error(msg) + report_diagnostic_event(msg) raise sources.InvalidMetaDataException(msg) ret = self._reprovision() imds_md = get_metadata_from_imds( @@ -450,7 +461,9 @@ class DataSourceAzure(sources.DataSource): break if not found: - raise sources.InvalidMetaDataException('No Azure metadata found') + msg = 'No Azure metadata found' + report_diagnostic_event(msg) + raise sources.InvalidMetaDataException(msg) if found == ddir: LOG.debug("using files cached in %s", ddir) @@ -469,9 +482,14 @@ class DataSourceAzure(sources.DataSource): self._report_ready(lease=self._ephemeral_dhcp_ctx.lease) self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral else: - with EphemeralDHCPv4() as lease: - self._report_ready(lease=lease) - + try: + with EphemeralDHCPv4WithReporting( + azure_ds_reporter) as lease: + self._report_ready(lease=lease) + except Exception as e: + report_diagnostic_event( + "exception while reporting ready: %s" % e) + raise return crawled_data def _is_platform_viable(self): @@ -492,6 +510,16 @@ class DataSourceAzure(sources.DataSource): """ if not self._is_platform_viable(): return False + try: + get_boot_telemetry() + except Exception as e: + LOG.warning("Failed to get boot telemetry: %s", e) + + try: + get_system_info() + except Exception as e: + LOG.warning("Failed to get system information: %s", e) + try: 
crawled_data = util.log_time( logfunc=LOG.debug, msg='Crawl of metadata service', @@ -551,27 +579,55 @@ class DataSourceAzure(sources.DataSource): headers = {"Metadata": "true"} nl_sock = None report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) + self.imds_logging_threshold = 1 + self.imds_poll_counter = 1 + dhcp_attempts = 0 + vnet_switched = False + return_val = None def exc_cb(msg, exception): if isinstance(exception, UrlError) and exception.code == 404: + if self.imds_poll_counter == self.imds_logging_threshold: + # Reducing the logging frequency as we are polling IMDS + self.imds_logging_threshold *= 2 + LOG.debug("Call to IMDS with arguments %s failed " + "with status code %s after %s retries", + msg, exception.code, self.imds_poll_counter) + LOG.debug("Backing off logging threshold for the same " + "exception to %d", self.imds_logging_threshold) + self.imds_poll_counter += 1 return True + # If we get an exception while trying to call IMDS, we # call DHCP and setup the ephemeral network to acquire the new IP. + LOG.debug("Call to IMDS with arguments %s failed with " + "status code %s", msg, exception.code) + report_diagnostic_event("polling IMDS failed with exception %s" + % exception.code) return False LOG.debug("Wait for vnetswitch to happen") while True: try: - # Save our EphemeralDHCPv4 context so we avoid repeated dhcp - self._ephemeral_dhcp_ctx = EphemeralDHCPv4() - lease = self._ephemeral_dhcp_ctx.obtain_lease() + # Save our EphemeralDHCPv4 context to avoid repeated dhcp + with events.ReportEventStack( + name="obtain-dhcp-lease", + description="obtain dhcp lease", + parent=azure_ds_reporter): + self._ephemeral_dhcp_ctx = EphemeralDHCPv4() + lease = self._ephemeral_dhcp_ctx.obtain_lease() + + if vnet_switched: + dhcp_attempts += 1 if report_ready: try: nl_sock = netlink.create_bound_netlink_socket() except netlink.NetlinkCreateSocketError as e: + report_diagnostic_event(e) LOG.warning(e) self._ephemeral_dhcp_ctx.clean_network() - return + break + path = REPORTED_READY_MARKER_FILE LOG.info( "Creating a marker file to report ready: %s", path) @@ -579,17 +635,33 @@ class DataSourceAzure(sources.DataSource): pid=os.getpid(), time=time())) self._report_ready(lease=lease) report_ready = False - try: - netlink.wait_for_media_disconnect_connect( - nl_sock, lease['interface']) - except AssertionError as error: - LOG.error(error) - return + + with events.ReportEventStack( + name="wait-for-media-disconnect-connect", + description="wait for vnet switch", + parent=azure_ds_reporter): + try: + netlink.wait_for_media_disconnect_connect( + nl_sock, lease['interface']) + except AssertionError as error: + report_diagnostic_event(error) + LOG.error(error) + break + + vnet_switched = True self._ephemeral_dhcp_ctx.clean_network() else: - return readurl(url, timeout=IMDS_TIMEOUT_IN_SECONDS, - headers=headers, exception_cb=exc_cb, - infinite=True, log_req_resp=False).contents + with events.ReportEventStack( + name="get-reprovision-data-from-imds", + description="get reprovision data from imds", + parent=azure_ds_reporter): + return_val = readurl(url, + timeout=IMDS_TIMEOUT_IN_SECONDS, + headers=headers, + exception_cb=exc_cb, + infinite=True, + log_req_resp=False).contents + break except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry self._ephemeral_dhcp_ctx.clean_network() @@ -598,6 +670,14 @@ class DataSourceAzure(sources.DataSource): if nl_sock: nl_sock.close() + if vnet_switched: + report_diagnostic_event("attempted dhcp %d times after reuse" % + 
dhcp_attempts) + report_diagnostic_event("polled imds %d times after reuse" % + self.imds_poll_counter) + + return return_val + @azure_ds_telemetry_reporter def _report_ready(self, lease): """Tells the fabric provisioning has completed """ @@ -666,9 +746,12 @@ class DataSourceAzure(sources.DataSource): self.ds_cfg['agent_command']) try: fabric_data = metadata_func() - except Exception: + except Exception as e: + report_diagnostic_event( + "Error communicating with Azure fabric; You may experience " + "connectivity issues: %s" % e) LOG.warning( - "Error communicating with Azure fabric; You may experience." + "Error communicating with Azure fabric; You may experience " "connectivity issues.", exc_info=True) return False @@ -1027,7 +1110,9 @@ def read_azure_ovf(contents): try: dom = minidom.parseString(contents) except Exception as e: - raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e) + error_str = "Invalid ovf-env.xml: %s" % e + report_diagnostic_event(error_str) + raise BrokenAzureDataSource(error_str) results = find_child(dom.documentElement, lambda n: n.localName == "ProvisioningSection") @@ -1299,8 +1384,13 @@ def get_metadata_from_imds(fallback_nic, retries): if net.is_up(fallback_nic): return util.log_time(**kwargs) else: - with EphemeralDHCPv4(fallback_nic): - return util.log_time(**kwargs) + try: + with EphemeralDHCPv4WithReporting( + azure_ds_reporter, fallback_nic): + return util.log_time(**kwargs) + except Exception as e: + report_diagnostic_event("exception while getting metadata: %s" % e) + raise @azure_ds_telemetry_reporter @@ -1313,11 +1403,14 @@ def _get_metadata_from_imds(retries): url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, retries=retries, exception_cb=retry_on_url_exc) except Exception as e: - LOG.debug('Ignoring IMDS instance metadata: %s', e) + msg = 'Ignoring IMDS instance metadata: %s' % e + report_diagnostic_event(msg) + LOG.debug(msg) return {} try: return util.load_json(str(response)) - except json.decoder.JSONDecodeError: + except json.decoder.JSONDecodeError as e: + report_diagnostic_event('non-json imds response' % e) LOG.warning( 'Ignoring non-json IMDS instance metadata: %s', str(response)) return {} @@ -1370,8 +1463,10 @@ def _is_platform_viable(seed_dir): asset_tag = util.read_dmi_data('chassis-asset-tag') if asset_tag == AZURE_CHASSIS_ASSET_TAG: return True - LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) - evt.description = "Non-Azure DMI asset tag '%s' discovered.", asset_tag + msg = "Non-Azure DMI asset tag '%s' discovered." 
% asset_tag + LOG.debug(msg) + evt.description = msg + report_diagnostic_event(msg) if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): return True return False diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 82c4c8c4..f1fba175 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -16,7 +16,11 @@ from xml.etree import ElementTree from cloudinit import url_helper from cloudinit import util +from cloudinit import version +from cloudinit import distros from cloudinit.reporting import events +from cloudinit.net.dhcp import EphemeralDHCPv4 +from datetime import datetime LOG = logging.getLogger(__name__) @@ -24,6 +28,10 @@ LOG = logging.getLogger(__name__) # value is applied if the endpoint can't be found within a lease file DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10" +BOOT_EVENT_TYPE = 'boot-telemetry' +SYSTEMINFO_EVENT_TYPE = 'system-info' +DIAGNOSTIC_EVENT_TYPE = 'diagnostic' + azure_ds_reporter = events.ReportEventStack( name="azure-ds", description="initialize reporter for azure ds", @@ -40,6 +48,105 @@ def azure_ds_telemetry_reporter(func): return impl +@azure_ds_telemetry_reporter +def get_boot_telemetry(): + """Report timestamps related to kernel initialization and systemd + activation of cloud-init""" + if not distros.uses_systemd(): + raise RuntimeError( + "distro not using systemd, skipping boot telemetry") + + LOG.debug("Collecting boot telemetry") + try: + kernel_start = float(time.time()) - float(util.uptime()) + except ValueError: + raise RuntimeError("Failed to determine kernel start timestamp") + + try: + out, _ = util.subp(['/bin/systemctl', + 'show', '-p', + 'UserspaceTimestampMonotonic'], + capture=True) + tsm = None + if out and '=' in out: + tsm = out.split("=")[1] + + if not tsm: + raise RuntimeError("Failed to parse " + "UserspaceTimestampMonotonic from systemd") + + user_start = kernel_start + (float(tsm) / 1000000) + except util.ProcessExecutionError as e: + raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s" + % e) + except ValueError as e: + raise RuntimeError("Failed to parse " + "UserspaceTimestampMonotonic from systemd: %s" + % e) + + try: + out, _ = util.subp(['/bin/systemctl', 'show', + 'cloud-init-local', '-p', + 'InactiveExitTimestampMonotonic'], + capture=True) + tsm = None + if out and '=' in out: + tsm = out.split("=")[1] + if not tsm: + raise RuntimeError("Failed to parse " + "InactiveExitTimestampMonotonic from systemd") + + cloudinit_activation = kernel_start + (float(tsm) / 1000000) + except util.ProcessExecutionError as e: + raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s" + % e) + except ValueError as e: + raise RuntimeError("Failed to parse " + "InactiveExitTimestampMonotonic from systemd: %s" + % e) + + evt = events.ReportingEvent( + BOOT_EVENT_TYPE, 'boot-telemetry', + "kernel_start=%s user_start=%s cloudinit_activation=%s" % + (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z', + datetime.utcfromtimestamp(user_start).isoformat() + 'Z', + datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'), + events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + +@azure_ds_telemetry_reporter +def get_system_info(): + """Collect and report system information""" + info = util.system_info() + evt = events.ReportingEvent( + SYSTEMINFO_EVENT_TYPE, 'system information', + "cloudinit_version=%s, kernel_version=%s, variant=%s, " + "distro_name=%s, 
distro_version=%s, flavor=%s, " + "python_version=%s" % + (version.version_string(), info['release'], info['variant'], + info['dist'][0], info['dist'][1], info['dist'][2], + info['python']), events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + +def report_diagnostic_event(str): + """Report a diagnostic event""" + evt = events.ReportingEvent( + DIAGNOSTIC_EVENT_TYPE, 'diagnostic message', + str, events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + @contextmanager def cd(newdir): prevdir = os.getcwd() @@ -360,16 +467,19 @@ class WALinuxAgentShim(object): value = dhcp245 LOG.debug("Using Azure Endpoint from dhcp options") if value is None: + report_diagnostic_event("No Azure endpoint from dhcp options") LOG.debug('Finding Azure endpoint from networkd...') value = WALinuxAgentShim._networkd_get_value_from_leases() if value is None: # Option-245 stored in /run/cloud-init/dhclient.hooks/.json # a dhclient exit hook that calls cloud-init-dhclient-hook + report_diagnostic_event("No Azure endpoint from networkd") LOG.debug('Finding Azure endpoint from hook json...') dhcp_options = WALinuxAgentShim._load_dhclient_json() value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options) if value is None: # Fallback and check the leases file if unsuccessful + report_diagnostic_event("No Azure endpoint from dhclient logs") LOG.debug("Unable to find endpoint in dhclient logs. " " Falling back to check lease files") if fallback_lease_file is None: @@ -381,11 +491,15 @@ class WALinuxAgentShim(object): value = WALinuxAgentShim._get_value_from_leases_file( fallback_lease_file) if value is None: - LOG.warning("No lease found; using default endpoint") + msg = "No lease found; using default endpoint" + report_diagnostic_event(msg) + LOG.warning(msg) value = DEFAULT_WIRESERVER_ENDPOINT endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) - LOG.debug('Azure endpoint found at %s', endpoint_ip_address) + msg = 'Azure endpoint found at %s' % endpoint_ip_address + report_diagnostic_event(msg) + LOG.debug(msg) return endpoint_ip_address @azure_ds_telemetry_reporter @@ -399,16 +513,19 @@ class WALinuxAgentShim(object): try: response = http_client.get( 'http://{0}/machine/?comp=goalstate'.format(self.endpoint)) - except Exception: + except Exception as e: if attempts < 10: time.sleep(attempts + 1) else: + report_diagnostic_event( + "failed to register with Azure: %s" % e) raise else: break attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) + report_diagnostic_event("container_id %s" % goal_state.container_id) ssh_keys = [] if goal_state.certificates_xml is not None and pubkey_info is not None: LOG.debug('Certificate XML found; parsing out public keys.') @@ -449,11 +566,20 @@ class WALinuxAgentShim(object): container_id=goal_state.container_id, instance_id=goal_state.instance_id, ) - http_client.post( - "http://{0}/machine?comp=health".format(self.endpoint), - data=document, - extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, - ) + # Host will collect kvps when cloud-init reports ready. + # some kvps might still be in the queue. We yield the scheduler + # to make sure we process all kvps up till this point. 
+ time.sleep(0) + try: + http_client.post( + "http://{0}/machine?comp=health".format(self.endpoint), + data=document, + extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, + ) + except Exception as e: + report_diagnostic_event("exception while reporting ready: %s" % e) + raise + LOG.info('Reported ready to Azure fabric.') @@ -467,4 +593,22 @@ def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, finally: shim.clean_up() + +class EphemeralDHCPv4WithReporting(object): + def __init__(self, reporter, nic=None): + self.reporter = reporter + self.ephemeralDHCPv4 = EphemeralDHCPv4(iface=nic) + + def __enter__(self): + with events.ReportEventStack( + name="obtain-dhcp-lease", + description="obtain dhcp lease", + parent=self.reporter): + return self.ephemeralDHCPv4.__enter__() + + def __exit__(self, excp_type, excp_value, excp_traceback): + self.ephemeralDHCPv4.__exit__( + excp_type, excp_value, excp_traceback) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 4d57cebc..3547dd94 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -181,7 +181,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): self.logs.getvalue()) @mock.patch(MOCKPATH + 'readurl') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') @mock.patch(MOCKPATH + 'net.is_up') def test_get_metadata_performs_dhcp_when_network_is_down( self, m_net_is_up, m_dhcp, m_readurl): @@ -195,7 +195,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): dsaz.get_metadata_from_imds('eth9', retries=2)) m_net_is_up.assert_called_with('eth9') - m_dhcp.assert_called_with('eth9') + m_dhcp.assert_called_with(mock.ANY, 'eth9') self.assertIn( "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time self.logs.getvalue()) @@ -552,7 +552,8 @@ scbus-1 on xpt0 bus 0 dsrc.crawl_metadata() self.assertEqual(str(cm.exception), error_msg) - @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') + @mock.patch( + 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') @mock.patch( 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') @@ -1308,7 +1309,9 @@ class TestAzureBounce(CiTestCase): self.assertEqual(initial_host_name, self.set_hostname.call_args_list[-1][0][0]) - def test_environment_correct_for_bounce_command(self): + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_environment_correct_for_bounce_command( + self, mock_get_boot_telemetry): interface = 'int0' hostname = 'my-new-host' old_hostname = 'my-old-host' @@ -1324,7 +1327,9 @@ class TestAzureBounce(CiTestCase): self.assertEqual(hostname, bounce_env['hostname']) self.assertEqual(old_hostname, bounce_env['old_hostname']) - def test_default_bounce_command_ifup_used_by_default(self): + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_default_bounce_command_ifup_used_by_default( + self, mock_get_boot_telemetry): cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py old mode 100755 new mode 100644 index d01ed5b3..640895a4 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -7,9 +7,12 @@ import json import os import struct 
import time
+import re
+import mock
 
 from cloudinit import util
 from cloudinit.tests.helpers import CiTestCase
+from cloudinit.sources.helpers import azure
 
 
 class TestKvpEncoding(CiTestCase):
@@ -126,3 +129,65 @@ class TextKvpReporter(CiTestCase):
         reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
         kvps = list(reporter._iterate_kvps(0))
         self.assertEqual(0, len(kvps))
+
+    @mock.patch('cloudinit.distros.uses_systemd')
+    @mock.patch('cloudinit.util.subp')
+    def test_get_boot_telemetry(self, m_subp, m_sysd):
+        reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
+        datetime_pattern = (r"\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]"
+                            r"\d:[0-5]\d\.\d+([+-][0-2]\d:[0-5]\d|Z)")
+
+        # get_boot_telemetry makes two subp calls to systemctl. We provide
+        # a list of values that the subp calls should return
+        m_subp.side_effect = [
+            ('UserspaceTimestampMonotonic=1844838', ''),
+            ('InactiveExitTimestampMonotonic=3068203', '')]
+        m_sysd.return_value = True
+
+        reporter.publish_event(azure.get_boot_telemetry())
+        reporter.q.join()
+        kvps = list(reporter._iterate_kvps(0))
+        self.assertEqual(1, len(kvps))
+
+        evt_msg = kvps[0]['value']
+        if not re.search("kernel_start=" + datetime_pattern, evt_msg):
+            raise AssertionError("missing kernel_start timestamp")
+        if not re.search("user_start=" + datetime_pattern, evt_msg):
+            raise AssertionError("missing user_start timestamp")
+        if not re.search("cloudinit_activation=" + datetime_pattern,
+                         evt_msg):
+            raise AssertionError(
+                "missing cloudinit_activation timestamp")
+
+    def test_get_system_info(self):
+        reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
+        pattern = r"[^=\s]+"
+
+        reporter.publish_event(azure.get_system_info())
+        reporter.q.join()
+        kvps = list(reporter._iterate_kvps(0))
+        self.assertEqual(1, len(kvps))
+        evt_msg = kvps[0]['value']
+
+        # the most important information is cloudinit version,
+        # kernel_version, and the distro variant. It is ok if
+        # the rest is not available
+        if not re.search("cloudinit_version=" + pattern, evt_msg):
+            raise AssertionError("missing cloudinit_version string")
+        if not re.search("kernel_version=" + pattern, evt_msg):
+            raise AssertionError("missing kernel_version string")
+        if not re.search("variant=" + pattern, evt_msg):
+            raise AssertionError("missing distro variant string")
+
+    def test_report_diagnostic_event(self):
+        reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
+
+        reporter.publish_event(
+            azure.report_diagnostic_event("test_diagnostic"))
+        reporter.q.join()
+        kvps = list(reporter._iterate_kvps(0))
+        self.assertEqual(1, len(kvps))
+        evt_msg = kvps[0]['value']
+
+        if "test_diagnostic" not in evt_msg:
+            raise AssertionError("missing expected diagnostic message")
-- 
cgit v1.2.3


From 45426d8d38a7224962867ba71f390cce653e0d17 Mon Sep 17 00:00:00 2001
From: Xiaofeng Wang
Date: Wed, 11 Sep 2019 18:53:01 +0000
Subject: VMware: add option into VMTools config to enable/disable custom script.

VMware customization already has support to run a custom script during
the VM customization. Adding this option allows a VM administrator to
disable the execution of customization scripts. If the option is set to
false, the script will not execute and the customization status is set
to GUESTCUST_ERROR_SCRIPT_DISABLED.
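For illustration, the gate in the datasource boils down to the sketch
below (a simplified rendering of the new check; the section, key and
default mirror the constants added in this patch, and the real call
site is in the DataSourceOVF diff that follows):

    # sketch only, not the verbatim DataSourceOVF code
    from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
        get_tools_config,
    )

    enabled = get_tools_config('deployPkg', 'enable-custom-scripts', 'true')
    if enabled.lower() == 'false':
        # mirrors the GUESTCUST_ERROR_SCRIPT_DISABLED handling below
        raise RuntimeError('Custom script is disabled by VM Administrator')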
---
 cloudinit/sources/DataSourceOVF.py                 | 21 ++++-
 .../sources/helpers/vmware/imc/guestcust_error.py  |  1 +
 .../sources/helpers/vmware/imc/guestcust_util.py   | 37 ++++++++++++
 tests/unittests/test_datasource/test_ovf.py        | 55 +++++++++++++++---
 tests/unittests/test_vmware/test_guestcust_util.py | 65 ++++++++++++++++++++++
 5 files changed, 169 insertions(+), 10 deletions(-)
 create mode 100644 tests/unittests/test_vmware/test_guestcust_util.py

(limited to 'cloudinit/sources/helpers')

diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index dd941d2e..b1561892 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -40,11 +40,15 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_state \
 from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
     enable_nics,
     get_nics_to_enable,
-    set_customization_status
+    set_customization_status,
+    get_tools_config
 )
 
 LOG = logging.getLogger(__name__)
 
+CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg"
+GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts"
+
 
 class DataSourceOVF(sources.DataSource):
 
@@ -148,6 +152,21 @@ class DataSourceOVF(sources.DataSource):
                     product_marker, os.path.join(self.paths.cloud_dir, 'data'))
                 special_customization = product_marker and not hasmarkerfile
                 customscript = self._vmware_cust_conf.custom_script_name
+                custScriptConfig = get_tools_config(
+                    CONFGROUPNAME_GUESTCUSTOMIZATION,
+                    GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
+                    "true")
+                if custScriptConfig.lower() == "false":
+                    # Update the customization status if the
+                    # custom script is disabled
+                    if special_customization and customscript:
+                        msg = "Custom script is disabled by VM Administrator"
+                        LOG.debug(msg)
+                        set_customization_status(
+                            GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+                            GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED)
+                        raise RuntimeError(msg)
+
             ccScriptsDir = os.path.join(
                 self.paths.get_cpath("scripts"),
                 "per-instance")
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
index db5a00dc..65ae7390 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
@@ -10,5 +10,6 @@ class GuestCustErrorEnum(object):
     """Specifies different errors of Guest Customization engine"""
 
     GUESTCUST_ERROR_SUCCESS = 0
+    GUESTCUST_ERROR_SCRIPT_DISABLED = 6
 
 # vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index a590f323..eb78172e 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -7,6 +7,7 @@
 
 import logging
 import os
+import re
 import time
 
 from cloudinit import util
@@ -117,4 +118,40 @@ def enable_nics(nics):
     logger.warning("Can't connect network interfaces after %d attempts",
                    enableNicsWaitRetries)
 
+
+def get_tools_config(section, key, defaultVal):
+    """ Return the value of [section] key from VMTools configuration.
+
+        @param section: String of section to read from VMTools config
+        @returns: String value from key in [section] or defaultVal if
+                  [section] is not present or vmware-toolbox-cmd is
+                  not installed.
+ """ + + if not util.which('vmware-toolbox-cmd'): + logger.debug( + 'vmware-toolbox-cmd not installed, returning default value') + return defaultVal + + retValue = defaultVal + cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key] + + try: + (outText, _) = util.subp(cmd) + m = re.match(r'([a-zA-Z0-9 ]+)=(.*)', outText) + if m: + retValue = m.group(2).strip() + logger.debug("Get tools config: [%s] %s = %s", + section, key, retValue) + else: + logger.debug( + "Tools config: [%s] %s is not found, return default value: %s", + section, key, retValue) + except util.ProcessExecutionError as e: + logger.error("Failed running %s[%s]", cmd, e.exit_code) + logger.exception(e) + + return retValue + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index 349d54cc..a615470a 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -169,19 +169,56 @@ class TestDatasourceOVF(CiTestCase): MARKER-ID = 12345345 """) util.write_file(conf_file, conf_content) - with self.assertRaises(CustomScriptNotFound) as context: - wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'util.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) + with mock.patch(MPATH + 'get_tools_config', return_value='true'): + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'util.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) customscript = self.tmp_path('test-script', self.tdir) self.assertIn('Script %s not found!!' % customscript, str(context.exception)) + def test_get_data_cust_script_disabled(self): + """If custom script is disabled by VMware tools configuration, + raise a RuntimeError. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + """) + util.write_file(conf_file, conf_content) + # Prepare the custom sript + customscript = self.tmp_path('test-script', self.tdir) + util.write_file(customscript, "This is the post cust script") + + with mock.patch(MPATH + 'get_tools_config', return_value='false'): + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(RuntimeError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'util.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + self.assertIn('Custom script is disabled by VM Administrator', + str(context.exception)) + def test_get_data_non_vmware_seed_platform_info(self): """Platform info properly reports when on non-vmware platforms.""" paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py new file mode 100644 index 00000000..b8fa9942 --- /dev/null +++ b/tests/unittests/test_vmware/test_guestcust_util.py @@ -0,0 +1,65 @@ +# Copyright (C) 2019 Canonical Ltd. +# Copyright (C) 2019 VMware INC. 
+# +# Author: Xiaofeng Wang +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import util +from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( + get_tools_config, +) +from cloudinit.tests.helpers import CiTestCase, mock + + +class TestGuestCustUtil(CiTestCase): + def test_get_tools_config_not_installed(self): + """ + This test is designed to verify the behavior if vmware-toolbox-cmd + is not installed. + """ + with mock.patch.object(util, 'which', return_value=None): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), 'defaultVal') + + def test_get_tools_config_internal_exception(self): + """ + This test is designed to verify the behavior if internal exception + is raised. + """ + with mock.patch.object(util, 'which', return_value='/dummy/path'): + with mock.patch.object(util, 'subp', + return_value=('key=value', b''), + side_effect=util.ProcessExecutionError( + "subp failed", exit_code=99)): + # verify return value is 'defaultVal', not 'value'. + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'defaultVal') + + def test_get_tools_config_normal(self): + """ + This test is designed to verify the value could be parsed from + key = value of the given [section] + """ + with mock.patch.object(util, 'which', return_value='/dummy/path'): + # value is not blank + with mock.patch.object(util, 'subp', + return_value=('key = value ', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'value') + # value is blank + with mock.patch.object(util, 'subp', + return_value=('key = ', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + '') + # value contains = + with mock.patch.object(util, 'subp', + return_value=('key=Bar=Wark', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'Bar=Wark') + +# vi: ts=4 expandtab -- cgit v1.2.3 From fac98983187c0984aa79c569c4b76cab90fd6f47 Mon Sep 17 00:00:00 2001 From: Harald Jensås Date: Wed, 16 Oct 2019 15:30:28 +0000 Subject: net: handle openstack dhcpv6-stateless configuration Openstack subnets can be configured to use SLAAC by setting ipv6_address_mode=dhcpv6-stateless. When this is the case the sysconfig interface configuration should use IPV6_AUTOCONF=yes and not set DHCPV6C=yes. This change sets the subnets type property to the full network['type'] from openstack metadata. cloudinit/net/sysconfig.py and cloudinit/net/eni.py are updated to support new subnet types: - 'ipv6_dhcpv6-stateless' => IPV6_AUTOCONF=yes - 'ipv6_dhcpv6-stateful' => DHCPV6C=yes Type 'dhcp6' in sysconfig is kept for backward compatibility with any implementations that set subnet_type == 'dhcp6'. 
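For example (excerpting the sysconfig expectations from the tests
below), the two new subnet types render as:

    # subnets: [{'type': 'ipv6_dhcpv6-stateless'}], SLAAC autoconf
    IPV6INIT=yes
    IPV6_AUTOCONF=yes

    # subnets: [{'type': 'ipv6_dhcpv6-stateful'}], DHCPv6 client
    IPV6INIT=yes
    DHCPV6C=yes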
LP: #1847517 --- cloudinit/net/eni.py | 7 +- cloudinit/net/sysconfig.py | 7 +- cloudinit/sources/helpers/openstack.py | 3 +- .../unittests/test_datasource/test_configdrive.py | 39 ++++++++++ tests/unittests/test_net.py | 88 ++++++++++++++++++++++ 5 files changed, 141 insertions(+), 3 deletions(-) (limited to 'cloudinit/sources/helpers') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index b129bb62..530922b5 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -411,8 +411,13 @@ class Renderer(renderer.Renderer): else: ipv4_subnet_mtu = subnet.get('mtu') iface['inet'] = subnet_inet - if subnet['type'].startswith('dhcp'): + if (subnet['type'] == 'dhcp4' or subnet['type'] == 'dhcp6' or + subnet['type'] == 'ipv6_dhcpv6-stateful'): + # Configure network settings using DHCP or DHCPv6 iface['mode'] = 'dhcp' + elif subnet['type'] == 'ipv6_dhcpv6-stateless': + # Configure network settings using SLAAC from RAs + iface['mode'] = 'auto' # do not emit multiple 'auto $IFACE' lines as older (precise) # ifupdown complains diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 87b548e5..4e656768 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -343,10 +343,15 @@ class Renderer(renderer.Renderer): for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): mtu_key = 'MTU' subnet_type = subnet.get('type') - if subnet_type == 'dhcp6': + if subnet_type == 'dhcp6' or subnet_type == 'ipv6_dhcpv6-stateful': # TODO need to set BOOTPROTO to dhcp6 on SUSE iface_cfg['IPV6INIT'] = True + # Configure network settings using DHCPv6 iface_cfg['DHCPV6C'] = True + elif subnet_type == 'ipv6_dhcpv6-stateless': + iface_cfg['IPV6INIT'] = True + # Configure network settings using SLAAC from RAs + iface_cfg['IPV6_AUTOCONF'] = True elif subnet_type in ['dhcp4', 'dhcp']: iface_cfg['BOOTPROTO'] = 'dhcp' elif subnet_type == 'static': diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 8f069115..d1c4601a 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -585,7 +585,8 @@ def convert_net_json(network_json=None, known_macs=None): subnet = dict((k, v) for k, v in network.items() if k in valid_keys['subnet']) if 'dhcp' in network['type']: - t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4' + t = (network['type'] if network['type'].startswith('ipv6') + else 'dhcp4') subnet.update({ 'type': t, }) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 520c50fe..8c788c1c 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -499,6 +499,45 @@ class TestNetJson(CiTestCase): known_macs=KNOWN_MACS) self.assertEqual(myds.network_config, network_config) + def test_network_config_conversion_dhcp6(self): + """Test some ipv6 input network json and check the expected + conversions.""" + in_data = { + 'links': [ + {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', + 'ethernet_mac_address': 'fa:16:3e:69:b0:58', + 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, + {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', + 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', + 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, + ], + 'networks': [ + {'link': 'tap2ecc7709-b3', 'type': 'ipv6_dhcpv6-stateless', + 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', + 'id': 'network0'}, + {'link': 'tap2f88d109-5b', 'type': 'ipv6_dhcpv6-stateful', 
+ 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', + 'id': 'network1'}, + ] + } + out_data = { + 'version': 1, + 'config': [ + {'mac_address': 'fa:16:3e:69:b0:58', + 'mtu': None, + 'name': 'enp0s1', + 'subnets': [{'type': 'ipv6_dhcpv6-stateless'}], + 'type': 'physical'}, + {'mac_address': 'fa:16:3e:d4:57:ad', + 'mtu': None, + 'name': 'enp0s2', + 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}], + 'type': 'physical'} + ], + } + conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS) + self.assertEqual(out_data, conv_data) + def test_network_config_conversions(self): """Tests a bunch of input network json and checks the expected conversions.""" diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index b6597412..f5a9cae6 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1070,6 +1070,82 @@ NETWORK_CONFIGS = { """), }, }, + 'dhcpv6_stateless': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateless'} + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + IPV6_AUTOCONF=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'dhcpv6_stateful': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateful'} + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, 'all': { 'expected_eni': ("""\ auto lo @@ -2781,6 +2857,18 @@ USERCTL=no self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + def test_dhcpv6_stateless_config(self): + entry = NETWORK_CONFIGS['dhcpv6_stateless'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_stateful_config(self): + entry = NETWORK_CONFIGS['dhcpv6_stateful'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + def test_check_ifcfg_rh(self): """ifcfg-rh plugin is added NetworkManager.conf if conf present.""" render_dir = self.tmp_dir() -- cgit v1.2.3 From ecb501b84338f078be18c38c68c3ce87fed3584b Mon Sep 17 00:00:00 2001 From: Xiaofeng Wang Date: Thu, 17 Oct 2019 15:18:44 +0000 Subject: guestcust_util: handle special characters in config file Handle the special characters when reading VM Tools configure file. For example, the key and value may contain _, - and . etc. 
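The relaxed pattern is easy to check in isolation; this standalone
snippet mirrors the new test case added below:

    import re

    # the old pattern r'([a-zA-Z0-9 ]+)=(.*)' rejected keys such as
    # 'b.c_d'; the new one accepts any characters up to the first '='
    m = re.match(r'([^=]+)=(.*)', '[a] b.c_d=e-f')
    assert m is not None
    assert m.group(2).strip() == 'e-f'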
---
 cloudinit/sources/helpers/vmware/imc/guestcust_util.py | 2 +-
 tests/unittests/test_vmware/test_guestcust_util.py     | 7 +++++++
 2 files changed, 8 insertions(+), 1 deletion(-)

(limited to 'cloudinit/sources/helpers')

diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index eb78172e..3d369d04 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -138,7 +138,7 @@ def get_tools_config(section, key, defaultVal):
 
     try:
         (outText, _) = util.subp(cmd)
-        m = re.match(r'([a-zA-Z0-9 ]+)=(.*)', outText)
+        m = re.match(r'([^=]+)=(.*)', outText)
         if m:
             retValue = m.group(2).strip()
             logger.debug("Get tools config: [%s] %s = %s",
diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py
index b8fa9942..b175a998 100644
--- a/tests/unittests/test_vmware/test_guestcust_util.py
+++ b/tests/unittests/test_vmware/test_guestcust_util.py
@@ -62,4 +62,11 @@ class TestGuestCustUtil(CiTestCase):
                     get_tools_config('section', 'key', 'defaultVal'),
                     'Bar=Wark')
 
+            # value contains specific characters
+            with mock.patch.object(util, 'subp',
+                                   return_value=('[a] b.c_d=e-f', b'')):
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    'e-f')
+
 # vi: ts=4 expandtab
-- 
cgit v1.2.3


From 62bbc262c3c7f633eac1d09ec78c055eef05166a Mon Sep 17 00:00:00 2001
From: Harald
Date: Wed, 20 Nov 2019 18:55:27 +0100
Subject: net: IPv6, accept_ra, slaac, stateless (#51)

Router advertisements are required for the default route to be set up,
thus accept_ra should be enabled for dhcpv6-stateful.

sysconf: IPV6_FORCE_ACCEPT_RA controls accept_ra sysctl.
eni: mode static and mode dhcp 'accept_ra' controls sysctl.

Add 'accept-ra: true|false' parameter to config v1 and v2. When True:
accept_ra is set to '1'. When False: accept_ra is set to '0'. When not
defined in config the value is left to the operating system default.

This change also extends the IPv6 support to distinguish between slaac
and dhcpv6-stateless. SLAAC is autoconfig without any options from
DHCP, while stateless auto-configures the address and then uses DHCP
for other options.
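For example (mirroring the test expectations added below), the same
interface expressed in v1 and v2 with router advertisements explicitly
accepted:

    # v1
    config:
      - type: physical
        name: iface0
        subnets:
          - {'type': 'dhcp6'}
        accept-ra: true

    # v2
    ethernets:
      iface0:
        dhcp6: true
        accept-ra: true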
LP: #1806014 LP: #1808647 --- cloudinit/net/eni.py | 15 ++ cloudinit/net/netplan.py | 9 +- cloudinit/net/network_state.py | 21 +- cloudinit/net/sysconfig.py | 34 ++- cloudinit/sources/helpers/openstack.py | 21 +- .../unittests/test_datasource/test_configdrive.py | 3 +- tests/unittests/test_net.py | 238 ++++++++++++++++++++- 7 files changed, 320 insertions(+), 21 deletions(-) (limited to 'cloudinit/sources/helpers') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index a9a80c95..70771060 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -399,6 +399,7 @@ class Renderer(renderer.Renderer): def _render_iface(self, iface, render_hwaddress=False): sections = [] subnets = iface.get('subnets', {}) + accept_ra = iface.pop('accept-ra', None) if subnets: for index, subnet in enumerate(subnets): ipv4_subnet_mtu = None @@ -415,9 +416,23 @@ class Renderer(renderer.Renderer): subnet['type'] == 'ipv6_dhcpv6-stateful'): # Configure network settings using DHCP or DHCPv6 iface['mode'] = 'dhcp' + if accept_ra is not None: + # Accept router advertisements (0=off, 1=on) + iface['accept_ra'] = '1' if accept_ra else '0' elif subnet['type'] == 'ipv6_dhcpv6-stateless': # Configure network settings using SLAAC from RAs iface['mode'] = 'auto' + # Use stateless DHCPv6 (0=off, 1=on) + iface['dhcp'] = '1' + elif subnet['type'] == 'ipv6_slaac': + # Configure network settings using SLAAC from RAs + iface['mode'] = 'auto' + # Use stateless DHCPv6 (0=off, 1=on) + iface['dhcp'] = '0' + elif subnet_is_ipv6(subnet) and subnet['type'] == 'static': + if accept_ra is not None: + # Accept router advertisements (0=off, 1=on) + iface['accept_ra'] = '1' if accept_ra else '0' # do not emit multiple 'auto $IFACE' lines as older (precise) # ifupdown complains diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 749d46f8..14d3999f 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -4,7 +4,7 @@ import copy import os from . 
import renderer -from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2 +from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2, IPV6_DYNAMIC_TYPES from cloudinit import log as logging from cloudinit import util @@ -52,7 +52,8 @@ def _extract_addresses(config, entry, ifname, features=None): 'mtu': 1480, 'netmask': 64, 'type': 'static'}], - 'type: physical' + 'type: physical', + 'accept-ra': 'true' } An entry dictionary looks like: @@ -95,6 +96,8 @@ def _extract_addresses(config, entry, ifname, features=None): if sn_type == 'dhcp': sn_type += '4' entry.update({sn_type: True}) + elif sn_type in IPV6_DYNAMIC_TYPES: + entry.update({'dhcp6': True}) elif sn_type in ['static']: addr = "%s" % subnet.get('address') if 'prefix' in subnet: @@ -147,6 +150,8 @@ def _extract_addresses(config, entry, ifname, features=None): ns = entry.get('nameservers', {}) ns.update({'search': searchdomains}) entry.update({'nameservers': ns}) + if 'accept-ra' in config and config['accept-ra'] is not None: + entry.update({'accept-ra': util.is_true(config.get('accept-ra'))}) def _extract_bond_slaves_by_name(interfaces, entry, bond_master): diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 20b7716b..7d206a1a 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -18,13 +18,17 @@ from cloudinit import util LOG = logging.getLogger(__name__) NETWORK_STATE_VERSION = 1 +IPV6_DYNAMIC_TYPES = ['dhcp6', + 'ipv6_slaac', + 'ipv6_dhcpv6-stateless', + 'ipv6_dhcpv6-stateful'] NETWORK_STATE_REQUIRED_KEYS = { 1: ['version', 'config', 'network_state'], } NETWORK_V2_KEY_FILTER = [ 'addresses', 'dhcp4', 'dhcp4-overrides', 'dhcp6', 'dhcp6-overrides', 'gateway4', 'gateway6', 'interfaces', 'match', 'mtu', 'nameservers', - 'renderer', 'set-name', 'wakeonlan' + 'renderer', 'set-name', 'wakeonlan', 'accept-ra' ] NET_CONFIG_TO_V2 = { @@ -342,7 +346,8 @@ class NetworkStateInterpreter(object): 'name': 'eth0', 'subnets': [ {'type': 'dhcp4'} - ] + ], + 'accept-ra': 'true' } ''' @@ -362,6 +367,9 @@ class NetworkStateInterpreter(object): self.use_ipv6 = True break + accept_ra = command.get('accept-ra', None) + if accept_ra is not None: + accept_ra = util.is_true(accept_ra) iface.update({ 'name': command.get('name'), 'type': command.get('type'), @@ -372,6 +380,7 @@ class NetworkStateInterpreter(object): 'address': None, 'gateway': None, 'subnets': subnets, + 'accept-ra': accept_ra }) self._network_state['interfaces'].update({command.get('name'): iface}) self.dump_network_state() @@ -615,6 +624,7 @@ class NetworkStateInterpreter(object): driver: ixgbe set-name: lom1 dhcp6: true + accept-ra: true switchports: match: name: enp2* @@ -643,7 +653,7 @@ class NetworkStateInterpreter(object): driver = match.get('driver', None) if driver: phy_cmd['params'] = {'driver': driver} - for key in ['mtu', 'match', 'wakeonlan']: + for key in ['mtu', 'match', 'wakeonlan', 'accept-ra']: if key in cfg: phy_cmd[key] = cfg[key] @@ -928,8 +938,9 @@ def is_ipv6_addr(address): def subnet_is_ipv6(subnet): """Common helper for checking network_state subnets for ipv6.""" - # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful' or 'ipv6_dhcpv6-stateless' - if subnet['type'].endswith('6') or subnet['type'].startswith('ipv6'): + # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or + # 'ipv6_slaac' + if subnet['type'].endswith('6') or subnet['type'] in IPV6_DYNAMIC_TYPES: # This is a request for DHCPv6. 
return True elif subnet['type'] == 'static' and is_ipv6_addr(subnet.get('address')): diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index fe0c67ca..310cdf01 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -14,7 +14,7 @@ from configobj import ConfigObj from . import renderer from .network_state import ( - is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6) + is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES) LOG = logging.getLogger(__name__) NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" @@ -335,6 +335,9 @@ class Renderer(renderer.Renderer): continue iface_cfg[new_key] = old_value + if iface['accept-ra'] is not None: + iface_cfg['IPV6_FORCE_ACCEPT_RA'] = iface['accept-ra'] + @classmethod def _render_subnets(cls, iface_cfg, subnets, has_default_route): # setting base values @@ -350,6 +353,15 @@ class Renderer(renderer.Renderer): # Configure network settings using DHCPv6 iface_cfg['DHCPV6C'] = True elif subnet_type == 'ipv6_dhcpv6-stateless': + iface_cfg['IPV6INIT'] = True + # Configure network settings using SLAAC from RAs and optional + # info from dhcp server using DHCPv6 + iface_cfg['IPV6_AUTOCONF'] = True + iface_cfg['DHCPV6C'] = True + # Use Information-request to get only stateless configuration + # parameters (i.e., without address). + iface_cfg['DHCPV6C_OPTIONS'] = '-S' + elif subnet_type == 'ipv6_slaac': iface_cfg['IPV6INIT'] = True # Configure network settings using SLAAC from RAs iface_cfg['IPV6_AUTOCONF'] = True @@ -398,10 +410,15 @@ class Renderer(renderer.Renderer): # metric may apply to both dhcp and static config if 'metric' in subnet: iface_cfg['METRIC'] = subnet['metric'] + # TODO(hjensas): Including dhcp6 here is likely incorrect. DHCPv6 + # does not ever provide a default gateway, the default gateway + # come from RA's. (https://github.com/openSUSE/wicked/issues/570) if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']: if has_default_route and iface_cfg['BOOTPROTO'] != 'none': iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False continue + elif subnet_type in IPV6_DYNAMIC_TYPES: + continue elif subnet_type == 'static': if subnet_is_ipv6(subnet): ipv6_index = ipv6_index + 1 @@ -444,10 +461,14 @@ class Renderer(renderer.Renderer): @classmethod def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets): for _, subnet in enumerate(subnets, start=len(iface_cfg.children)): + subnet_type = subnet.get('type') for route in subnet.get('routes', []): is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway']) - if _is_default_route(route): + # Any dynamic configuration method, slaac, dhcpv6-stateful/ + # stateless should get router information from router RA's. + if (_is_default_route(route) and subnet_type not in + IPV6_DYNAMIC_TYPES): if ( (subnet.get('ipv4') and route_cfg.has_set_default_ipv4) or @@ -466,10 +487,17 @@ class Renderer(renderer.Renderer): # TODO(harlowja): add validation that no other iface has # also provided the default route? iface_cfg['DEFROUTE'] = True + # TODO(hjensas): Including dhcp6 here is likely incorrect. + # DHCPv6 does not ever provide a default gateway, the + # default gateway come from RA's. + # (https://github.com/openSUSE/wicked/issues/570) if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4', 'dhcp6'): + # NOTE(hjensas): DHCLIENT_SET_DEFAULT_ROUTE is SuSE + # only. RHEL, CentOS, Fedora does not implement this + # option. 
iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True if 'gateway' in route: - if is_ipv6 or is_ipv6_addr(route['gateway']): + if is_ipv6: iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] route_cfg.has_set_default_ipv6 = True else: diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index d1c4601a..0778f45a 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -584,17 +584,24 @@ def convert_net_json(network_json=None, known_macs=None): if n['link'] == link['id']]: subnet = dict((k, v) for k, v in network.items() if k in valid_keys['subnet']) - if 'dhcp' in network['type']: - t = (network['type'] if network['type'].startswith('ipv6') - else 'dhcp4') - subnet.update({ - 'type': t, - }) - else: + + if network['type'] == 'ipv4_dhcp': + subnet.update({'type': 'dhcp4'}) + elif network['type'] == 'ipv6_dhcp': + subnet.update({'type': 'dhcp6'}) + elif network['type'] in ['ipv6_slaac', 'ipv6_dhcpv6-stateless', + 'ipv6_dhcpv6-stateful']: + subnet.update({'type': network['type']}) + elif network['type'] in ['ipv4', 'ipv6']: subnet.update({ 'type': 'static', 'address': network.get('ip_address'), }) + + # Enable accept_ra for stateful and legacy ipv6_dhcp types + if network['type'] in ['ipv6_dhcpv6-stateful', 'ipv6_dhcp']: + cfg.update({'accept-ra': True}) + if network['type'] == 'ipv4': subnet['ipv4'] = True if network['type'] == 'ipv6': diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index cfb3b0a7..6f830cc6 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -547,7 +547,8 @@ class TestNetJson(CiTestCase): 'mtu': None, 'name': 'enp0s2', 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}], - 'type': 'physical'} + 'type': 'physical', + 'accept-ra': True} ], } conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 35ce55d2..0f45dc38 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1070,6 +1070,143 @@ NETWORK_CONFIGS = { """), }, }, + 'dhcpv6_accept_ra': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept_ra 1 + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + accept-ra: true + dhcp6: true + """).rstrip(' '), + 'yaml_v1': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + accept-ra: true + """).rstrip(' '), + 'yaml_v2': textwrap.dedent("""\ + version: 2 + ethernets: + iface0: + dhcp6: true + accept-ra: true + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'dhcpv6_reject_ra': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept_ra 0 + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + accept-ra: false + dhcp6: true + """).rstrip(' '), + 'yaml_v1': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + accept-ra: false + """).rstrip(' '), + 'yaml_v2': 
textwrap.dedent("""\ + version: 2 + ethernets: + iface0: + dhcp6: true + accept-ra: false + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=no + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'ipv6_slaac': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + dhcp 0 + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_slaac'} + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + IPV6_AUTOCONF=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, 'dhcpv6_stateless': { 'expected_eni': textwrap.dedent("""\ auto lo @@ -1077,6 +1214,7 @@ NETWORK_CONFIGS = { auto iface0 iface iface0 inet6 auto + dhcp 1 """).rstrip(' '), 'expected_netplan': textwrap.dedent(""" network: @@ -1097,6 +1235,8 @@ NETWORK_CONFIGS = { 'ifcfg-iface0': textwrap.dedent("""\ BOOTPROTO=none DEVICE=iface0 + DHCPV6C=yes + DHCPV6C_OPTIONS=-S IPV6_AUTOCONF=yes IPV6INIT=yes DEVICE=iface0 @@ -1121,6 +1261,7 @@ NETWORK_CONFIGS = { version: 2 ethernets: iface0: + accept-ra: true dhcp6: true """).rstrip(' '), 'yaml': textwrap.dedent("""\ @@ -1130,6 +1271,7 @@ NETWORK_CONFIGS = { name: 'iface0' subnets: - {'type': 'ipv6_dhcpv6-stateful'} + accept-ra: true """).rstrip(' '), 'expected_sysconfig': { 'ifcfg-iface0': textwrap.dedent("""\ @@ -1137,6 +1279,7 @@ NETWORK_CONFIGS = { DEVICE=iface0 DHCPV6C=yes IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=yes DEVICE=iface0 NM_CONTROLLED=no ONBOOT=yes @@ -2884,6 +3027,34 @@ USERCTL=no self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + def test_dhcpv6_accept_ra_config_v1(self): + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_accept_ra_config_v2(self): + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v2'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_reject_ra_config_v1(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_reject_ra_config_v2(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v2'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + def test_dhcpv6_stateless_config(self): entry = NETWORK_CONFIGS['dhcpv6_stateless'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) @@ -4022,6 +4193,46 @@ class TestNetplanRoundTrip(CiTestCase): entry['expected_netplan'].splitlines(), files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + def testsimple_render_dhcpv6_accept_ra(self): + entry = 
NETWORK_CONFIGS['dhcpv6_accept_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_dhcpv6_reject_ra(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_ipv6_slaac(self): + entry = NETWORK_CONFIGS['ipv6_slaac'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_dhcpv6_stateless(self): + entry = NETWORK_CONFIGS['dhcpv6_stateless'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_dhcpv6_stateful(self): + entry = NETWORK_CONFIGS['dhcpv6_stateful'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + def testsimple_render_all(self): entry = NETWORK_CONFIGS['all'] files = self._render_and_read(network_config=yaml.load(entry['yaml'])) @@ -4154,16 +4365,37 @@ class TestEniRoundTrip(CiTestCase): def testsimple_render_dhcpv6_stateless(self): entry = NETWORK_CONFIGS['dhcpv6_stateless'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml'])) + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + + def testsimple_render_ipv6_slaac(self): + entry = NETWORK_CONFIGS['ipv6_slaac'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) self.assertEqual( entry['expected_eni'].splitlines(), files['/etc/network/interfaces'].splitlines()) def testsimple_render_dhcpv6_stateful(self): entry = NETWORK_CONFIGS['dhcpv6_stateless'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + + def testsimple_render_dhcpv6_accept_ra(self): + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] files = self._render_and_read(network_config=yaml.load( - entry['yaml'])) + entry['yaml_v1'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + + def testsimple_render_dhcpv6_reject_ra(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) self.assertEqual( entry['expected_eni'].splitlines(), files['/etc/network/interfaces'].splitlines()) -- cgit v1.2.3 From f69d33a723b805fec3ee70c3a6127c8cadcb02d8 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 2 Dec 2019 16:24:18 -0700 Subject: url_helper: read_file_or_url should pass headers param into readurl (#66) Headers param was accidentally omitted and no longer passed through to readurl due to a previous commit. To avoid this omission of params in the future, drop positional param definitions from read_file_or_url and pass all kwargs through to readurl when we are not operating on a file. 
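A condensed, illustrative sketch of the resulting call shape (the
readurl stand-in below is hypothetical; the real signatures are in the
diff):

    def readurl(url, timeout=None, retries=0, headers=None, **kwargs):
        # stand-in for cloudinit.url_helper.readurl
        return (url, timeout, retries, headers)

    def read_file_or_url(url, **kwargs):
        # kwargs passthrough: no positional re-packing, so parameters
        # such as headers can no longer be silently dropped
        return readurl(url, **kwargs)

    assert read_file_or_url('http://h/p', headers={'Metadata': 'true'},
                            timeout=5, retries=10) == (
        'http://h/p', 5, 10, {'Metadata': 'true'})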
In util:read_seeded, correct the case where invalid positional param file_retries was being passed into read_file_or_url. Also drop duplicated file:// prefix addition from read_seeded because read_file_or_url does that work anyway. LP: #1854084 --- cloudinit/sources/helpers/azure.py | 6 ++- cloudinit/tests/test_url_helper.py | 52 ++++++++++++++++++++++ cloudinit/url_helper.py | 47 +++++++++++++++---- cloudinit/user_data.py | 2 +- cloudinit/util.py | 15 ++----- .../unittests/test_datasource/test_azure_helper.py | 18 +++++--- 6 files changed, 112 insertions(+), 28 deletions(-) (limited to 'cloudinit/sources/helpers') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index f1fba175..f5cdb3fd 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -183,14 +183,16 @@ class AzureEndpointHttpClient(object): if secure: headers = self.headers.copy() headers.update(self.extra_secure_headers) - return url_helper.read_file_or_url(url, headers=headers) + return url_helper.read_file_or_url(url, headers=headers, timeout=5, + retries=10) def post(self, url, data=None, extra_headers=None): headers = self.headers if extra_headers is not None: headers = self.headers.copy() headers.update(extra_headers) - return url_helper.read_file_or_url(url, data=data, headers=headers) + return url_helper.read_file_or_url(url, data=data, headers=headers, + timeout=5, retries=10) class GoalState(object): diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py index aa9f3ec1..e883ddc2 100644 --- a/cloudinit/tests/test_url_helper.py +++ b/cloudinit/tests/test_url_helper.py @@ -4,6 +4,7 @@ from cloudinit.url_helper import ( NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc) from cloudinit.tests.helpers import CiTestCase, mock, skipIf from cloudinit import util +from cloudinit import version import httpretty import requests @@ -17,6 +18,9 @@ except ImportError: _missing_oauthlib_dep = True +M_PATH = 'cloudinit.url_helper.' 
+ + class TestOAuthHeaders(CiTestCase): def test_oauth_headers_raises_not_implemented_when_oathlib_missing(self): @@ -67,6 +71,54 @@ class TestReadFileOrUrl(CiTestCase): self.assertEqual(result.contents, data) self.assertEqual(str(result), data.decode('utf-8')) + @mock.patch(M_PATH + 'readurl') + def test_read_file_or_url_passes_params_to_readurl(self, m_readurl): + """read_file_or_url passes all params through to readurl.""" + url = 'http://hostname/path' + response = 'This is my url content\n' + m_readurl.return_value = response + params = {'url': url, 'timeout': 1, 'retries': 2, + 'headers': {'somehdr': 'val'}, + 'data': 'data', 'sec_between': 1, + 'ssl_details': {'cert_file': '/path/cert.pem'}, + 'headers_cb': 'headers_cb', 'exception_cb': 'exception_cb'} + self.assertEqual(response, read_file_or_url(**params)) + params.pop('url') # url is passed in as a positional arg + self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list) + + def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self): + """Readurl param defaults used when unspecified by read_file_or_url + + Param defaults tested are as follows: + retries: 0, additional headers None beyond default, method: GET, + data: None, check_status: True and allow_redirects: True + """ + url = 'http://hostname/path' + + m_response = mock.MagicMock() + + class FakeSession(requests.Session): + def request(cls, **kwargs): + self.assertEqual( + {'url': url, 'allow_redirects': True, 'method': 'GET', + 'headers': { + 'User-Agent': 'Cloud-Init/%s' % ( + version.version_string())}}, + kwargs) + return m_response + + with mock.patch(M_PATH + 'requests.Session') as m_session: + error = requests.exceptions.HTTPError('broke') + m_session.side_effect = [error, FakeSession()] + # assert no retries and check_status == True + with self.assertRaises(UrlError) as context_manager: + response = read_file_or_url(url) + self.assertEqual('broke', str(context_manager.exception)) + # assert default headers, method, url and allow_redirects True + # Success on 2nd call with FakeSession + response = read_file_or_url(url) + self.assertEqual(m_response, response._response) + class TestRetryOnUrlExc(CiTestCase): diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 48ddae45..1496a471 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -81,14 +81,19 @@ def combine_url(base, *add_ons): return url -def read_file_or_url(url, timeout=5, retries=10, - headers=None, data=None, sec_between=1, ssl_details=None, - headers_cb=None, exception_cb=None): +def read_file_or_url(url, **kwargs): + """Wrapper function around readurl to allow passing a file path as url. + + When url is not a local file path, passthrough any kwargs to readurl. + + In the case of parameter passthrough to readurl, default values for some + parameters. See: call-signature of readurl in this module for param docs. 
+ """ url = url.lstrip() if url.startswith("/"): url = "file://%s" % url if url.lower().startswith("file://"): - if data: + if kwargs.get("data"): LOG.warning("Unable to post data to file resource %s", url) file_path = url[len("file://"):] try: @@ -101,10 +106,7 @@ def read_file_or_url(url, timeout=5, retries=10, raise UrlError(cause=e, code=code, headers=None, url=url) return FileResponse(file_path, contents=contents) else: - return readurl(url, timeout=timeout, retries=retries, - headers_cb=headers_cb, data=data, - sec_between=sec_between, ssl_details=ssl_details, - exception_cb=exception_cb) + return readurl(url, **kwargs) # Made to have same accessors as UrlResponse so that the @@ -201,6 +203,35 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, check_status=True, allow_redirects=True, exception_cb=None, session=None, infinite=False, log_req_resp=True, request_method=None): + """Wrapper around requests.Session to read the url and retry if necessary + + :param url: Mandatory url to request. + :param data: Optional form data to post the URL. Will set request_method + to 'POST' if present. + :param timeout: Timeout in seconds to wait for a response + :param retries: Number of times to retry on exception if exception_cb is + None or exception_cb returns True for the exception caught. Default is + to fail with 0 retries on exception. + :param sec_between: Default 1: amount of seconds passed to time.sleep + between retries. None or -1 means don't sleep. + :param headers: Optional dict of headers to send during request + :param headers_cb: Optional callable returning a dict of values to send as + headers during request + :param ssl_details: Optional dict providing key_file, ca_certs, and + cert_file keys for use on in ssl connections. + :param check_status: Optional boolean set True to raise when HTTPError + occurs. Default: True. + :param allow_redirects: Optional boolean passed straight to Session.request + as 'allow_redirects'. Default: True. + :param exception_cb: Optional callable which accepts the params + msg and exception and returns a boolean True if retries are permitted. + :param session: Optional exiting requests.Session instance to reuse. + :param infinite: Bool, set True to retry indefinitely. Default: False. + :param log_req_resp: Set False to turn off verbose debug messages. + :param request_method: String passed as 'method' to Session.request. + Typically GET, or POST. Default: POST if data is provided, GET + otherwise. + """ url = _cleanurl(url) req_args = { 'url': url, diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index ed83d2d8..15af1daf 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -224,7 +224,7 @@ class UserDataProcessor(object): content = util.load_file(include_once_fn) else: try: - resp = read_file_or_url(include_url, + resp = read_file_or_url(include_url, timeout=5, retries=10, ssl_details=self.ssl_details) if include_once_on and resp.ok(): util.write_file(include_once_fn, resp.contents, diff --git a/cloudinit/util.py b/cloudinit/util.py index 78b6a2d0..9d9d5c72 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -986,13 +986,6 @@ def load_yaml(blob, default=None, allowed=(dict,)): def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): - if base.startswith("/"): - base = "file://%s" % base - - # default retries for file is 0. 
-    if base.startswith("file://"):
-        retries = file_retries
-
     if base.find("%s") >= 0:
         ud_url = base % ("user-data" + ext)
         md_url = base % ("meta-data" + ext)
@@ -1000,14 +993,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
         ud_url = "%s%s%s" % (base, "user-data", ext)
         md_url = "%s%s%s" % (base, "meta-data", ext)

-    md_resp = url_helper.read_file_or_url(md_url, timeout, retries,
-                                          file_retries)
+    md_resp = url_helper.read_file_or_url(md_url, timeout=timeout,
+                                          retries=retries)
     md = None
     if md_resp.ok():
         md = load_yaml(decode_binary(md_resp.contents), default={})

-    ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries,
-                                          file_retries)
+    ud_resp = url_helper.read_file_or_url(ud_url, timeout=timeout,
+                                          retries=retries)
     ud = None
     if ud_resp.ok():
         ud = ud_resp.contents
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index bd006aba..bd17f636 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -212,8 +212,10 @@ class TestAzureEndpointHttpClient(CiTestCase):
         response = client.get(url, secure=False)
         self.assertEqual(1, self.read_file_or_url.call_count)
         self.assertEqual(self.read_file_or_url.return_value, response)
-        self.assertEqual(mock.call(url, headers=self.regular_headers),
-                         self.read_file_or_url.call_args)
+        self.assertEqual(
+            mock.call(url, headers=self.regular_headers, retries=10,
+                      timeout=5),
+            self.read_file_or_url.call_args)

     def test_secure_get(self):
         url = 'MyTestUrl'
@@ -227,8 +229,10 @@ class TestAzureEndpointHttpClient(CiTestCase):
         response = client.get(url, secure=True)
         self.assertEqual(1, self.read_file_or_url.call_count)
         self.assertEqual(self.read_file_or_url.return_value, response)
-        self.assertEqual(mock.call(url, headers=expected_headers),
-                         self.read_file_or_url.call_args)
+        self.assertEqual(
+            mock.call(url, headers=expected_headers, retries=10,
+                      timeout=5),
+            self.read_file_or_url.call_args)

     def test_post(self):
         data = mock.MagicMock()
@@ -238,7 +242,8 @@ class TestAzureEndpointHttpClient(CiTestCase):
         self.assertEqual(1, self.read_file_or_url.call_count)
         self.assertEqual(self.read_file_or_url.return_value, response)
         self.assertEqual(
-            mock.call(url, data=data, headers=self.regular_headers),
+            mock.call(url, data=data, headers=self.regular_headers, retries=10,
+                      timeout=5),
             self.read_file_or_url.call_args)

     def test_post_with_extra_headers(self):
@@ -250,7 +255,8 @@ class TestAzureEndpointHttpClient(CiTestCase):
         expected_headers = self.regular_headers.copy()
         expected_headers.update(extra_headers)
         self.assertEqual(
-            mock.call(mock.ANY, data=mock.ANY, headers=expected_headers),
+            mock.call(mock.ANY, data=mock.ANY, headers=expected_headers,
+                      retries=10, timeout=5),
             self.read_file_or_url.call_args)

-- cgit v1.2.3

From 129b1c4ea250619bd7caed7aaffacc796b0139f2 Mon Sep 17 00:00:00 2001
From: AOhassan <37305877+AOhassan@users.noreply.github.com>
Date: Thu, 12 Dec 2019 13:51:42 -0800
Subject: azure: avoid re-running cloud-init when instance-id is byte-swapped (#84)

Azure stores the instance ID with an incorrect byte ordering for the
first three hyphen-delimited parts. This results in invalid
is_new_instance checks forcing Azure datasource to recrawl the metadata
service.

When persisting instance-id from the metadata service, swap the
instance-id string byte order such that it is consistent with
that returned by dmi information.
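For illustration only (this snippet is a standalone sketch, not code from
this patch; the patch's actual helper is is_byte_swapped() below): the swap
reverses the bytes, two hex characters at a time, of the first three
hyphen-delimited groups, leaving the last two groups untouched:

    import textwrap

    def swap_uuid_prefix(uuid_str):
        # Hypothetical helper: reverse the byte order (two hex chars per
        # byte) of the first three groups; keep the last two groups.
        parts = uuid_str.split('-')
        swapped = [''.join(reversed(textwrap.wrap(part, 2)))
                   for part in parts[:3]]
        return '-'.join(swapped + parts[3:])

    # swap_uuid_prefix('D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8')
    # returns '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8'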
Check whether the instance-id string is a byte-swapped match when
determining whether the Azure platform instance-id has actually changed.
---
 cloudinit/sources/DataSourceAzure.py               | 16 ++++++++++---
 cloudinit/sources/helpers/azure.py                 | 27 ++++++++++++++++++++++
 tests/unittests/test_datasource/test_azure.py      | 24 ++++++++++++++++---
 .../unittests/test_datasource/test_azure_helper.py | 19 +++++++++++++++
 4 files changed, 80 insertions(+), 6 deletions(-)

(limited to 'cloudinit/sources/helpers')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 87a848ce..24f448c5 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -33,7 +33,8 @@ from cloudinit.sources.helpers.azure import (
     get_boot_telemetry,
     get_system_info,
     report_diagnostic_event,
-    EphemeralDHCPv4WithReporting)
+    EphemeralDHCPv4WithReporting,
+    is_byte_swapped)

 LOG = logging.getLogger(__name__)

@@ -471,8 +472,7 @@ class DataSourceAzure(sources.DataSource):
             seed = _get_random_seed()
             if seed:
                 crawled_data['metadata']['random_seed'] = seed
-        crawled_data['metadata']['instance-id'] = util.read_dmi_data(
-            'system-uuid')
+        crawled_data['metadata']['instance-id'] = self._iid()

         if perform_reprovision:
             LOG.info("Reporting ready to Azure after getting ReprovisionData")
@@ -558,6 +558,16 @@ class DataSourceAzure(sources.DataSource):
         # quickly (local check only) if self.instance_id is still valid
         return sources.instance_id_matches_system_uuid(self.get_instance_id())

+    def _iid(self, previous=None):
+        prev_iid_path = os.path.join(
+            self.paths.get_cpath('data'), 'instance-id')
+        iid = util.read_dmi_data('system-uuid')
+        if os.path.exists(prev_iid_path):
+            previous = util.load_file(prev_iid_path).strip()
+            if is_byte_swapped(previous, iid):
+                return previous
+        return iid
+
     @azure_ds_telemetry_reporter
     def setup(self, is_new_instance):
         if self._negotiated is False:
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index f5cdb3fd..fc760581 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -7,6 +7,7 @@ import re
 import socket
 import struct
 import time
+import textwrap

 from cloudinit.net import dhcp
 from cloudinit import stages
@@ -48,6 +49,32 @@ def azure_ds_telemetry_reporter(func):
     return impl


+def is_byte_swapped(previous_id, current_id):
+    """
+    Azure stores the instance ID with an incorrect byte ordering for the
+    first three hyphen-delimited parts. This corrects the byte order such
+    that it is consistent with that returned by the metadata service.
+ """ + if previous_id == current_id: + return False + + def swap_bytestring(s, width=2): + dd = [byte for byte in textwrap.wrap(s, 2)] + dd.reverse() + return ''.join(dd) + + parts = current_id.split('-') + swapped_id = '-'.join([ + swap_bytestring(parts[0]), + swap_bytestring(parts[1]), + swap_bytestring(parts[2]), + parts[3], + parts[4] + ]) + + return previous_id == swapped_id + + @azure_ds_telemetry_reporter def get_boot_telemetry(): """Report timestamps related to kernel initialization and systemd diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 59e351de..a809fd87 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -477,7 +477,7 @@ scbus-1 on xpt0 bus 0 'public-keys': [], }) - self.instance_id = 'test-instance-id' + self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' def _dmi_mocks(key): if key == 'system-uuid': @@ -645,7 +645,7 @@ scbus-1 on xpt0 bus 0 'azure_data': { 'configurationsettype': 'LinuxProvisioningConfiguration'}, 'imds': NETWORK_METADATA, - 'instance-id': 'test-instance-id', + 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', 'local-hostname': u'myhost', 'random_seed': 'wild'} @@ -1091,6 +1091,24 @@ scbus-1 on xpt0 bus 0 self.assertTrue(ret) self.assertEqual('value', dsrc.metadata['test']) + def test_instance_id_endianness(self): + """Return the previous iid when dmi uuid is the byteswapped iid.""" + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + # byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual( + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + # not byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual( + 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + def test_instance_id_from_dmidecode_used(self): ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) ds.get_data() @@ -1292,7 +1310,7 @@ class TestAzureBounce(CiTestCase): def _dmi_mocks(key): if key == 'system-uuid': - return 'test-instance-id' + return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' elif key == 'chassis-asset-tag': return '7783-7084-3265-9085-8269-3286-77' raise RuntimeError('should not get here') diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index bd17f636..007df09f 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -170,6 +170,25 @@ class TestGoalStateParsing(CiTestCase): goal_state = self._get_goal_state(instance_id=instance_id) self.assertEqual(instance_id, goal_state.instance_id) + def test_instance_id_byte_swap(self): + """Return true when previous_iid is byteswapped current_iid""" + previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8" + self.assertTrue( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + + def test_instance_id_no_byte_swap_same_instance_id(self): + previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + self.assertFalse( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + + def test_instance_id_no_byte_swap_diff_instance_id(self): + previous_iid = 
"D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + self.assertFalse( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + def test_certificates_xml_parsed_and_fetched_correctly(self): http_client = mock.MagicMock() certificates_url = 'TestCertificatesUrl' -- cgit v1.2.3 From 8c4fd886931abcf2cc8627a47463907d655b35c3 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 21 Jan 2020 17:15:30 -0500 Subject: Start removing dependency on six (#178) * url_helper: drop six * url_helper: sort imports * log: drop six * log: sort imports * handlers/__init__: drop six * handlers/__init__: sort imports * user_data: drop six * user_data: sort imports * sources/__init__: drop six * sources/__init__: sort imports * DataSourceOVF: drop six * DataSourceOVF: sort imports * sources/helpers/openstack: drop six * sources/helpers/openstack: sort imports * mergers/m_str: drop six This also allowed simplification of the logic, as we will never encounter a non-string text type. * type_utils: drop six * mergers/m_dict: drop six * mergers/m_list: drop six * cmd/query: drop six * mergers/__init__: drop six * net/cmdline: drop six * reporting/handlers: drop six * reporting/handlers: sort imports --- cloudinit/cmd/query.py | 3 +-- cloudinit/handlers/__init__.py | 9 +++------ cloudinit/log.py | 14 +++++--------- cloudinit/mergers/__init__.py | 4 +--- cloudinit/mergers/m_dict.py | 4 +--- cloudinit/mergers/m_list.py | 4 +--- cloudinit/mergers/m_str.py | 9 ++------- cloudinit/net/cmdline.py | 5 +---- cloudinit/reporting/handlers.py | 19 ++++++------------- cloudinit/sources/DataSourceOVF.py | 8 ++------ cloudinit/sources/__init__.py | 21 +++++++++------------ cloudinit/sources/helpers/openstack.py | 8 ++------ cloudinit/type_utils.py | 25 +++++++------------------ cloudinit/url_helper.py | 20 +++++--------------- cloudinit/user_data.py | 11 ++++------- 15 files changed, 50 insertions(+), 114 deletions(-) (limited to 'cloudinit/sources/helpers') diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py index 1d888b9d..e3db8679 100644 --- a/cloudinit/cmd/query.py +++ b/cloudinit/cmd/query.py @@ -5,7 +5,6 @@ import argparse from errno import EACCES import os -import six import sys from cloudinit.handlers.jinja_template import ( @@ -149,7 +148,7 @@ def handle_args(name, args): response = '\n'.join(sorted(response.keys())) elif args.list_keys: response = '\n'.join(sorted(response.keys())) - if not isinstance(response, six.string_types): + if not isinstance(response, str): response = util.json_dumps(response) print(response) return 0 diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 0db75af9..a409ff8a 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -10,14 +10,12 @@ import abc import os -import six - -from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES) from cloudinit import importer from cloudinit import log as logging from cloudinit import type_utils from cloudinit import util +from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES) LOG = logging.getLogger(__name__) @@ -60,8 +58,7 @@ INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()), key=(lambda e: 0 - len(e))) -@six.add_metaclass(abc.ABCMeta) -class Handler(object): +class Handler(metaclass=abc.ABCMeta): def __init__(self, frequency, version=2): self.handler_version = version @@ -159,7 +156,7 @@ def _extract_first_or_bytes(blob, size): # Extract the first line or upto X symbols for text objects # Extract first X 
     try:
-        if isinstance(blob, six.string_types):
+        if isinstance(blob, str):
             start = blob.split("\n", 1)[0]
         else:
             # We want to avoid decoding the whole blob (it might be huge)
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 5ae312ba..827db12b 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -8,17 +8,13 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.

+import collections
+import io
 import logging
 import logging.config
 import logging.handlers
-
-import collections
 import os
 import sys
-
-import six
-from six import StringIO
-
 import time

 # Logging levels for easy access
@@ -74,13 +70,13 @@ def setupLogging(cfg=None):

     log_cfgs = []
     log_cfg = cfg.get('logcfg')
-    if log_cfg and isinstance(log_cfg, six.string_types):
+    if log_cfg and isinstance(log_cfg, str):
         # If there is a 'logcfg' entry in the config,
         # respect it, it is the old keyname
         log_cfgs.append(str(log_cfg))
     elif "log_cfgs" in cfg:
         for a_cfg in cfg['log_cfgs']:
-            if isinstance(a_cfg, six.string_types):
+            if isinstance(a_cfg, str):
                 log_cfgs.append(a_cfg)
             elif isinstance(a_cfg, (collections.Iterable)):
                 cfg_str = [str(c) for c in a_cfg]
@@ -100,7 +96,7 @@ def setupLogging(cfg=None):
             # is acting as a file)
             pass
         else:
-            log_cfg = StringIO(log_cfg)
+            log_cfg = io.StringIO(log_cfg)
         # Attempt to load its config
         logging.config.fileConfig(log_cfg)
         # The first one to work wins!
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
index 7fbc25ff..668e3cd6 100644
--- a/cloudinit/mergers/__init__.py
+++ b/cloudinit/mergers/__init__.py
@@ -6,8 +6,6 @@

 import re

-import six
-
 from cloudinit import importer
 from cloudinit import log as logging
 from cloudinit import type_utils
@@ -85,7 +83,7 @@ def dict_extract_mergers(config):
     raw_mergers = config.pop('merge_type', None)
     if raw_mergers is None:
         return parsed_mergers
-    if isinstance(raw_mergers, six.string_types):
+    if isinstance(raw_mergers, str):
         return string_extract_mergers(raw_mergers)
     for m in raw_mergers:
         if isinstance(m, (dict)):
diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py
index 6c5fddc2..93472f13 100644
--- a/cloudinit/mergers/m_dict.py
+++ b/cloudinit/mergers/m_dict.py
@@ -4,8 +4,6 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.

-import six
-
 DEF_MERGE_TYPE = 'no_replace'
 MERGE_TYPES = ('replace', DEF_MERGE_TYPE,)

@@ -47,7 +45,7 @@ class Merger(object):
             return new_v
         if isinstance(new_v, (list, tuple)) and self._recurse_array:
             return self._merger.merge(old_v, new_v)
-        if isinstance(new_v, six.string_types) and self._recurse_str:
+        if isinstance(new_v, str) and self._recurse_str:
             return self._merger.merge(old_v, new_v)
         if isinstance(new_v, (dict)) and self._recurse_dict:
             return self._merger.merge(old_v, new_v)
diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
index daa0469a..19f32771 100644
--- a/cloudinit/mergers/m_list.py
+++ b/cloudinit/mergers/m_list.py
@@ -4,8 +4,6 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.
-import six
-
 DEF_MERGE_TYPE = 'replace'
 MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace')

@@ -63,7 +61,7 @@ class Merger(object):
             return old_v
         if isinstance(new_v, (list, tuple)) and self._recurse_array:
             return self._merger.merge(old_v, new_v)
-        if isinstance(new_v, six.string_types) and self._recurse_str:
+        if isinstance(new_v, str) and self._recurse_str:
             return self._merger.merge(old_v, new_v)
         if isinstance(new_v, (dict)) and self._recurse_dict:
             return self._merger.merge(old_v, new_v)
diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py
index 629df58e..539e3e29 100644
--- a/cloudinit/mergers/m_str.py
+++ b/cloudinit/mergers/m_str.py
@@ -4,8 +4,6 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.

-import six
-

 class Merger(object):
     def __init__(self, _merger, opts):
@@ -23,13 +21,10 @@ class Merger(object):
     # perform the following action, if appending we will
     # merge them together, otherwise we will just return value.
     def _on_str(self, value, merge_with):
-        if not isinstance(value, six.string_types):
+        if not isinstance(value, str):
             return merge_with
         if not self._append:
             return merge_with
-        if isinstance(value, six.text_type):
-            return value + six.text_type(merge_with)
-        else:
-            return value + six.binary_type(merge_with)
+        return value + merge_with

 # vi: ts=4 expandtab
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index 55166ea8..bfb40aae 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -12,8 +12,6 @@ import gzip
 import io
 import os

-import six
-
 from cloudinit import util

 from . import get_devicelist
@@ -22,8 +20,7 @@ from . import read_sys_net_safe
 _OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface"


-@six.add_metaclass(abc.ABCMeta)
-class InitramfsNetworkConfigSource(object):
+class InitramfsNetworkConfigSource(metaclass=abc.ABCMeta):
     """ABC for net config sources that read config written by initramfses"""

     @abc.abstractmethod
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 6605e795..946df7e0 100755
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -1,25 +1,19 @@
 # This file is part of cloud-init. See LICENSE file for license information.

 import abc
-import uuid
 import fcntl
 import json
-import six
 import os
+import queue
 import struct
 import threading
 import time
+import uuid
+from datetime import datetime

 from cloudinit import log as logging
 from cloudinit.registry import DictRegistry
 from cloudinit import (url_helper, util)
-from datetime import datetime
-from six.moves.queue import Empty as QueueEmptyError
-
-if six.PY2:
-    from multiprocessing.queues import JoinableQueue as JQueue
-else:
-    from queue import Queue as JQueue

 LOG = logging.getLogger(__name__)

@@ -28,8 +22,7 @@ class ReportException(Exception):
     pass


-@six.add_metaclass(abc.ABCMeta)
-class ReportingHandler(object):
+class ReportingHandler(metaclass=abc.ABCMeta):
     """Base class for report handlers.
     Implement :meth:`~publish_event` for controlling what
@@ -141,7 +134,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
                 self._kvp_file_path)

         self._event_types = event_types
-        self.q = JQueue()
+        self.q = queue.Queue()
         self.incarnation_no = self._get_incarnation_no()
         self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
                                                   self.incarnation_no)
@@ -303,7 +296,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
                     # get all the rest of the events in the queue
                     event = self.q.get(block=False)
                     items_from_queue += 1
-                except QueueEmptyError:
+                except queue.Empty:
                     event = None
                 try:
                     self._append_kvp_item(encoded_data)
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 896841e3..9f6e6b6c 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -8,19 +8,15 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.

-from xml.dom import minidom
-
 import base64
 import os
 import re
 import time
-
-import six
+from xml.dom import minidom

 from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import util
-
 from cloudinit.sources.helpers.vmware.imc.config \
     import Config
 from cloudinit.sources.helpers.vmware.imc.config_custom_script \
@@ -458,7 +454,7 @@ def maybe_cdrom_device(devname):
     """
     if not devname:
         return False
-    elif not isinstance(devname, six.string_types):
+    elif not isinstance(devname, str):
         raise ValueError("Unexpected input for devname: %s" % devname)

     # resolve '..' and multi '/' elements
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index e6baf8f4..dd93cfd8 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -9,21 +9,19 @@
 # This file is part of cloud-init. See LICENSE file for license information.

 import abc
-from collections import namedtuple
 import copy
 import json
 import os
-import six
+from collections import namedtuple

-from cloudinit.atomic_helper import write_json
 from cloudinit import importer
 from cloudinit import log as logging
 from cloudinit import net
-from cloudinit.event import EventType
 from cloudinit import type_utils
 from cloudinit import user_data as ud
 from cloudinit import util
-
+from cloudinit.atomic_helper import write_json
+from cloudinit.event import EventType
 from cloudinit.filters import launch_index
 from cloudinit.reporting import events

@@ -136,8 +134,7 @@ URLParams = namedtuple(
     'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])


-@six.add_metaclass(abc.ABCMeta)
-class DataSource(object):
+class DataSource(metaclass=abc.ABCMeta):

     dsmode = DSMODE_NETWORK
     default_locale = 'en_US.UTF-8'
@@ -436,7 +433,7 @@ class DataSource(object):
             return self._cloud_name
         if self.metadata and self.metadata.get(METADATA_CLOUD_NAME_KEY):
             cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY)
-            if isinstance(cloud_name, six.string_types):
+            if isinstance(cloud_name, str):
                 self._cloud_name = cloud_name.lower()
             else:
                 self._cloud_name = self._get_cloud_name().lower()
@@ -718,8 +715,8 @@ def normalize_pubkey_data(pubkey_data):
     if not pubkey_data:
         return keys

-    if isinstance(pubkey_data, six.string_types):
-        return str(pubkey_data).splitlines()
+    if isinstance(pubkey_data, str):
+        return pubkey_data.splitlines()

     if isinstance(pubkey_data, (list, set)):
         return list(pubkey_data)
@@ -729,7 +726,7 @@ def normalize_pubkey_data(pubkey_data):
             # lp:506332 uec metadata service responds with
             # data that makes boto populate a string for 'klist' rather
             # than a list.
-            if isinstance(klist, six.string_types):
+            if isinstance(klist, str):
                 klist = [klist]
             if isinstance(klist, (list, set)):
                 for pkey in klist:
@@ -837,7 +834,7 @@ def convert_vendordata(data, recurse=True):
     """
     if not data:
         return None
-    if isinstance(data, six.string_types):
+    if isinstance(data, str):
         return data
     if isinstance(data, list):
         return copy.deepcopy(data)
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 0778f45a..441db506 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -12,15 +12,12 @@ import copy
 import functools
 import os

-import six
-
 from cloudinit import ec2_utils
 from cloudinit import log as logging
 from cloudinit import net
 from cloudinit import sources
 from cloudinit import url_helper
 from cloudinit import util
-
 from cloudinit.sources import BrokenMetadata

 # See https://docs.openstack.org/user-guide/cli-config-drive.html
@@ -163,8 +160,7 @@ class SourceMixin(object):
         return device


-@six.add_metaclass(abc.ABCMeta)
-class BaseReader(object):
+class BaseReader(metaclass=abc.ABCMeta):

     def __init__(self, base_path):
         self.base_path = base_path
@@ -227,7 +223,7 @@ class BaseReader(object):
         """

         load_json_anytype = functools.partial(
-            util.load_json, root_types=(dict, list) + six.string_types)
+            util.load_json, root_types=(dict, list, str))

         def datafiles(version):
             files = {}
diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py
index 6132654b..2c1ae368 100644
--- a/cloudinit/type_utils.py
+++ b/cloudinit/type_utils.py
@@ -10,29 +10,18 @@

 import types

-import six
-
-if six.PY3:
-    _NAME_TYPES = (
-        types.ModuleType,
-        types.FunctionType,
-        types.LambdaType,
-        type,
-    )
-else:
-    _NAME_TYPES = (
-        types.TypeType,
-        types.ModuleType,
-        types.FunctionType,
-        types.LambdaType,
-        types.ClassType,
-    )
+_NAME_TYPES = (
+    types.ModuleType,
+    types.FunctionType,
+    types.LambdaType,
+    type,
+)


 def obj_name(obj):
     if isinstance(obj, _NAME_TYPES):
-        return six.text_type(obj.__name__)
+        return str(obj.__name__)
     else:
         if not hasattr(obj, '__class__'):
             return repr(obj)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 1496a471..f6d68436 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -10,32 +10,22 @@

 import json
 import os
-import requests
-import six
 import time
-
 from email.utils import parsedate
 from errno import ENOENT
 from functools import partial
+from http.client import NOT_FOUND
 from itertools import count
-from requests import exceptions
+from urllib.parse import urlparse, urlunparse, quote

-from six.moves.urllib.parse import (
-    urlparse, urlunparse,
-    quote as urlquote)
+import requests
+from requests import exceptions

 from cloudinit import log as logging
 from cloudinit import version

 LOG = logging.getLogger(__name__)

-if six.PY2:
-    import httplib
-    NOT_FOUND = httplib.NOT_FOUND
-else:
-    import http.client
-    NOT_FOUND = http.client.NOT_FOUND
-

 # Check if requests has ssl support (added in requests >= 0.8.8)
 SSL_ENABLED = False
@@ -71,7 +61,7 @@ def combine_url(base, *add_ons):
         path = url_parsed[2]
         if path and not path.endswith("/"):
             path += "/"
-        path += urlquote(str(add_on), safe="/:")
+        path += quote(str(add_on), safe="/:")
         url_parsed[2] = path
     return urlunparse(url_parsed)
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 15af1daf..6f41b03a 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -9,14 +9,11 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 import os
-
 from email.mime.base import MIMEBase
 from email.mime.multipart import MIMEMultipart
 from email.mime.nonmultipart import MIMENonMultipart
 from email.mime.text import MIMEText

-import six
-
 from cloudinit import handlers
 from cloudinit import log as logging
 from cloudinit.url_helper import read_file_or_url, UrlError
@@ -259,7 +256,7 @@ class UserDataProcessor(object):
             #    filename and type not be present
             # or
             #  scalar(payload)
-            if isinstance(ent, six.string_types):
+            if isinstance(ent, str):
                 ent = {'content': ent}
             if not isinstance(ent, (dict)):
                 # TODO(harlowja) raise?
@@ -269,13 +266,13 @@ class UserDataProcessor(object):
             mtype = ent.get('type')
             if not mtype:
                 default = ARCHIVE_UNDEF_TYPE
-                if isinstance(content, six.binary_type):
+                if isinstance(content, bytes):
                     default = ARCHIVE_UNDEF_BINARY_TYPE
                 mtype = handlers.type_from_starts_with(content, default)

             maintype, subtype = mtype.split('/', 1)
             if maintype == "text":
-                if isinstance(content, six.binary_type):
+                if isinstance(content, bytes):
                     content = content.decode()
                 msg = MIMEText(content, _subtype=subtype)
             else:
@@ -348,7 +345,7 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
         msg.set_payload(data)
         return msg

-    if isinstance(raw_data, six.text_type):
+    if isinstance(raw_data, str):
         bdata = raw_data.encode('utf-8')
     else:
         bdata = raw_data
-- cgit v1.2.3
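Every hunk in this last commit follows the same mechanical pattern: a six
compatibility shim is replaced by its Python 3 built-in equivalent. A
condensed sketch of the before and after shapes (the class and method names
below are placeholders, not cloud-init identifiers):

    import abc
    import io
    import queue

    # Before: @six.add_metaclass(abc.ABCMeta) on a class deriving object.
    # After: the metaclass is declared directly in the class statement.
    class Example(metaclass=abc.ABCMeta):

        @abc.abstractmethod
        def run(self):
            """Subclasses must provide a concrete implementation."""

    # Other one-for-one substitutions made throughout the commit:
    #   six.string_types -> str, six.binary_type -> bytes,
    #   six.text_type -> str, six.StringIO -> io.StringIO,
    #   six.moves.queue -> queue,
    #   six.moves.urllib.parse.quote -> urllib.parse.quote
    buf = io.StringIO('in-memory text buffer')  # replaces six.StringIO
    q = queue.Queue()  # replaces the six.PY2 JoinableQueue fallback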