From 5b065316113b97aadb43e63cc31bb8639f6a6376 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 14 Dec 2018 03:24:26 +0000
Subject: Update to pylint 2.2.2.

The tip-pylint tox target correctly reported the invalid use of string
formatting. The change here is to:
a.) Fix the error that was caught.
b.) Move to pylint 2.2.2 for the default 'pylint' target.
---
 cloudinit/sources/DataSourceAzure.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit/sources/DataSourceAzure.py')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index e076d5dc..46efca4a 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -980,8 +980,8 @@ def read_azure_ovf(contents):
         raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
     if len(lpcs_nodes) > 1:
         raise BrokenAzureDataSource("found '%d' %ss" %
-                                    ("LinuxProvisioningConfigurationSet",
-                                     len(lpcs_nodes)))
+                                    (len(lpcs_nodes),
+                                     "LinuxProvisioningConfigurationSet"))
     lpcs = lpcs_nodes[0]

     if not lpcs.hasChildNodes():
--
cgit v1.2.3


From f19dc8fa62d4fd8de33311c3c75c5b6da440bebe Mon Sep 17 00:00:00 2001
From: Jason Zions
Date: Tue, 15 Jan 2019 17:05:47 +0000
Subject: [Azure] Increase retries when talking to Wireserver during
 metadata walk

Testing startup of large numbers of VMs (of varying distros) in Azure
shows that 3 retries results in a small percentage of failed VMs.
Increasing that by a few dramatically decreases the occurrence of
provisioning timeout errors.

The initial choice of "3 retries" was uninformed by heavy testing. Also,
the alternate provisioning mechanism for Azure (waagent) retries the
Wireserver crawl without limit. 10 retries seems a more reasonable
choice.
---
 cloudinit/sources/DataSourceAzure.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit/sources/DataSourceAzure.py')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 46efca4a..a4f998b3 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -416,7 +416,7 @@ class DataSourceAzure(sources.DataSource):
             raise sources.InvalidMetaDataException(msg)
         ret = self._reprovision()
         imds_md = get_metadata_from_imds(
-            self.fallback_interface, retries=3)
+            self.fallback_interface, retries=10)
         (md, userdata_raw, cfg, files) = ret
         self.seed = cdev
         crawled_data.update({
--
cgit v1.2.3


From 34f54360fcc1e0f805002a0b639d0a84eb2cb8ee Mon Sep 17 00:00:00 2001
From: "Jason Zions (MSFT)"
Date: Fri, 22 Feb 2019 13:26:31 +0000
Subject: azure: Filter list of ssh keys pulled from fabric

The Azure data source is expected to expose a list of ssh keys for the
user-to-be-provisioned in the crawled metadata. When configured to use
the __builtin__ agent, this list is built by the WALinuxAgentShim. The
shim retrieves the full set of certificates and public keys exposed to
the VM from the wireserver, extracts any ssh keys it can, and returns
that list.

This fix reduces that list of ssh keys to just the ones whose
fingerprints appear in the "administrative user" section of the
ovf-env.xml file. The Azure control plane exposes other ssh keys to the
VM for other reasons, but those should not be added to the
authorized_keys file for the provisioned user.
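The selection rule is small enough to sketch standalone. The following is
an illustrative sketch only, using made-up fingerprints; the real
implementation is the _filter_pubkeys helper added to WALinuxAgentShim in
the diff below:

    # Sketch of the filtering rule described above (hypothetical data).
    def filter_pubkeys(keys_by_fingerprint, pubkey_info):
        keys = []
        for pubkey in pubkey_info:
            if pubkey.get('value'):
                # ovf-env.xml carried the key material directly
                keys.append(pubkey['value'])
            elif pubkey.get('fingerprint') in keys_by_fingerprint:
                # match a wireserver certificate by its SHA1 fingerprint
                keys.append(keys_by_fingerprint[pubkey['fingerprint']])
        return keys

    certs = {'fp1': 'ssh-rsa AAA...', 'fp2': 'control-plane-only-key'}
    ovf = [{'fingerprint': 'fp1', 'path': '/home/user/.ssh/authorized_keys'}]
    assert filter_pubkeys(certs, ovf) == ['ssh-rsa AAA...']  # fp2 is dropped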
---
 cloudinit/sources/DataSourceAzure.py               |  13 +-
 cloudinit/sources/helpers/azure.py                 | 109 ++++++++++-----
 tests/data/azure/parse_certificates_fingerprints   |   4 +
 tests/data/azure/parse_certificates_pem            | 152 +++++++++++++++++++++
 tests/data/azure/pubkey_extract_cert               |  13 ++
 tests/data/azure/pubkey_extract_ssh_key            |   1 +
 .../unittests/test_datasource/test_azure_helper.py |  71 +++++++++-
 7 files changed, 322 insertions(+), 41 deletions(-)
 create mode 100644 tests/data/azure/parse_certificates_fingerprints
 create mode 100644 tests/data/azure/parse_certificates_pem
 create mode 100644 tests/data/azure/pubkey_extract_cert
 create mode 100644 tests/data/azure/pubkey_extract_ssh_key

(limited to 'cloudinit/sources/DataSourceAzure.py')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index a4f998b3..eccbee5a 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -627,9 +627,11 @@ class DataSourceAzure(sources.DataSource):
         if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
             self.bounce_network_with_azure_hostname()

+            pubkey_info = self.cfg.get('_pubkeys', None)
             metadata_func = partial(get_metadata_from_fabric,
                                     fallback_lease_file=self.
-                                    dhclient_lease_file)
+                                    dhclient_lease_file,
+                                    pubkey_info=pubkey_info)
         else:
             metadata_func = self.get_metadata_from_agent

@@ -642,6 +644,7 @@ class DataSourceAzure(sources.DataSource):
                 "Error communicating with Azure fabric; You may experience."
                 "connectivity issues.", exc_info=True)
             return False
+        util.del_file(REPORTED_READY_MARKER_FILE)
         util.del_file(REPROVISION_MARKER_FILE)
         return fabric_data

@@ -909,13 +912,15 @@ def find_child(node, filter_func):
 def load_azure_ovf_pubkeys(sshnode):
     # This parses a 'SSH' node formatted like below, and returns
     # an array of dicts.
-    #  [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
-    #    'path': 'where/to/go'}]
+    #  [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
+    #    'path': '/where/to/go'}]
     #
     # <SSH><PublicKeys>
-    #   <PublicKey><Fingerprint>ABC</Fingerprint><Path>/ABC</Path>
+    #   <PublicKey><Fingerprint>ABC</Fingerprint><Path>/x/y/z</Path>
     #   ...
     # </PublicKeys></SSH>
+    # Under some circumstances, there may be a <Value> element along with
+    # the Fingerprint and Path. Pass those along if they appear.
results = find_child(sshnode, lambda n: n.localName == "PublicKeys") if len(results) == 0: return [] diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index e5696b1f..2829dd20 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -138,9 +138,36 @@ class OpenSSLManager(object): self.certificate = certificate LOG.debug('New certificate generated.') - def parse_certificates(self, certificates_xml): - tag = ElementTree.fromstring(certificates_xml).find( - './/Data') + @staticmethod + def _run_x509_action(action, cert): + cmd = ['openssl', 'x509', '-noout', action] + result, _ = util.subp(cmd, data=cert) + return result + + def _get_ssh_key_from_cert(self, certificate): + pub_key = self._run_x509_action('-pubkey', certificate) + keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] + ssh_key, _ = util.subp(keygen_cmd, data=pub_key) + return ssh_key + + def _get_fingerprint_from_cert(self, certificate): + """openssl x509 formats fingerprints as so: + 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ + B6:A8:BF:27:D4:73\n' + + Azure control plane passes that fingerprint as so: + '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + """ + raw_fp = self._run_x509_action('-fingerprint', certificate) + eq = raw_fp.find('=') + octets = raw_fp[eq+1:-1].split(':') + return ''.join(octets) + + def _decrypt_certs_from_xml(self, certificates_xml): + """Decrypt the certificates XML document using the our private key; + return the list of certs and private keys contained in the doc. + """ + tag = ElementTree.fromstring(certificates_xml).find('.//Data') certificates_content = tag.text lines = [ b'MIME-Version: 1.0', @@ -151,32 +178,30 @@ class OpenSSLManager(object): certificates_content.encode('utf-8'), ] with cd(self.tmpdir): - with open('Certificates.p7m', 'wb') as f: - f.write(b'\n'.join(lines)) out, _ = util.subp( - 'openssl cms -decrypt -in Certificates.p7m -inkey' + 'openssl cms -decrypt -in /dev/stdin -inkey' ' {private_key} -recip {certificate} | openssl pkcs12 -nodes' ' -password pass:'.format(**self.certificate_names), - shell=True) - private_keys, certificates = [], [] + shell=True, data=b'\n'.join(lines)) + return out + + def parse_certificates(self, certificates_xml): + """Given the Certificates XML document, return a dictionary of + fingerprints and associated SSH keys derived from the certs.""" + out = self._decrypt_certs_from_xml(certificates_xml) current = [] + keys = {} for line in out.splitlines(): current.append(line) if re.match(r'[-]+END .*?KEY[-]+$', line): - private_keys.append('\n'.join(current)) + # ignore private_keys current = [] elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line): - certificates.append('\n'.join(current)) + certificate = '\n'.join(current) + ssh_key = self._get_ssh_key_from_cert(certificate) + fingerprint = self._get_fingerprint_from_cert(certificate) + keys[fingerprint] = ssh_key current = [] - keys = [] - for certificate in certificates: - with cd(self.tmpdir): - public_key, _ = util.subp( - 'openssl x509 -noout -pubkey |' - 'ssh-keygen -i -m PKCS8 -f /dev/stdin', - data=certificate, - shell=True) - keys.append(public_key) return keys @@ -206,7 +231,6 @@ class WALinuxAgentShim(object): self.dhcpoptions = dhcp_options self._endpoint = None self.openssl_manager = None - self.values = {} self.lease_file = fallback_lease_file def clean_up(self): @@ -328,8 +352,9 @@ class WALinuxAgentShim(object): LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return 
endpoint_ip_address - def register_with_azure_and_fetch_data(self): - self.openssl_manager = OpenSSLManager() + def register_with_azure_and_fetch_data(self, pubkey_info=None): + if self.openssl_manager is None: + self.openssl_manager = OpenSSLManager() http_client = AzureEndpointHttpClient(self.openssl_manager.certificate) LOG.info('Registering with Azure...') attempts = 0 @@ -347,16 +372,37 @@ class WALinuxAgentShim(object): attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) - public_keys = [] - if goal_state.certificates_xml is not None: + ssh_keys = [] + if goal_state.certificates_xml is not None and pubkey_info is not None: LOG.debug('Certificate XML found; parsing out public keys.') - public_keys = self.openssl_manager.parse_certificates( + keys_by_fingerprint = self.openssl_manager.parse_certificates( goal_state.certificates_xml) - data = { - 'public-keys': public_keys, - } + ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info) self._report_ready(goal_state, http_client) - return data + return {'public-keys': ssh_keys} + + def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info): + """cloud-init expects a straightforward array of keys to be dropped + into the user's authorized_keys file. Azure control plane exposes + multiple public keys to the VM via wireserver. Select just the + user's key(s) and return them, ignoring any other certs. + """ + keys = [] + for pubkey in pubkey_info: + if 'value' in pubkey and pubkey['value']: + keys.append(pubkey['value']) + elif 'fingerprint' in pubkey and pubkey['fingerprint']: + fingerprint = pubkey['fingerprint'] + if fingerprint in keys_by_fingerprint: + keys.append(keys_by_fingerprint[fingerprint]) + else: + LOG.warning("ovf-env.xml specified PublicKey fingerprint " + "%s not found in goalstate XML", fingerprint) + else: + LOG.warning("ovf-env.xml specified PublicKey with neither " + "value nor fingerprint: %s", pubkey) + + return keys def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') @@ -373,11 +419,12 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') -def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): +def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, + pubkey_info=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts) try: - return shim.register_with_azure_and_fetch_data() + return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) finally: shim.clean_up() diff --git a/tests/data/azure/parse_certificates_fingerprints b/tests/data/azure/parse_certificates_fingerprints new file mode 100644 index 00000000..f7293c56 --- /dev/null +++ b/tests/data/azure/parse_certificates_fingerprints @@ -0,0 +1,4 @@ +ECEDEB3B8488D31AF3BC4CCED493F64B7D27D7B1 +073E19D14D1C799224C6A0FD8DDAB6A8BF27D473 +4C16E7FAD6297D74A9B25EB8F0A12808CEBE293E +929130695289B450FE45DCD5F6EF0CDE69865867 diff --git a/tests/data/azure/parse_certificates_pem b/tests/data/azure/parse_certificates_pem new file mode 100644 index 00000000..3521ea3a --- /dev/null +++ b/tests/data/azure/parse_certificates_pem @@ -0,0 +1,152 @@ +Bag Attributes + localKeyID: 01 00 00 00 + Microsoft CSP Name: Microsoft Enhanced Cryptographic Provider v1.0 +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDlEe5fUqwdrQTP +W2oVlGK2f31q/8ULT8KmOTyUvL0RPdJQ69vvHOc5Q2CKg2eviHC2LWhF8WmpnZj6 
+61RL0GeFGizwvU8Moebw5p3oqdcgoGpHVtxf+mr4QcWF58/Fwez0dA4hcsimVNBz +eNpBBUIKNBMTBG+4d6hcQBUAGKUdGRcCGEyTqXLU0MgHjxC9JgVqWJl+X2LcAGj5 +7J+tGYGTLzKJmeCeGVNN5ZtJ0T85MYHCKQk1/FElK+Kq5akovXffQHjlnCPcx0NJ +47NBjlPaFp2gjnAChn79bT4iCjOFZ9avWpqRpeU517UCnY7djOr3fuod/MSQyh3L +Wuem1tWBAgMBAAECggEBAM4ZXQRs6Kjmo95BHGiAEnSqrlgX+dycjcBq3QPh8KZT +nifqnf48XhnackENy7tWIjr3DctoUq4mOp8AHt77ijhqfaa4XSg7fwKeK9NLBGC5 +lAXNtAey0o2894/sKrd+LMkgphoYIUnuI4LRaGV56potkj/ZDP/GwTcG/R4SDnTn +C1Nb05PNTAPQtPZrgPo7TdM6gGsTnFbVrYHQLyg2Sq/osHfF15YohB01esRLCAwb +EF8JkRC4hWIZoV7BsyQ39232zAJQGGla7+wKFs3kObwh3VnFkQpT94KZnNiZuEfG +x5pW4Pn3gXgNsftscXsaNe/M9mYZqo//Qw7NvUIvAvECgYEA9AVveyK0HOA06fhh ++3hUWdvw7Pbrl+e06jO9+bT1RjQMbHKyI60DZyVGuAySN86iChJRoJr5c6xj+iXU +cR6BVJDjGH5t1tyiK2aYf6hEpK9/j8Z54UiVQ486zPP0PGfT2TO4lBLK+8AUmoaH +gk21ul8QeVCeCJa/o+xEoRFvzcUCgYEA8FCbbvInrUtNY+9eKaUYoNodsgBVjm5X +I0YPUL9D4d+1nvupHSV2NVmQl0w1RaJwrNTafrl5LkqjhQbmuWNta6QgfZzSA3LB +lWXo1Mm0azKdcD3qMGbvn0Q3zU+yGNEgmB/Yju3/NtgYRG6tc+FCWRbPbiCnZWT8 +v3C2Y0XggI0CgYEA2/jCZBgGkTkzue5kNVJlh5OS/aog+pCvL6hxCtarfBuTT3ed +Sje+p46cz3DVpmUpATc+Si8py7KNdYQAm/BJ2be6X+woi9Xcgo87zWgcaPCjZzId +0I2jsIE/Gl6XvpRCDrxnGWRPgt3GNP4szbPLrDPiH9oie8+Y9eYYf7G+PZkCgYEA +nRSzZOPYV4f/QDF4pVQLMykfe/iH9B/fyWjEHg3He19VQmRReIHCMMEoqBziPXAe +onpHj8oAkeer1wpZyhhZr6CKtFDLXgGm09bXSC/IRMHC81klORovyzU2HHfZfCtG +WOmIDnU2+0xpIGIP8sztJ3qnf97MTJSkOSadsWo9gwkCgYEAh5AQmJQmck88Dff2 +qIfJIX8d+BDw47BFJ89OmMFjGV8TNB+JO+AV4Vkodg4hxKpLqTFZTTUFgoYfy5u1 +1/BhAjpmCDCrzubCFhx+8VEoM2+2+MmnuQoMAm9+/mD/IidwRaARgXgvEmp7sfdt +RyWd+p2lYvFkC/jORQtDMY4uW1o= +-----END PRIVATE KEY----- +Bag Attributes + localKeyID: 02 00 00 00 + Microsoft CSP Name: Microsoft Strong Cryptographic Provider +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDlQhPrZwVQYFV4 +FBc0H1iTXYaznMpwZvEITKtXWACzTdguUderEVOkXW3HTi5HvC2rMayt0nqo3zcd +x1eGiqdjpZQ/wMrkz9wNEM/nNMsXntEwxk0jCVNKB/jz6vf+BOtrSI01SritAGZW +dpKoTUyztT8C2mA3X6D8g3m4Dd07ltnzxaDqAQIU5jBHh3f/Q14tlPNZWUIiqVTC +gDxgAe7MDmfs9h3CInTBX1XM5J4UsLTL23/padgeSvP5YF5qr1+0c7Tdftxr2lwA +N3rLkisf5EiLAToVyJJlgP/exo2I8DaIKe7DZzD3Y1CrurOpkcMKYu5kM1Htlbua +tDkAa2oDAgMBAAECggEAOvdueS9DyiMlCKAeQb1IQosdQOh0l0ma+FgEABC2CWhd +0LgjQTBRM6cGO+urcq7/jhdWQ1UuUG4tVn71z7itCi/F/Enhxc2C22d2GhFVpWsn +giSXJYpZ/mIjkdVfWNo6FRuRmmHwMys1p0qTOS+8qUJWhSzW75csqJZGgeUrAI61 +LBV5F0SGR7dR2xZfy7PeDs9xpD0QivDt5DpsZWPaPvw4QlhdLgw6/YU1h9vtm6ci +xLjnPRLZ7JMpcQHO8dUDl6FiEI7yQ11BDm253VQAVMddYRPQABn7SpEF8kD/aZVh +2Clvz61Rz80SKjPUthMPLWMCRp7zB0xDMzt3/1i+tQKBgQD6Ar1/oD3eFnRnpi4u +n/hdHJtMuXWNfUA4dspNjP6WGOid9sgIeUUdif1XyVJ+afITzvgpWc7nUWIqG2bQ +WxJ/4q2rjUdvjNXTy1voVungR2jD5WLQ9DKeaTR0yCliWlx4JgdPG7qGI5MMwsr+ +R/PUoUUhGeEX+o/sCSieO3iUrQKBgQDqwBEMvIdhAv/CK2sG3fsKYX8rFT55ZNX3 +Tix9DbUGY3wQColNuI8U1nDlxE9U6VOfT9RPqKelBLCgbzB23kdEJnjSlnqlTxrx +E+Hkndyf2ckdJAR3XNxoQ6SRLJNBsgoBj/z5tlfZE9/Jc+uh0mYy3e6g6XCVPBcz +MgoIc+ofbwKBgQCGQhZ1hR30N+bHCozeaPW9OvGDIE0qcEqeh9xYDRFilXnF6pK9 +SjJ9jG7KR8jPLiHb1VebDSl5O1EV/6UU2vNyTc6pw7LLCryBgkGW4aWy1WZDXNnW +EG1meGS9GghvUss5kmJ2bxOZmV0Mi0brisQ8OWagQf+JGvtS7BAt+Q3l+QKBgAb9 +8YQPmXiqPjPqVyW9Ntz4SnFeEJ5NApJ7IZgX8GxgSjGwHqbR+HEGchZl4ncE/Bii +qBA3Vcb0fM5KgYcI19aPzsl28fA6ivLjRLcqfIfGVNcpW3iyq13vpdctHLW4N9QU +FdTaOYOds+ysJziKq8CYG6NvUIshXw+HTgUybqbBAoGBAIIOqcmmtgOClAwipA17 +dAHsI9Sjk+J0+d4JU6o+5TsmhUfUKIjXf5+xqJkJcQZMEe5GhxcCuYkgFicvh4Hz +kv2H/EU35LcJTqC6KTKZOWIbGcn1cqsvwm3GQJffYDiO8fRZSwCaif2J3F2lfH4Y +R/fA67HXFSTT+OncdRpY1NOn +-----END PRIVATE KEY----- +Bag Attributes: +subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US 
+issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIlPjJXzrRih4C +k/XsoI01oqo7IUxH3dA2F7vHGXQoIpKCp8Qe6Z6cFfdD8Uj+s+B1BX6hngwzIwjN +jE/23X3SALVzJVWzX4Y/IEjbgsuao6sOyNyB18wIU9YzZkVGj68fmMlUw3LnhPbe +eWkufZaJCaLyhQOwlRMbOcn48D6Ys8fccOyXNzpq3rH1OzeQpxS2M8zaJYP4/VZ/ +sf6KRpI7bP+QwyFvNKfhcaO9/gj4kMo9lVGjvDU20FW6g8UVNJCV9N4GO6mOcyqo +OhuhVfjCNGgW7N1qi0TIVn0/MQM4l4dcT2R7Z/bV9fhMJLjGsy5A4TLAdRrhKUHT +bzi9HyDvAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 01 00 00 00 +subject=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com +issuer=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com +-----BEGIN CERTIFICATE----- +MIID7TCCAtWgAwIBAgIJALQS3yMg3R41MA0GCSqGSIb3DQEBCwUAMIGMMQswCQYD +VQQGEwJVUzETMBEGA1UECAwKV0FTSElOR1RPTjEQMA4GA1UEBwwHU2VhdHRsZTES +MBAGA1UECgwJTWljcm9zb2Z0MQ4wDAYDVQQLDAVBenVyZTEOMAwGA1UEAwwFQW5o +Vm8xIjAgBgkqhkiG9w0BCQEWE2FuaHZvQG1pY3Jvc29mdC5jb20wHhcNMTkwMjE0 +MjMxMjQwWhcNMjExMTEwMjMxMjQwWjCBjDELMAkGA1UEBhMCVVMxEzARBgNVBAgM +CldBU0hJTkdUT04xEDAOBgNVBAcMB1NlYXR0bGUxEjAQBgNVBAoMCU1pY3Jvc29m +dDEOMAwGA1UECwwFQXp1cmUxDjAMBgNVBAMMBUFuaFZvMSIwIAYJKoZIhvcNAQkB +FhNhbmh2b0BtaWNyb3NvZnQuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA5RHuX1KsHa0Ez1tqFZRitn99av/FC0/Cpjk8lLy9ET3SUOvb7xznOUNg +ioNnr4hwti1oRfFpqZ2Y+utUS9BnhRos8L1PDKHm8Oad6KnXIKBqR1bcX/pq+EHF +hefPxcHs9HQOIXLIplTQc3jaQQVCCjQTEwRvuHeoXEAVABilHRkXAhhMk6ly1NDI +B48QvSYFaliZfl9i3ABo+eyfrRmBky8yiZngnhlTTeWbSdE/OTGBwikJNfxRJSvi +quWpKL1330B45Zwj3MdDSeOzQY5T2hadoI5wAoZ+/W0+IgozhWfWr1qakaXlOde1 +Ap2O3Yzq937qHfzEkMody1rnptbVgQIDAQABo1AwTjAdBgNVHQ4EFgQUPvdgLiv3 +pAk4r0QTPZU3PFOZJvgwHwYDVR0jBBgwFoAUPvdgLiv3pAk4r0QTPZU3PFOZJvgw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAVUHZT+h9+uCPLTEl5IDg +kqd9WpzXA7PJd/V+7DeDDTkEd06FIKTWZLfxLVVDjQJnQqubQb//e0zGu1qKbXnX +R7xqWabGU4eyPeUFWddmt1OHhxKLU3HbJNJJdL6XKiQtpGGUQt/mqNQ/DEr6hhNF +im5I79iA8H/dXA2gyZrj5Rxea4mtsaYO0mfp1NrFtJpAh2Djy4B1lBXBIv4DWG9e +mMEwzcLCOZj2cOMA6+mdLMUjYCvIRtnn5MKUHyZX5EmX79wsqMTvVpddlVLB9Kgz +Qnvft9+SBWh9+F3ip7BsL6Q4Q9v8eHRbnP0ya7ddlgh64uwf9VOfZZdKCnwqudJP +3g== +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 02 00 00 00 +subject=/CN=/subscriptions/redacted/resourcegroups/redacted/providers/Microsoft.Compute/virtualMachines/redacted +issuer=/CN=Microsoft.ManagedIdentity +-----BEGIN CERTIFICATE----- +MIIDnTCCAoWgAwIBAgIUB2lauSRccvFkoJybUfIwOUqBN7MwDQYJKoZIhvcNAQEL +BQAwJDEiMCAGA1UEAxMZTWljcm9zb2Z0Lk1hbmFnZWRJZGVudGl0eTAeFw0xOTAy +MTUxOTA5MDBaFw0xOTA4MTQxOTA5MDBaMIGUMYGRMIGOBgNVBAMTgYYvc3Vic2Ny +aXB0aW9ucy8yN2I3NTBjZC1lZDQzLTQyZmQtOTA0NC04ZDc1ZTEyNGFlNTUvcmVz +b3VyY2Vncm91cHMvYW5oZXh0cmFzc2gvcHJvdmlkZXJzL01pY3Jvc29mdC5Db21w +dXRlL3ZpcnR1YWxNYWNoaW5lcy9hbmh0ZXN0Y2VydDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAOVCE+tnBVBgVXgUFzQfWJNdhrOcynBm8QhMq1dYALNN +2C5R16sRU6RdbcdOLke8LasxrK3SeqjfNx3HV4aKp2OllD/AyuTP3A0Qz+c0yxee +0TDGTSMJU0oH+PPq9/4E62tIjTVKuK0AZlZ2kqhNTLO1PwLaYDdfoPyDebgN3TuW +2fPFoOoBAhTmMEeHd/9DXi2U81lZQiKpVMKAPGAB7swOZ+z2HcIidMFfVczknhSw +tMvbf+lp2B5K8/lgXmqvX7RztN1+3GvaXAA3esuSKx/kSIsBOhXIkmWA/97GjYjw +Nogp7sNnMPdjUKu6s6mRwwpi7mQzUe2Vu5q0OQBragMCAwEAAaNWMFQwDgYDVR0P 
+AQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwHwYD +VR0jBBgwFoAUOJvzEsriQWdJBndPrK+Me1bCPjYwDQYJKoZIhvcNAQELBQADggEB +AFGP/g8o7Hv/to11M0UqfzJuW/AyH9RZtSRcNQFLZUndwweQ6fap8lFsA4REUdqe +7Quqp5JNNY1XzKLWXMPoheIDH1A8FFXdsAroArzlNs9tO3TlIHE8A7HxEVZEmR4b +7ZiixmkQPS2RkjEoV/GM6fheBrzuFn7X5kVZyE6cC5sfcebn8xhk3ZcXI0VmpdT0 +jFBsf5IvFCIXXLLhJI4KXc8VMoKFU1jT9na/jyaoGmfwovKj4ib8s2aiXGAp7Y38 +UCmY+bJapWom6Piy5Jzi/p/kzMVdJcSa+GqpuFxBoQYEVs2XYVl7cGu/wPM+NToC +pkSoWwF1QAnHn0eokR9E1rU= +-----END CERTIFICATE----- +Bag Attributes: +subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US +issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_cert b/tests/data/azure/pubkey_extract_cert new file mode 100644 index 00000000..ce9b852d --- /dev/null +++ b/tests/data/azure/pubkey_extract_cert @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_ssh_key b/tests/data/azure/pubkey_extract_ssh_key new file mode 100644 index 00000000..54d749ed --- /dev/null +++ b/tests/data/azure/pubkey_extract_ssh_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHU9IDclbKVYVbYuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoinlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmWvwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4yWzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7t5btUyvp diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 26b2b93d..02556165 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -1,11 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import os +import unittest2 from textwrap import dedent from cloudinit.sources.helpers import azure as azure_helper from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir +from cloudinit.util import load_file from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim GOAL_STATE_TEMPLATE = """\ @@ -289,6 +291,50 @@ class TestOpenSSLManager(CiTestCase): self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) +class TestOpenSSLManagerActions(CiTestCase): + + def setUp(self): + super(TestOpenSSLManagerActions, self).setUp() + + self.allowed_subp = True + + def _data_file(self, name): + path = 'tests/data/azure' + return os.path.join(path, name) + + @unittest2.skip("todo move to cloud_test") + def test_pubkey_extract(self): + cert = load_file(self._data_file('pubkey_extract_cert')) + good_key = load_file(self._data_file('pubkey_extract_ssh_key')) + sslmgr = azure_helper.OpenSSLManager() + key = sslmgr._get_ssh_key_from_cert(cert) + self.assertEqual(good_key, key) + + good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + fingerprint = sslmgr._get_fingerprint_from_cert(cert) + self.assertEqual(good_fingerprint, fingerprint) + + @unittest2.skip("todo move to cloud_test") + @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') + def test_parse_certificates(self, mock_decrypt_certs): + """Azure control plane puts private keys as well as certificates + into the Certificates XML object. Make sure only the public keys + from certs are extracted and that fingerprints are converted to + the form specified in the ovf-env.xml file. + """ + cert_contents = load_file(self._data_file('parse_certificates_pem')) + fingerprints = load_file(self._data_file( + 'parse_certificates_fingerprints') + ).splitlines() + mock_decrypt_certs.return_value = cert_contents + sslmgr = azure_helper.OpenSSLManager() + keys_by_fp = sslmgr.parse_certificates('') + for fp in keys_by_fp.keys(): + self.assertIn(fp, fingerprints) + for fp in fingerprints: + self.assertIn(fp, keys_by_fp) + + class TestWALinuxAgentShim(CiTestCase): def setUp(self): @@ -329,18 +375,31 @@ class TestWALinuxAgentShim(CiTestCase): def test_certificates_used_to_determine_public_keys(self): shim = wa_shim() - data = shim.register_with_azure_and_fetch_data() + """if register_with_azure_and_fetch_data() isn't passed some info about + the user's public keys, there's no point in even trying to parse + the certificates + """ + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, + {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] + certs = {'fp1': 'expected-key', + 'fp2': 'should-not-be-found', + 'fp3': 'expected-no-value-key', + } + sslmgr = self.OpenSSLManager.return_value + sslmgr.parse_certificates.return_value = certs + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual( [mock.call(self.GoalState.return_value.certificates_xml)], - self.OpenSSLManager.return_value.parse_certificates.call_args_list) - self.assertEqual( - self.OpenSSLManager.return_value.parse_certificates.return_value, - data['public-keys']) + sslmgr.parse_certificates.call_args_list) + self.assertIn('expected-key', data['public-keys']) + self.assertIn('expected-no-value-key', data['public-keys']) + self.assertNotIn('should-not-be-found', data['public-keys']) def test_absent_certificates_produces_empty_public_keys(self): + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] self.GoalState.return_value.certificates_xml = None shim = wa_shim() - data = 
shim.register_with_azure_and_fetch_data() + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual([], data['public-keys']) def test_correct_url_used_for_report_ready(self): -- cgit v1.2.3 From 0dc3a77f41f4544e4cb5a41637af7693410d4cdf Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Tue, 26 Mar 2019 18:53:50 +0000 Subject: Azure: Ensure platform random_seed is always serializable as JSON. The Azure platform surfaces random bytes into /sys via Hyper-V. Python 2.7 json.dump() raises an exception if asked to convert a str with non-character content, and python 3.0 json.dump() won't serialize a "bytes" value. As a result, c-i instance data is often not written by Azure, making reboots slower (c-i has to repeat work). The random data is base64-encoded and then decoded into a string (str or unicode depending on the version of Python in use). The base64 string has just as many bits of entropy, so we're not throwing away useful "information", but we can be certain json.dump() will correctly serialize the bits. --- cloudinit/sources/DataSourceAzure.py | 24 +++++++++++++++++++----- tests/data/azure/non_unicode_random_string | 1 + tests/unittests/test_datasource/test_azure.py | 24 ++++++++++++++++++++++-- 3 files changed, 42 insertions(+), 7 deletions(-) create mode 100644 tests/data/azure/non_unicode_random_string (limited to 'cloudinit/sources/DataSourceAzure.py') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index eccbee5a..b4e3f061 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -54,6 +54,7 @@ REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" AGENT_SEED_DIR = '/var/lib/waagent' IMDS_URL = "http://169.254.169.254/metadata/" +PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" # List of static scripts and network config artifacts created by # stock ubuntu suported images. @@ -195,6 +196,8 @@ if util.is_FreeBSD(): RESOURCE_DISK_PATH = "/dev/" + res_disk else: LOG.debug("resource disk is None") + # TODO Find where platform entropy data is surfaced + PLATFORM_ENTROPY_SOURCE = None BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, @@ -1100,16 +1103,27 @@ def _check_freebsd_cdrom(cdrom_dev): return False -def _get_random_seed(): +def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): """Return content random seed file if available, otherwise, return None.""" # azure / hyper-v provides random data here - # TODO. find the seed on FreeBSD platform # now update ds_cfg to reflect contents pass in config - if util.is_FreeBSD(): + if source is None: return None - return util.load_file("/sys/firmware/acpi/tables/OEM0", - quiet=True, decode=False) + seed = util.load_file(source, quiet=True, decode=False) + + # The seed generally contains non-Unicode characters. load_file puts + # them into a str (in python 2) or bytes (in python 3). In python 2, + # bad octets in a str cause util.json_dumps() to throw an exception. In + # python 3, bytes is a non-serializable type, and the handler load_file + # uses applies b64 encoding *again* to handle it. The simplest solution + # is to just b64encode the data and then decode it to a serializable + # string. Same number of bits of entropy, just with 25% more zeroes. + # There's no need to undo this base64-encoding when the random seed is + # actually used in cc_seed_random.py. 
+ seed = base64.b64encode(seed).decode() + + return seed def list_possible_azure_ds_devs(): diff --git a/tests/data/azure/non_unicode_random_string b/tests/data/azure/non_unicode_random_string new file mode 100644 index 00000000..b9ecefb9 --- /dev/null +++ b/tests/data/azure/non_unicode_random_string @@ -0,0 +1 @@ +OEM0d\x00\x00\x00\x01\x80VRTUALMICROSFT\x02\x17\x00\x06MSFT\x97\x00\x00\x00C\xb4{V\xf4X%\x061x\x90\x1c\xfen\x86\xbf~\xf5\x8c\x94&\x88\xed\x84\xf9B\xbd\xd3\xf1\xdb\xee:\xd9\x0fc\x0e\x83(\xbd\xe3'\xfc\x85,\xdf\xf4\x13\x99N\xc5\xf3Y\x1e\xe3\x0b\xa4H\x08J\xb9\xdcdb$ \ No newline at end of file diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 6b05b8f1..53c56cd0 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -7,11 +7,11 @@ from cloudinit.sources import ( UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) from cloudinit.util import (b64e, decode_binary, load_file, write_file, find_freebsd_part, get_path_dev_freebsd, - MountFailedError) + MountFailedError, json_dumps, load_json) from cloudinit.version import version_string as vs from cloudinit.tests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, - ExitStack) + ExitStack, resourceLocation) import crypt import httpretty @@ -1923,4 +1923,24 @@ class TestWBIsPlatformViable(CiTestCase): self.logs.getvalue()) +class TestRandomSeed(CiTestCase): + """Test proper handling of random_seed""" + + def test_non_ascii_seed_is_serializable(self): + """Pass if a random string from the Azure infrastructure which + contains at least one non-Unicode character can be converted to/from + JSON without alteration and without throwing an exception. + """ + path = resourceLocation("azure/non_unicode_random_string") + result = dsaz._get_random_seed(path) + + obj = {'seed': result} + try: + serialized = json_dumps(obj) + deserialized = load_json(serialized) + except UnicodeDecodeError: + self.fail("Non-serializable random seed returned") + + self.assertEqual(deserialized['seed'], result) + # vi: ts=4 expandtab -- cgit v1.2.3 From 0d8c88393b51db6454491a379dcc2e691551217a Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Wed, 3 Apr 2019 18:23:18 +0000 Subject: DatasourceAzure: add additional logging for azure datasource Create an Azure logging decorator and use additional ReportEventStack context managers to provide additional logging details. 
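The decorator pattern is easy to see in isolation. Below is a minimal
sketch, where this ReportEventStack is a stub standing in for
cloudinit.reporting.events.ReportEventStack; only the name plumbing is
shown:

    # Minimal sketch of the telemetry decorator; the ReportEventStack here
    # is a stub, not the real cloudinit.reporting.events implementation.
    class ReportEventStack(object):
        def __init__(self, name, description, parent=None):
            self.name = name

        def __enter__(self):
            print('event start: %s' % self.name)
            return self

        def __exit__(self, *exc_info):
            print('event end: %s' % self.name)

    def azure_ds_telemetry_reporter(func):
        def impl(*args, **kwargs):
            with ReportEventStack(name=func.__name__,
                                  description=func.__name__):
                return func(*args, **kwargs)
        return impl

    @azure_ds_telemetry_reporter
    def crawl_metadata():
        return {'local-hostname': 'myhost'}

    crawl_metadata()  # emits a start/end event pair around the call

Note the decorator ordering in the diff below: where a static method is
instrumented, @staticmethod sits above @azure_ds_telemetry_reporter, so
the reporter wraps the plain function before it is bound.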
--- cloudinit/sources/DataSourceAzure.py | 231 ++++++++++++++++++++++------------- cloudinit/sources/helpers/azure.py | 31 +++++ 2 files changed, 179 insertions(+), 83 deletions(-) mode change 100644 => 100755 cloudinit/sources/DataSourceAzure.py mode change 100644 => 100755 cloudinit/sources/helpers/azure.py (limited to 'cloudinit/sources/DataSourceAzure.py') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py old mode 100644 new mode 100755 index b4e3f061..d4230b3c --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -21,10 +21,14 @@ from cloudinit import net from cloudinit.event import EventType from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources -from cloudinit.sources.helpers.azure import get_metadata_from_fabric from cloudinit.sources.helpers import netlink from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util +from cloudinit.reporting import events + +from cloudinit.sources.helpers.azure import (azure_ds_reporter, + azure_ds_telemetry_reporter, + get_metadata_from_fabric) LOG = logging.getLogger(__name__) @@ -244,6 +248,7 @@ def set_hostname(hostname, hostname_command='hostname'): util.subp([hostname_command, hostname]) +@azure_ds_telemetry_reporter @contextlib.contextmanager def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): """ @@ -290,6 +295,7 @@ class DataSourceAzure(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) + @azure_ds_telemetry_reporter def bounce_network_with_azure_hostname(self): # When using cloud-init to provision, we have to set the hostname from # the metadata and "bounce" the network to force DDNS to update via @@ -315,6 +321,7 @@ class DataSourceAzure(sources.DataSource): util.logexc(LOG, "handling set_hostname failed") return False + @azure_ds_telemetry_reporter def get_metadata_from_agent(self): temp_hostname = self.metadata.get('local-hostname') agent_cmd = self.ds_cfg['agent_command'] @@ -344,15 +351,18 @@ class DataSourceAzure(sources.DataSource): LOG.debug("ssh authentication: " "using fingerprint from fabirc") - # wait very long for public SSH keys to arrive - # https://bugs.launchpad.net/cloud-init/+bug/1717611 - missing = util.log_time(logfunc=LOG.debug, - msg="waiting for SSH public key files", - func=util.wait_for_files, - args=(fp_files, 900)) - - if len(missing): - LOG.warning("Did not find files, but going on: %s", missing) + with events.ReportEventStack( + name="waiting-for-ssh-public-key", + description="wait for agents to retrieve ssh keys", + parent=azure_ds_reporter): + # wait very long for public SSH keys to arrive + # https://bugs.launchpad.net/cloud-init/+bug/1717611 + missing = util.log_time(logfunc=LOG.debug, + msg="waiting for SSH public key files", + func=util.wait_for_files, + args=(fp_files, 900)) + if len(missing): + LOG.warning("Did not find files, but going on: %s", missing) metadata = {} metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) @@ -366,6 +376,7 @@ class DataSourceAzure(sources.DataSource): subplatform_type = 'seed-dir' return '%s (%s)' % (subplatform_type, self.seed) + @azure_ds_telemetry_reporter def crawl_metadata(self): """Walk all instance metadata sources returning a dict on success. 
@@ -467,6 +478,7 @@ class DataSourceAzure(sources.DataSource): super(DataSourceAzure, self).clear_cached_attrs(attr_defaults) self._metadata_imds = sources.UNSET + @azure_ds_telemetry_reporter def _get_data(self): """Crawl and process datasource metadata caching metadata as attrs. @@ -513,6 +525,7 @@ class DataSourceAzure(sources.DataSource): # quickly (local check only) if self.instance_id is still valid return sources.instance_id_matches_system_uuid(self.get_instance_id()) + @azure_ds_telemetry_reporter def setup(self, is_new_instance): if self._negotiated is False: LOG.debug("negotiating for %s (new_instance=%s)", @@ -580,6 +593,7 @@ class DataSourceAzure(sources.DataSource): if nl_sock: nl_sock.close() + @azure_ds_telemetry_reporter def _report_ready(self, lease): """Tells the fabric provisioning has completed """ try: @@ -617,9 +631,14 @@ class DataSourceAzure(sources.DataSource): def _reprovision(self): """Initiate the reprovisioning workflow.""" contents = self._poll_imds() - md, ud, cfg = read_azure_ovf(contents) - return (md, ud, cfg, {'ovf-env.xml': contents}) - + with events.ReportEventStack( + name="reprovisioning-read-azure-ovf", + description="read azure ovf during reprovisioning", + parent=azure_ds_reporter): + md, ud, cfg = read_azure_ovf(contents) + return (md, ud, cfg, {'ovf-env.xml': contents}) + + @azure_ds_telemetry_reporter def _negotiate(self): """Negotiate with fabric and return data from it. @@ -652,6 +671,7 @@ class DataSourceAzure(sources.DataSource): util.del_file(REPROVISION_MARKER_FILE) return fabric_data + @azure_ds_telemetry_reporter def activate(self, cfg, is_new_instance): address_ephemeral_resize(is_new_instance=is_new_instance, preserve_ntfs=self.ds_cfg.get( @@ -690,12 +710,14 @@ def _partitions_on_device(devpath, maxnum=16): return [] +@azure_ds_telemetry_reporter def _has_ntfs_filesystem(devpath): ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) LOG.debug('ntfs_devices found = %s', ntfs_devices) return os.path.realpath(devpath) in ntfs_devices +@azure_ds_telemetry_reporter def can_dev_be_reformatted(devpath, preserve_ntfs): """Determine if the ephemeral drive at devpath should be reformatted. @@ -744,43 +766,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): (cand_part, cand_path, devpath)) return False, msg + @azure_ds_telemetry_reporter def count_files(mp): ignored = set(['dataloss_warning_readme.txt']) return len([f for f in os.listdir(mp) if f.lower() not in ignored]) bmsg = ('partition %s (%s) on device %s was ntfs formatted' % (cand_part, cand_path, devpath)) - try: - file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", - update_env_for_mount={'LANG': 'C'}) - except util.MountFailedError as e: - if "unknown filesystem type 'ntfs'" in str(e): - return True, (bmsg + ' but this system cannot mount NTFS,' - ' assuming there are no important files.' - ' Formatting allowed.') - return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) - - if file_count != 0: - LOG.warning("it looks like you're using NTFS on the ephemeral disk, " - 'to ensure that filesystem does not get wiped, set ' - '%s.%s in config', '.'.join(DS_CFG_PATH), - DS_CFG_KEY_PRESERVE_NTFS) - return False, bmsg + ' but had %d files on it.' 
% file_count + + with events.ReportEventStack( + name="mount-ntfs-and-count", + description="mount-ntfs-and-count", + parent=azure_ds_reporter) as evt: + try: + file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", + update_env_for_mount={'LANG': 'C'}) + except util.MountFailedError as e: + evt.description = "cannot mount ntfs" + if "unknown filesystem type 'ntfs'" in str(e): + return True, (bmsg + ' but this system cannot mount NTFS,' + ' assuming there are no important files.' + ' Formatting allowed.') + return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) + + if file_count != 0: + evt.description = "mounted and counted %d files" % file_count + LOG.warning("it looks like you're using NTFS on the ephemeral" + " disk, to ensure that filesystem does not get wiped," + " set %s.%s in config", '.'.join(DS_CFG_PATH), + DS_CFG_KEY_PRESERVE_NTFS) + return False, bmsg + ' but had %d files on it.' % file_count return True, bmsg + ' and had no important files. Safe for reformatting.' +@azure_ds_telemetry_reporter def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, is_new_instance=False, preserve_ntfs=False): # wait for ephemeral disk to come up naplen = .2 - missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, - log_pre="Azure ephemeral disk: ") - - if missing: - LOG.warning("ephemeral device '%s' did not appear after %d seconds.", - devpath, maxwait) - return + with events.ReportEventStack( + name="wait-for-ephemeral-disk", + description="wait for ephemeral disk", + parent=azure_ds_reporter): + missing = util.wait_for_files([devpath], + maxwait=maxwait, + naplen=naplen, + log_pre="Azure ephemeral disk: ") + + if missing: + LOG.warning("ephemeral device '%s' did" + " not appear after %d seconds.", + devpath, maxwait) + return result = False msg = None @@ -808,6 +846,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, return +@azure_ds_telemetry_reporter def perform_hostname_bounce(hostname, cfg, prev_hostname): # set the hostname to 'hostname' if it is not already set to that. # then, if policy is not off, bounce the interface using command @@ -843,6 +882,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): return True +@azure_ds_telemetry_reporter def crtfile_to_pubkey(fname, data=None): pipeline = ('openssl x509 -noout -pubkey < "$0" |' 'ssh-keygen -i -m PKCS8 -f /dev/stdin') @@ -851,6 +891,7 @@ def crtfile_to_pubkey(fname, data=None): return out.rstrip() +@azure_ds_telemetry_reporter def pubkeys_from_crt_files(flist): pubkeys = [] errors = [] @@ -866,6 +907,7 @@ def pubkeys_from_crt_files(flist): return pubkeys +@azure_ds_telemetry_reporter def write_files(datadir, files, dirmode=None): def _redact_password(cnt, fname): @@ -893,6 +935,7 @@ def write_files(datadir, files, dirmode=None): util.write_file(filename=fname, content=content, mode=0o600) +@azure_ds_telemetry_reporter def invoke_agent(cmd): # this is a function itself to simplify patching it for test if cmd: @@ -912,6 +955,7 @@ def find_child(node, filter_func): return ret +@azure_ds_telemetry_reporter def load_azure_ovf_pubkeys(sshnode): # This parses a 'SSH' node formatted like below, and returns # an array of dicts. 
@@ -964,6 +1008,7 @@ def load_azure_ovf_pubkeys(sshnode): return found +@azure_ds_telemetry_reporter def read_azure_ovf(contents): try: dom = minidom.parseString(contents) @@ -1064,6 +1109,7 @@ def read_azure_ovf(contents): return (md, ud, cfg) +@azure_ds_telemetry_reporter def _extract_preprovisioned_vm_setting(dom): """Read the preprovision flag from the ovf. It should not exist unless true.""" @@ -1092,6 +1138,7 @@ def encrypt_pass(password, salt_id="$6$"): return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) +@azure_ds_telemetry_reporter def _check_freebsd_cdrom(cdrom_dev): """Return boolean indicating path to cdrom device has content.""" try: @@ -1103,6 +1150,7 @@ def _check_freebsd_cdrom(cdrom_dev): return False +@azure_ds_telemetry_reporter def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): """Return content random seed file if available, otherwise, return None.""" @@ -1126,6 +1174,7 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): return seed +@azure_ds_telemetry_reporter def list_possible_azure_ds_devs(): devlist = [] if util.is_FreeBSD(): @@ -1140,6 +1189,7 @@ def list_possible_azure_ds_devs(): return devlist +@azure_ds_telemetry_reporter def load_azure_ds_dir(source_dir): ovf_file = os.path.join(source_dir, "ovf-env.xml") @@ -1162,47 +1212,54 @@ def parse_network_config(imds_metadata): @param: imds_metadata: Dict of content read from IMDS network service. @return: Dictionary containing network version 2 standard configuration. """ - if imds_metadata != sources.UNSET and imds_metadata: - netconfig = {'version': 2, 'ethernets': {}} - LOG.debug('Azure: generating network configuration from IMDS') - network_metadata = imds_metadata['network'] - for idx, intf in enumerate(network_metadata['interface']): - nicname = 'eth{idx}'.format(idx=idx) - dev_config = {} - for addr4 in intf['ipv4']['ipAddress']: - privateIpv4 = addr4['privateIpAddress'] - if privateIpv4: - if dev_config.get('dhcp4', False): - # Append static address config for nic > 1 - netPrefix = intf['ipv4']['subnet'][0].get( - 'prefix', '24') - if not dev_config.get('addresses'): - dev_config['addresses'] = [] - dev_config['addresses'].append( - '{ip}/{prefix}'.format( - ip=privateIpv4, prefix=netPrefix)) - else: - dev_config['dhcp4'] = True - for addr6 in intf['ipv6']['ipAddress']: - privateIpv6 = addr6['privateIpAddress'] - if privateIpv6: - dev_config['dhcp6'] = True - break - if dev_config: - mac = ':'.join(re.findall(r'..', intf['macAddress'])) - dev_config.update( - {'match': {'macaddress': mac.lower()}, - 'set-name': nicname}) - netconfig['ethernets'][nicname] = dev_config - else: - blacklist = ['mlx4_core'] - LOG.debug('Azure: generating fallback configuration') - # generate a network config, blacklist picking mlx4_core devs - netconfig = net.generate_fallback_config( - blacklist_drivers=blacklist, config_driver=True) - return netconfig + with events.ReportEventStack( + name="parse_network_config", + description="", + parent=azure_ds_reporter) as evt: + if imds_metadata != sources.UNSET and imds_metadata: + netconfig = {'version': 2, 'ethernets': {}} + LOG.debug('Azure: generating network configuration from IMDS') + network_metadata = imds_metadata['network'] + for idx, intf in enumerate(network_metadata['interface']): + nicname = 'eth{idx}'.format(idx=idx) + dev_config = {} + for addr4 in intf['ipv4']['ipAddress']: + privateIpv4 = addr4['privateIpAddress'] + if privateIpv4: + if dev_config.get('dhcp4', False): + # Append static address config for nic > 1 + netPrefix = 
intf['ipv4']['subnet'][0].get( + 'prefix', '24') + if not dev_config.get('addresses'): + dev_config['addresses'] = [] + dev_config['addresses'].append( + '{ip}/{prefix}'.format( + ip=privateIpv4, prefix=netPrefix)) + else: + dev_config['dhcp4'] = True + for addr6 in intf['ipv6']['ipAddress']: + privateIpv6 = addr6['privateIpAddress'] + if privateIpv6: + dev_config['dhcp6'] = True + break + if dev_config: + mac = ':'.join(re.findall(r'..', intf['macAddress'])) + dev_config.update( + {'match': {'macaddress': mac.lower()}, + 'set-name': nicname}) + netconfig['ethernets'][nicname] = dev_config + evt.description = "network config from imds" + else: + blacklist = ['mlx4_core'] + LOG.debug('Azure: generating fallback configuration') + # generate a network config, blacklist picking mlx4_core devs + netconfig = net.generate_fallback_config( + blacklist_drivers=blacklist, config_driver=True) + evt.description = "network config from fallback" + return netconfig +@azure_ds_telemetry_reporter def get_metadata_from_imds(fallback_nic, retries): """Query Azure's network metadata service, returning a dictionary. @@ -1227,6 +1284,7 @@ def get_metadata_from_imds(fallback_nic, retries): return util.log_time(**kwargs) +@azure_ds_telemetry_reporter def _get_metadata_from_imds(retries): url = IMDS_URL + "instance?api-version=2017-12-01" @@ -1246,6 +1304,7 @@ def _get_metadata_from_imds(retries): return {} +@azure_ds_telemetry_reporter def maybe_remove_ubuntu_network_config_scripts(paths=None): """Remove Azure-specific ubuntu network config for non-primary nics. @@ -1283,14 +1342,20 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): def _is_platform_viable(seed_dir): - """Check platform environment to report if this datasource may run.""" - asset_tag = util.read_dmi_data('chassis-asset-tag') - if asset_tag == AZURE_CHASSIS_ASSET_TAG: - return True - LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) - if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): - return True - return False + with events.ReportEventStack( + name="check-platform-viability", + description="found azure asset tag", + parent=azure_ds_reporter) as evt: + + """Check platform environment to report if this datasource may run.""" + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag == AZURE_CHASSIS_ASSET_TAG: + return True + LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) + evt.description = "Non-Azure DMI asset tag '%s' discovered.", asset_tag + if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): + return True + return False class BrokenAzureDataSource(Exception): diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py old mode 100644 new mode 100755 index 2829dd20..d3af05ee --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -16,10 +16,27 @@ from xml.etree import ElementTree from cloudinit import url_helper from cloudinit import util +from cloudinit.reporting import events LOG = logging.getLogger(__name__) +azure_ds_reporter = events.ReportEventStack( + name="azure-ds", + description="initialize reporter for azure ds", + reporting_enabled=True) + + +def azure_ds_telemetry_reporter(func): + def impl(*args, **kwargs): + with events.ReportEventStack( + name=func.__name__, + description=func.__name__, + parent=azure_ds_reporter): + return func(*args, **kwargs) + return impl + + @contextmanager def cd(newdir): prevdir = os.getcwd() @@ -119,6 +136,7 @@ class OpenSSLManager(object): def clean_up(self): 
util.del_dir(self.tmpdir) + @azure_ds_telemetry_reporter def generate_certificate(self): LOG.debug('Generating certificate for communication with fabric...') if self.certificate is not None: @@ -139,17 +157,20 @@ class OpenSSLManager(object): LOG.debug('New certificate generated.') @staticmethod + @azure_ds_telemetry_reporter def _run_x509_action(action, cert): cmd = ['openssl', 'x509', '-noout', action] result, _ = util.subp(cmd, data=cert) return result + @azure_ds_telemetry_reporter def _get_ssh_key_from_cert(self, certificate): pub_key = self._run_x509_action('-pubkey', certificate) keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] ssh_key, _ = util.subp(keygen_cmd, data=pub_key) return ssh_key + @azure_ds_telemetry_reporter def _get_fingerprint_from_cert(self, certificate): """openssl x509 formats fingerprints as so: 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ @@ -163,6 +184,7 @@ class OpenSSLManager(object): octets = raw_fp[eq+1:-1].split(':') return ''.join(octets) + @azure_ds_telemetry_reporter def _decrypt_certs_from_xml(self, certificates_xml): """Decrypt the certificates XML document using the our private key; return the list of certs and private keys contained in the doc. @@ -185,6 +207,7 @@ class OpenSSLManager(object): shell=True, data=b'\n'.join(lines)) return out + @azure_ds_telemetry_reporter def parse_certificates(self, certificates_xml): """Given the Certificates XML document, return a dictionary of fingerprints and associated SSH keys derived from the certs.""" @@ -265,11 +288,13 @@ class WALinuxAgentShim(object): return socket.inet_ntoa(packed_bytes) @staticmethod + @azure_ds_telemetry_reporter def _networkd_get_value_from_leases(leases_d=None): return dhcp.networkd_get_option_from_leases( 'OPTION_245', leases_d=leases_d) @staticmethod + @azure_ds_telemetry_reporter def _get_value_from_leases_file(fallback_lease_file): leases = [] content = util.load_file(fallback_lease_file) @@ -287,6 +312,7 @@ class WALinuxAgentShim(object): return leases[-1] @staticmethod + @azure_ds_telemetry_reporter def _load_dhclient_json(): dhcp_options = {} hooks_dir = WALinuxAgentShim._get_hooks_dir() @@ -305,6 +331,7 @@ class WALinuxAgentShim(object): return dhcp_options @staticmethod + @azure_ds_telemetry_reporter def _get_value_from_dhcpoptions(dhcp_options): if dhcp_options is None: return None @@ -318,6 +345,7 @@ class WALinuxAgentShim(object): return _value @staticmethod + @azure_ds_telemetry_reporter def find_endpoint(fallback_lease_file=None, dhcp245=None): value = None if dhcp245 is not None: @@ -352,6 +380,7 @@ class WALinuxAgentShim(object): LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return endpoint_ip_address + @azure_ds_telemetry_reporter def register_with_azure_and_fetch_data(self, pubkey_info=None): if self.openssl_manager is None: self.openssl_manager = OpenSSLManager() @@ -404,6 +433,7 @@ class WALinuxAgentShim(object): return keys + @azure_ds_telemetry_reporter def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') document = self.REPORT_READY_XML_TEMPLATE.format( @@ -419,6 +449,7 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') +@azure_ds_telemetry_reporter def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, pubkey_info=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, -- cgit v1.2.3 From 528366820bb48c13957d0c58afc2a46a3ba84bef Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Wed, 3 Apr 2019 22:23:29 
+0000 Subject: Azure: Treat _unset network configuration as if it were absent When the Azure datasource persists all of its metadata to the instance directory, it deliberately sets the self.network_config value to be the sources.UNSET value. The goal is to ensure that each time the system boots, fresh network configuration data is fetched from the cloud platform so that any control plane changes will take effect. When a VM is first created, there's no pickled instance to restore, so self._network_config is None, resulting in self.network_config() properly building a new config. Azure suffered from LP: #1801364 which prevented ds from being stored in obj.pkl in the instance directory, so subsequent reboots always regenerated their network configuration. Commit 0dc3a77f41f4544e4cb5a41637af7693410d4cdf introduced a new bug in which self.network_config() assumed the self._network_config value was either None or trustable; when the config was unpickled, that value was _unset, thus breaking the assumption. LP: #1823084 --- cloudinit/sources/DataSourceAzure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit/sources/DataSourceAzure.py') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d4230b3c..76b16616 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -688,7 +688,7 @@ class DataSourceAzure(sources.DataSource): 2. Generate a fallback network config that does not include any of the blacklisted devices. """ - if not self._network_config: + if not self._network_config or self._network_config == sources.UNSET: if self.ds_cfg.get('apply_network_config'): nc_src = self._metadata_imds else: -- cgit v1.2.3 From 937555fd422edf8235430afab3c0ab69f9e3b3a4 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Thu, 18 Apr 2019 16:08:20 +0000 Subject: mount_cb: do not pass sync and rw options to mount On FreeBSD, mount_cd9660 does not accept the sync option that is enabled by default. In addition, the sync is only useful with the `rw` mode. However the `rw` mode was never used. This patch removes the `rw` and `sync` parameter of `mount_cb` to simplify the code base and resolve the FreeBSD issue. 
LP: #1645824 --- cloudinit/sources/DataSourceAzure.py | 2 +- cloudinit/sources/DataSourceConfigDrive.py | 7 ++----- cloudinit/util.py | 15 ++------------- 3 files changed, 5 insertions(+), 19 deletions(-) (limited to 'cloudinit/sources/DataSourceAzure.py') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 76b16616..64165259 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -407,7 +407,7 @@ class DataSourceAzure(sources.DataSource): elif cdev.startswith("/dev/"): if util.is_FreeBSD(): ret = util.mount_cb(cdev, load_azure_ds_dir, - mtype="udf", sync=False) + mtype="udf") else: ret = util.mount_cb(cdev, load_azure_ds_dir) else: diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 564e3eb3..571d30dc 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -72,15 +72,12 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): dslist = self.sys_cfg.get('datasource_list') for dev in find_candidate_devs(dslist=dslist): try: - # Set mtype if freebsd and turn off sync - if dev.startswith("/dev/cd"): + if util.is_FreeBSD() and dev.startswith("/dev/cd"): mtype = "cd9660" - sync = False else: mtype = None - sync = True results = util.mount_cb(dev, read_config_drive, - mtype=mtype, sync=sync) + mtype=mtype) found = dev except openstack.NonReadable: pass diff --git a/cloudinit/util.py b/cloudinit/util.py index 385f231c..ea4199cd 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1679,7 +1679,7 @@ def mounts(): return mounted -def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True, +def mount_cb(device, callback, data=None, mtype=None, update_env_for_mount=None): """ Mount the device, call method 'callback' passing the directory @@ -1726,18 +1726,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True, for mtype in mtypes: mountpoint = None try: - mountcmd = ['mount'] - mountopts = [] - if rw: - mountopts.append('rw') - else: - mountopts.append('ro') - if sync: - # This seems like the safe approach to do - # (ie where this is on by default) - mountopts.append("sync") - if mountopts: - mountcmd.extend(["-o", ",".join(mountopts)]) + mountcmd = ['mount', '-o', 'ro'] if mtype: mountcmd.extend(['-t', mtype]) mountcmd.append(device) -- cgit v1.2.3 From ab6621d849b24bb652243e88c79f6f3b446048d7 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Wed, 8 May 2019 14:54:03 +0000 Subject: DataSourceAzure: Adjust timeout for polling IMDS If the IMDS primary server is not available, falling back to the secondary server takes about 1s. The net result is that the expected E2E time is slightly more than 1s. This change increases the timeout to 2s to prevent the infinite loop of timeouts. 
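A toy model makes the arithmetic concrete. This is illustrative code
only, not the cloud-init API; the 1s failover latency is the figure
cited above:

    # Toy model of the IMDS polling loop. fake_imds() stands in for the
    # real readurl() call against the metadata endpoint.
    FAILOVER_LATENCY = 1.0

    def fake_imds(timeout):
        if timeout <= FAILOVER_LATENCY:
            # request is cancelled before the secondary server can answer
            raise TimeoutError('IMDS request timed out')
        return {'compute': {'location': 'eastus2'}}

    def poll(timeout, max_attempts=5):
        for _ in range(max_attempts):  # the real loop retries without limit
            try:
                return fake_imds(timeout)
            except TimeoutError:
                continue
        return None

    assert poll(timeout=1) is None      # old timeout: every attempt fails
    assert poll(timeout=2) is not None  # new IMDS_TIMEOUT_IN_SECONDS = 2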
--- cloudinit/sources/DataSourceAzure.py | 15 ++++++++++----- tests/unittests/test_datasource/test_azure.py | 10 +++++++--- 2 files changed, 17 insertions(+), 8 deletions(-) (limited to 'cloudinit/sources/DataSourceAzure.py') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 64165259..b7440c1d 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -57,7 +57,12 @@ AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" AGENT_SEED_DIR = '/var/lib/waagent' + +# In the event where the IMDS primary server is not +# available, it takes 1s to fallback to the secondary one +IMDS_TIMEOUT_IN_SECONDS = 2 IMDS_URL = "http://169.254.169.254/metadata/" + PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" # List of static scripts and network config artifacts created by @@ -582,9 +587,9 @@ class DataSourceAzure(sources.DataSource): return self._ephemeral_dhcp_ctx.clean_network() else: - return readurl(url, timeout=1, headers=headers, - exception_cb=exc_cb, infinite=True, - log_req_resp=False).contents + return readurl(url, timeout=IMDS_TIMEOUT_IN_SECONDS, + headers=headers, exception_cb=exc_cb, + infinite=True, log_req_resp=False).contents except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry self._ephemeral_dhcp_ctx.clean_network() @@ -1291,8 +1296,8 @@ def _get_metadata_from_imds(retries): headers = {"Metadata": "true"} try: response = readurl( - url, timeout=1, headers=headers, retries=retries, - exception_cb=retry_on_url_exc) + url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, + retries=retries, exception_cb=retry_on_url_exc) except Exception as e: LOG.debug('Ignoring IMDS instance metadata: %s', e) return {} diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index ab77c034..427ab7e7 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -163,7 +163,8 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): m_readurl.assert_called_with( self.network_md_url, exception_cb=mock.ANY, - headers={'Metadata': 'true'}, retries=2, timeout=1) + headers={'Metadata': 'true'}, retries=2, + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS) @mock.patch('cloudinit.url_helper.time.sleep') @mock.patch(MOCKPATH + 'net.is_up') @@ -1791,7 +1792,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs() - }, method='GET', timeout=1, + }, method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, url=full_url)]) self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( @@ -1828,7 +1830,9 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs()}, - method='GET', timeout=1, url=full_url)]) + method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, + url=full_url)]) self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', -- cgit v1.2.3 From feebec1cbb462208003460d68d909e76cb68e0e2 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 25 Jun 2019 16:06:27 +0000 Subject: azure: add region and AZ properties from imds compute location metadata This allows cloud-init query region to show valid region data for Azure --- cloudinit/sources/DataSourceAzure.py | 9 +++++ 
tests/unittests/test_datasource/test_azure.py | 47 +++++++++++++++++++++++---- 2 files changed, 49 insertions(+), 7 deletions(-) (limited to 'cloudinit/sources/DataSourceAzure.py') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index b7440c1d..d2fad9bb 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -683,6 +683,11 @@ class DataSourceAzure(sources.DataSource): DS_CFG_KEY_PRESERVE_NTFS, False)) return + @property + def availability_zone(self): + return self.metadata.get( + 'imds', {}).get('compute', {}).get('platformFaultDomain') + @property def network_config(self): """Generate a network config like net.generate_fallback_network() with @@ -701,6 +706,10 @@ class DataSourceAzure(sources.DataSource): self._network_config = parse_network_config(nc_src) return self._network_config + @property + def region(self): + return self.metadata.get('imds', {}).get('compute', {}).get('location') + def _partitions_on_device(devpath, maxnum=16): # return a list of tuples (ptnum, path) for each part on devpath diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index afb614e4..f27ef21b 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -84,6 +84,25 @@ def construct_valid_ovf_env(data=None, pubkeys=None, NETWORK_METADATA = { + "compute": { + "location": "eastus2", + "name": "my-hostname", + "offer": "UbuntuServer", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "Canonical", + "resourceGroupName": "srugroup1", + "sku": "19.04-DAILY", + "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777", + "tags": "", + "version": "19.04.201906190", + "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642", + "vmScaleSetName": "", + "vmSize": "Standard_DS1_v2", + "zone": "" + }, "network": { "interface": [ { @@ -478,13 +497,7 @@ scbus-1 on xpt0 bus 0 expected_metadata = { 'azure_data': { 'configurationsettype': 'LinuxProvisioningConfiguration'}, - 'imds': {'network': {'interface': [{ - 'ipv4': {'ipAddress': [ - {'privateIpAddress': '10.0.0.4', - 'publicIpAddress': '104.46.124.81'}], - 'subnet': [{'address': '10.0.0.0', 'prefix': '24'}]}, - 'ipv6': {'ipAddress': []}, - 'macAddress': '000D3A047598'}]}}, + 'imds': NETWORK_METADATA, 'instance-id': 'test-instance-id', 'local-hostname': u'myhost', 'random_seed': 'wild'} @@ -612,6 +625,26 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) + def test_availability_zone_set_from_imds(self): + """Datasource.availability returns IMDS platformFaultDomain.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual('0', dsrc.availability_zone) + + def test_region_set_from_imds(self): + """Datasource.region returns IMDS region location.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual('eastus2', dsrc.region) + def test_user_cfg_set_agent_command(self): # set dscfg in via base64 encoded yaml cfg = {'agent_command': "my_command"} -- cgit v1.2.3 From 7f674256c1426ffc419fd6b13e66a58754d94939 Mon Sep 17 00:00:00 2001 
From: Chad Smith Date: Tue, 13 Aug 2019 20:13:05 +0000 Subject: azure/net: generate_fallback_nic emits network v2 config instead of v1 The function generate_fallback_config is used by Azure by default when not consuming IMDS configuration data. This function is also used by any datasource which does not implement its own network config. This simple fallback configuration sets up dhcp on the most likely NIC. It will now emit network v2 instead of network v1. This is a step toward having all components talk in v2 and allows us to avoid costly conversions between v1 and v2 for newer distributions which rely on netplan. --- cloudinit/net/__init__.py | 31 +++++--------- cloudinit/net/network_state.py | 12 ++++-- cloudinit/net/tests/test_init.py | 19 +++++---- cloudinit/sources/DataSourceAzure.py | 7 +++- tests/unittests/test_datasource/test_azure.py | 59 ++++++++++++++++++++++++++- tests/unittests/test_net.py | 41 +++++++++++++++++-- 6 files changed, 130 insertions(+), 39 deletions(-) (limited to 'cloudinit/sources/DataSourceAzure.py') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index f3cec794..ea707c09 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -265,32 +265,23 @@ def find_fallback_nic(blacklist_drivers=None): def generate_fallback_config(blacklist_drivers=None, config_driver=None): - """Determine which attached net dev is most likely to have a connection and - generate network state to run dhcp on that interface""" - + """Generate network cfg v2 for dhcp on the NIC most likely connected.""" if not config_driver: config_driver = False target_name = find_fallback_nic(blacklist_drivers=blacklist_drivers) - if target_name: - target_mac = read_sys_net_safe(target_name, 'address') - nconf = {'config': [], 'version': 1} - cfg = {'type': 'physical', 'name': target_name, - 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]} - # inject the device driver name, dev_id into config if enabled and - # device has a valid device driver value - if config_driver: - driver = device_driver(target_name) - if driver: - cfg['params'] = { - 'driver': driver, - 'device_id': device_devid(target_name), - } - nconf['config'].append(cfg) - return nconf - else: + if not target_name: # can't read any interfaces addresses (or there are none); give up return None + target_mac = read_sys_net_safe(target_name, 'address') + cfg = {'dhcp4': True, 'set-name': target_name, + 'match': {'macaddress': target_mac.lower()}} + if config_driver: + driver = device_driver(target_name) + if driver: + cfg['match']['driver'] = driver + nconf = {'ethernets': {target_name: cfg}, 'version': 2} + return nconf def extract_physdevs(netcfg): diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 0ca576b6..c0c415d0 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -596,6 +596,7 @@ class NetworkStateInterpreter(object): eno1: match: macaddress: 00:11:22:33:44:55 + driver: hv_netsvc wakeonlan: true dhcp4: true dhcp6: false @@ -631,15 +632,18 @@ class NetworkStateInterpreter(object): 'type': 'physical', 'name': cfg.get('set-name', eth), } - mac_address = cfg.get('match', {}).get('macaddress', None) + match = cfg.get('match', {}) + mac_address = match.get('macaddress', None) if not mac_address: LOG.debug('NetworkState Version2: missing "macaddress" info ' 'in config entry: %s: %s', eth, str(cfg)) - phy_cmd.update({'mac_address': mac_address}) - + phy_cmd['mac_address'] = mac_address + driver = match.get('driver', None) + if driver:
+ phy_cmd['params'] = {'driver': driver} for key in ['mtu', 'match', 'wakeonlan']: if key in cfg: - phy_cmd.update({key: cfg.get(key)}) + phy_cmd[key] = cfg[key] subnets = self._v2_to_v1_ipcfg(cfg) if len(subnets) > 0: diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index e6e77d7a..d2e38f00 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -212,9 +212,9 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 'eth1', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': {'eth1': {'match': {'macaddress': mac}, + 'dhcp4': True, 'set-name': 'eth1'}}, + 'version': 2} self.assertEqual(expected, net.generate_fallback_config()) def test_generate_fallback_finds_dormant_eth_with_mac(self): @@ -223,9 +223,9 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': {'eth0': {'match': {'macaddress': mac}, 'dhcp4': True, + 'set-name': 'eth0'}}, + 'version': 2} self.assertEqual(expected, net.generate_fallback_config()) def test_generate_fallback_finds_eth_by_operstate(self): @@ -233,9 +233,10 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': { + 'eth0': {'dhcp4': True, 'match': {'macaddress': mac}, + 'set-name': 'eth0'}}, + 'version': 2} valid_operstates = ['dormant', 'down', 'lowerlayerdown', 'unknown'] for state in valid_operstates: write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d2fad9bb..e6ed2f3b 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1241,7 +1241,7 @@ def parse_network_config(imds_metadata): privateIpv4 = addr4['privateIpAddress'] if privateIpv4: if dev_config.get('dhcp4', False): - # Append static address config for nic > 1 + # Append static address config for ip > 1 netPrefix = intf['ipv4']['subnet'][0].get( 'prefix', '24') if not dev_config.get('addresses'): @@ -1251,6 +1251,11 @@ def parse_network_config(imds_metadata): ip=privateIpv4, prefix=netPrefix)) else: dev_config['dhcp4'] = True + # non-primary interfaces should have a higher + # route-metric (cost) so default routes prefer + # primary nic due to lower route-metric value + dev_config['dhcp4-overrides'] = { + 'route-metric': (idx + 1) * 100} for addr6 in intf['ipv6']['ipAddress']: privateIpv6 = addr6['privateIpAddress'] if privateIpv6: diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 2de2aea2..4d57cebc 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -12,6 +12,7 @@ from cloudinit.tests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, ExitStack, resourceLocation) +import copy import crypt import httpretty import json @@ -129,6 +130,26 @@ NETWORK_METADATA = { } } +SECONDARY_INTERFACE = { + "macAddress": "220D3A047598", + 
"ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.1.0" + } + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.1.5", + } + ] + } +} + MOCKPATH = 'cloudinit.sources.DataSourceAzure.' @@ -619,8 +640,43 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'set-name': 'eth0', 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'dhcp4': True}}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}}, + 'version': 2} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(expected_network_config, dsrc.network_config) + + def test_network_config_set_from_imds_route_metric_for_secondary_nic(self): + """Datasource.network_config adds route-metric to secondary nics.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + expected_network_config = { + 'ethernets': { + 'eth0': {'set-name': 'eth0', + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + imds_data['network']['interface'].append(third_intf) + + self.m_get_metadata_from_imds.return_value = imds_data dsrc = self._get_ds(data) dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) @@ -925,6 +981,7 @@ scbus-1 on xpt0 bus 0 expected_cfg = { 'ethernets': { 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, 'match': {'macaddress': '00:0d:3a:04:75:98'}, 'set-name': 'eth0'}}, 'version': 2} diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 1840ade0..4f7e4207 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2156,7 +2156,7 @@ DEFAULT_DEV_ATTRS = { "carrier": False, "dormant": False, "operstate": "down", - "address": "07-1C-C6-75-A4-BE", + "address": "07-1c-c6-75-a4-be", "device/driver": None, "device/device": None, "name_assign_type": "4", @@ -2204,6 +2204,39 @@ class TestGenerateFallbackConfig(CiTestCase): "cloudinit.util.get_cmdline", "m_get_cmdline", return_value="root=/dev/sda1") + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_device_driver_v2(self, mock_get_devicelist, mock_read_sys_net, + mock_sys_dev_path): + """Network configuration for generate_fallback_config is version 2.""" + devices = { + 'eth0': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'hv_netsvc', 'device/device': '0x3', + 'name_assign_type': '4'}, + 'eth1': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'mlx4_core', 'device/device': '0x7', + 'name_assign_type': '4'}, + + } + + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + 
mock_read_sys_net, mock_sys_dev_path, + dev_attrs=devices) + + network_cfg = net.generate_fallback_config(config_driver=True) + expected = { + 'ethernets': {'eth0': {'dhcp4': True, 'set-name': 'eth0', + 'match': {'macaddress': '00:11:22:33:44:55', + 'driver': 'hv_netsvc'}}}, + 'version': 2} + self.assertEqual(expected, network_cfg) + @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") @@ -2486,7 +2519,7 @@ class TestRhelSysConfigRendering(CiTestCase): # BOOTPROTO=dhcp DEVICE=eth1000 -HWADDR=07-1C-C6-75-A4-BE +HWADDR=07-1c-c6-75-a4-be NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto @@ -3030,7 +3063,7 @@ class TestOpenSuseSysConfigRendering(CiTestCase): # BOOTPROTO=dhcp DEVICE=eth1000 -HWADDR=07-1C-C6-75-A4-BE +HWADDR=07-1c-c6-75-a4-be NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto @@ -3342,13 +3375,13 @@ class TestNetplanNetRendering(CiTestCase): expected = """ network: - version: 2 ethernets: eth1000: dhcp4: true match: macaddress: 07-1c-c6-75-a4-be set-name: eth1000 + version: 2 """ self.assertEqual(expected.lstrip(), contents.lstrip()) self.assertEqual(1, mock_clean_default.call_count) -- cgit v1.2.3 From 2f3bb764626b9065f4102c7c0a67998a9c174444 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Wed, 14 Aug 2019 21:03:13 +0000 Subject: Azure: Record boot timestamps, system information, and diagnostic events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Collect and record the following information through KVP: + timestamps related to kernel initialization and systemd activation of cloud-init services + system information including cloud-init version, kernel version, distro version, and python version + diagnostic events for the most common provisioning error issues such as empty dhcp lease, corrupted ovf-env.xml, etc. + increasing the log frequency of polling IMDS during reprovision.
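A simplified sketch of the timestamp arithmetic the new get_boot_telemetry helper performs (the monotonic values are the same illustrative microsecond counts used in the unit tests below; systemd reports them relative to kernel start):

    import time

    uptime = 42.0                        # seconds since boot, as from util.uptime()
    kernel_start = time.time() - uptime  # wall-clock time the kernel started
    user_ts_us = 1844838                 # UserspaceTimestampMonotonic from systemctl
    user_start = kernel_start + user_ts_us / 1000000
    cloudinit_ts_us = 3068203            # InactiveExitTimestampMonotonic of cloud-init-local
    cloudinit_activation = kernel_start + cloudinit_ts_us / 1000000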
--- cloudinit/sources/DataSourceAzure.py | 157 ++++++++++++++++++++----- cloudinit/sources/helpers/azure.py | 160 ++++++++++++++++++++++++-- tests/unittests/test_datasource/test_azure.py | 15 ++- tests/unittests/test_reporting_hyperv.py | 65 +++++++++++ 4 files changed, 353 insertions(+), 44 deletions(-) mode change 100755 => 100644 tests/unittests/test_reporting_hyperv.py (limited to 'cloudinit/sources/DataSourceAzure.py') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index e6ed2f3b..4984fa84 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -26,9 +26,14 @@ from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util from cloudinit.reporting import events -from cloudinit.sources.helpers.azure import (azure_ds_reporter, - azure_ds_telemetry_reporter, - get_metadata_from_fabric) +from cloudinit.sources.helpers.azure import ( + azure_ds_reporter, + azure_ds_telemetry_reporter, + get_metadata_from_fabric, + get_boot_telemetry, + get_system_info, + report_diagnostic_event, + EphemeralDHCPv4WithReporting) LOG = logging.getLogger(__name__) @@ -354,7 +359,7 @@ class DataSourceAzure(sources.DataSource): bname = str(pk['fingerprint'] + ".crt") fp_files += [os.path.join(ddir, bname)] LOG.debug("ssh authentication: " - "using fingerprint from fabirc") + "using fingerprint from fabric") with events.ReportEventStack( name="waiting-for-ssh-public-key", @@ -419,12 +424,17 @@ class DataSourceAzure(sources.DataSource): ret = load_azure_ds_dir(cdev) except NonAzureDataSource: + report_diagnostic_event( + "Did not find Azure data source in %s" % cdev) continue except BrokenAzureDataSource as exc: msg = 'BrokenAzureDataSource: %s' % exc + report_diagnostic_event(msg) raise sources.InvalidMetaDataException(msg) except util.MountFailedError: - LOG.warning("%s was not mountable", cdev) + msg = '%s was not mountable' % cdev + report_diagnostic_event(msg) + LOG.warning(msg) continue perform_reprovision = reprovision or self._should_reprovision(ret) @@ -432,6 +442,7 @@ class DataSourceAzure(sources.DataSource): if util.is_FreeBSD(): msg = "Free BSD is not supported for PPS VMs" LOG.error(msg) + report_diagnostic_event(msg) raise sources.InvalidMetaDataException(msg) ret = self._reprovision() imds_md = get_metadata_from_imds( @@ -450,7 +461,9 @@ class DataSourceAzure(sources.DataSource): break if not found: - raise sources.InvalidMetaDataException('No Azure metadata found') + msg = 'No Azure metadata found' + report_diagnostic_event(msg) + raise sources.InvalidMetaDataException(msg) if found == ddir: LOG.debug("using files cached in %s", ddir) @@ -469,9 +482,14 @@ class DataSourceAzure(sources.DataSource): self._report_ready(lease=self._ephemeral_dhcp_ctx.lease) self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral else: - with EphemeralDHCPv4() as lease: - self._report_ready(lease=lease) - + try: + with EphemeralDHCPv4WithReporting( + azure_ds_reporter) as lease: + self._report_ready(lease=lease) + except Exception as e: + report_diagnostic_event( + "exception while reporting ready: %s" % e) + raise return crawled_data def _is_platform_viable(self): @@ -492,6 +510,16 @@ class DataSourceAzure(sources.DataSource): """ if not self._is_platform_viable(): return False + try: + get_boot_telemetry() + except Exception as e: + LOG.warning("Failed to get boot telemetry: %s", e) + + try: + get_system_info() + except Exception as e: + LOG.warning("Failed to get system information: %s", e) + try: 
crawled_data = util.log_time( logfunc=LOG.debug, msg='Crawl of metadata service', @@ -551,27 +579,55 @@ class DataSourceAzure(sources.DataSource): headers = {"Metadata": "true"} nl_sock = None report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) + self.imds_logging_threshold = 1 + self.imds_poll_counter = 1 + dhcp_attempts = 0 + vnet_switched = False + return_val = None def exc_cb(msg, exception): if isinstance(exception, UrlError) and exception.code == 404: + if self.imds_poll_counter == self.imds_logging_threshold: + # Reducing the logging frequency as we are polling IMDS + self.imds_logging_threshold *= 2 + LOG.debug("Call to IMDS with arguments %s failed " + "with status code %s after %s retries", + msg, exception.code, self.imds_poll_counter) + LOG.debug("Backing off logging threshold for the same " + "exception to %d", self.imds_logging_threshold) + self.imds_poll_counter += 1 return True + # If we get an exception while trying to call IMDS, we # call DHCP and setup the ephemeral network to acquire the new IP. + LOG.debug("Call to IMDS with arguments %s failed with " + "status code %s", msg, exception.code) + report_diagnostic_event("polling IMDS failed with exception %s" + % exception.code) return False LOG.debug("Wait for vnetswitch to happen") while True: try: - # Save our EphemeralDHCPv4 context so we avoid repeated dhcp - self._ephemeral_dhcp_ctx = EphemeralDHCPv4() - lease = self._ephemeral_dhcp_ctx.obtain_lease() + # Save our EphemeralDHCPv4 context to avoid repeated dhcp + with events.ReportEventStack( + name="obtain-dhcp-lease", + description="obtain dhcp lease", + parent=azure_ds_reporter): + self._ephemeral_dhcp_ctx = EphemeralDHCPv4() + lease = self._ephemeral_dhcp_ctx.obtain_lease() + + if vnet_switched: + dhcp_attempts += 1 if report_ready: try: nl_sock = netlink.create_bound_netlink_socket() except netlink.NetlinkCreateSocketError as e: + report_diagnostic_event(e) LOG.warning(e) self._ephemeral_dhcp_ctx.clean_network() - return + break + path = REPORTED_READY_MARKER_FILE LOG.info( "Creating a marker file to report ready: %s", path) @@ -579,17 +635,33 @@ class DataSourceAzure(sources.DataSource): pid=os.getpid(), time=time())) self._report_ready(lease=lease) report_ready = False - try: - netlink.wait_for_media_disconnect_connect( - nl_sock, lease['interface']) - except AssertionError as error: - LOG.error(error) - return + + with events.ReportEventStack( + name="wait-for-media-disconnect-connect", + description="wait for vnet switch", + parent=azure_ds_reporter): + try: + netlink.wait_for_media_disconnect_connect( + nl_sock, lease['interface']) + except AssertionError as error: + report_diagnostic_event(error) + LOG.error(error) + break + + vnet_switched = True self._ephemeral_dhcp_ctx.clean_network() else: - return readurl(url, timeout=IMDS_TIMEOUT_IN_SECONDS, - headers=headers, exception_cb=exc_cb, - infinite=True, log_req_resp=False).contents + with events.ReportEventStack( + name="get-reprovision-data-from-imds", + description="get reprovision data from imds", + parent=azure_ds_reporter): + return_val = readurl(url, + timeout=IMDS_TIMEOUT_IN_SECONDS, + headers=headers, + exception_cb=exc_cb, + infinite=True, + log_req_resp=False).contents + break except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry self._ephemeral_dhcp_ctx.clean_network() @@ -598,6 +670,14 @@ class DataSourceAzure(sources.DataSource): if nl_sock: nl_sock.close() + if vnet_switched: + report_diagnostic_event("attempted dhcp %d times after reuse" % + 
dhcp_attempts) + report_diagnostic_event("polled imds %d times after reuse" % + self.imds_poll_counter) + + return return_val + @azure_ds_telemetry_reporter def _report_ready(self, lease): """Tells the fabric provisioning has completed """ @@ -666,9 +746,12 @@ class DataSourceAzure(sources.DataSource): self.ds_cfg['agent_command']) try: fabric_data = metadata_func() - except Exception: + except Exception as e: + report_diagnostic_event( + "Error communicating with Azure fabric; You may experience " + "connectivity issues: %s" % e) LOG.warning( - "Error communicating with Azure fabric; You may experience." + "Error communicating with Azure fabric; You may experience " "connectivity issues.", exc_info=True) return False @@ -1027,7 +1110,9 @@ def read_azure_ovf(contents): try: dom = minidom.parseString(contents) except Exception as e: - raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e) + error_str = "Invalid ovf-env.xml: %s" % e + report_diagnostic_event(error_str) + raise BrokenAzureDataSource(error_str) results = find_child(dom.documentElement, lambda n: n.localName == "ProvisioningSection") @@ -1299,8 +1384,13 @@ def get_metadata_from_imds(fallback_nic, retries): if net.is_up(fallback_nic): return util.log_time(**kwargs) else: - with EphemeralDHCPv4(fallback_nic): - return util.log_time(**kwargs) + try: + with EphemeralDHCPv4WithReporting( + azure_ds_reporter, fallback_nic): + return util.log_time(**kwargs) + except Exception as e: + report_diagnostic_event("exception while getting metadata: %s" % e) + raise @azure_ds_telemetry_reporter @@ -1313,11 +1403,14 @@ def _get_metadata_from_imds(retries): url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, retries=retries, exception_cb=retry_on_url_exc) except Exception as e: - LOG.debug('Ignoring IMDS instance metadata: %s', e) + msg = 'Ignoring IMDS instance metadata: %s' % e + report_diagnostic_event(msg) + LOG.debug(msg) return {} try: return util.load_json(str(response)) - except json.decoder.JSONDecodeError: + except json.decoder.JSONDecodeError as e: + report_diagnostic_event('non-json imds response' % e) LOG.warning( 'Ignoring non-json IMDS instance metadata: %s', str(response)) return {} @@ -1370,8 +1463,10 @@ def _is_platform_viable(seed_dir): asset_tag = util.read_dmi_data('chassis-asset-tag') if asset_tag == AZURE_CHASSIS_ASSET_TAG: return True - LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) - evt.description = "Non-Azure DMI asset tag '%s' discovered.", asset_tag + msg = "Non-Azure DMI asset tag '%s' discovered." 
% asset_tag + LOG.debug(msg) + evt.description = msg + report_diagnostic_event(msg) if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): return True return False diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 82c4c8c4..f1fba175 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -16,7 +16,11 @@ from xml.etree import ElementTree from cloudinit import url_helper from cloudinit import util +from cloudinit import version +from cloudinit import distros from cloudinit.reporting import events +from cloudinit.net.dhcp import EphemeralDHCPv4 +from datetime import datetime LOG = logging.getLogger(__name__) @@ -24,6 +28,10 @@ LOG = logging.getLogger(__name__) # value is applied if the endpoint can't be found within a lease file DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10" +BOOT_EVENT_TYPE = 'boot-telemetry' +SYSTEMINFO_EVENT_TYPE = 'system-info' +DIAGNOSTIC_EVENT_TYPE = 'diagnostic' + azure_ds_reporter = events.ReportEventStack( name="azure-ds", description="initialize reporter for azure ds", @@ -40,6 +48,105 @@ def azure_ds_telemetry_reporter(func): return impl +@azure_ds_telemetry_reporter +def get_boot_telemetry(): + """Report timestamps related to kernel initialization and systemd + activation of cloud-init""" + if not distros.uses_systemd(): + raise RuntimeError( + "distro not using systemd, skipping boot telemetry") + + LOG.debug("Collecting boot telemetry") + try: + kernel_start = float(time.time()) - float(util.uptime()) + except ValueError: + raise RuntimeError("Failed to determine kernel start timestamp") + + try: + out, _ = util.subp(['/bin/systemctl', + 'show', '-p', + 'UserspaceTimestampMonotonic'], + capture=True) + tsm = None + if out and '=' in out: + tsm = out.split("=")[1] + + if not tsm: + raise RuntimeError("Failed to parse " + "UserspaceTimestampMonotonic from systemd") + + user_start = kernel_start + (float(tsm) / 1000000) + except util.ProcessExecutionError as e: + raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s" + % e) + except ValueError as e: + raise RuntimeError("Failed to parse " + "UserspaceTimestampMonotonic from systemd: %s" + % e) + + try: + out, _ = util.subp(['/bin/systemctl', 'show', + 'cloud-init-local', '-p', + 'InactiveExitTimestampMonotonic'], + capture=True) + tsm = None + if out and '=' in out: + tsm = out.split("=")[1] + if not tsm: + raise RuntimeError("Failed to parse " + "InactiveExitTimestampMonotonic from systemd") + + cloudinit_activation = kernel_start + (float(tsm) / 1000000) + except util.ProcessExecutionError as e: + raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s" + % e) + except ValueError as e: + raise RuntimeError("Failed to parse " + "InactiveExitTimestampMonotonic from systemd: %s" + % e) + + evt = events.ReportingEvent( + BOOT_EVENT_TYPE, 'boot-telemetry', + "kernel_start=%s user_start=%s cloudinit_activation=%s" % + (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z', + datetime.utcfromtimestamp(user_start).isoformat() + 'Z', + datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'), + events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + +@azure_ds_telemetry_reporter +def get_system_info(): + """Collect and report system information""" + info = util.system_info() + evt = events.ReportingEvent( + SYSTEMINFO_EVENT_TYPE, 'system information', + "cloudinit_version=%s, kernel_version=%s, variant=%s, " + "distro_name=%s, 
distro_version=%s, flavor=%s, " + "python_version=%s" % + (version.version_string(), info['release'], info['variant'], + info['dist'][0], info['dist'][1], info['dist'][2], + info['python']), events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + +def report_diagnostic_event(str): + """Report a diagnostic event""" + evt = events.ReportingEvent( + DIAGNOSTIC_EVENT_TYPE, 'diagnostic message', + str, events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + @contextmanager def cd(newdir): prevdir = os.getcwd() @@ -360,16 +467,19 @@ class WALinuxAgentShim(object): value = dhcp245 LOG.debug("Using Azure Endpoint from dhcp options") if value is None: + report_diagnostic_event("No Azure endpoint from dhcp options") LOG.debug('Finding Azure endpoint from networkd...') value = WALinuxAgentShim._networkd_get_value_from_leases() if value is None: # Option-245 stored in /run/cloud-init/dhclient.hooks/.json # a dhclient exit hook that calls cloud-init-dhclient-hook + report_diagnostic_event("No Azure endpoint from networkd") LOG.debug('Finding Azure endpoint from hook json...') dhcp_options = WALinuxAgentShim._load_dhclient_json() value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options) if value is None: # Fallback and check the leases file if unsuccessful + report_diagnostic_event("No Azure endpoint from dhclient logs") LOG.debug("Unable to find endpoint in dhclient logs. " " Falling back to check lease files") if fallback_lease_file is None: @@ -381,11 +491,15 @@ class WALinuxAgentShim(object): value = WALinuxAgentShim._get_value_from_leases_file( fallback_lease_file) if value is None: - LOG.warning("No lease found; using default endpoint") + msg = "No lease found; using default endpoint" + report_diagnostic_event(msg) + LOG.warning(msg) value = DEFAULT_WIRESERVER_ENDPOINT endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) - LOG.debug('Azure endpoint found at %s', endpoint_ip_address) + msg = 'Azure endpoint found at %s' % endpoint_ip_address + report_diagnostic_event(msg) + LOG.debug(msg) return endpoint_ip_address @azure_ds_telemetry_reporter @@ -399,16 +513,19 @@ class WALinuxAgentShim(object): try: response = http_client.get( 'http://{0}/machine/?comp=goalstate'.format(self.endpoint)) - except Exception: + except Exception as e: if attempts < 10: time.sleep(attempts + 1) else: + report_diagnostic_event( + "failed to register with Azure: %s" % e) raise else: break attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) + report_diagnostic_event("container_id %s" % goal_state.container_id) ssh_keys = [] if goal_state.certificates_xml is not None and pubkey_info is not None: LOG.debug('Certificate XML found; parsing out public keys.') @@ -449,11 +566,20 @@ class WALinuxAgentShim(object): container_id=goal_state.container_id, instance_id=goal_state.instance_id, ) - http_client.post( - "http://{0}/machine?comp=health".format(self.endpoint), - data=document, - extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, - ) + # Host will collect kvps when cloud-init reports ready. + # some kvps might still be in the queue. We yield the scheduler + # to make sure we process all kvps up till this point. 
+ time.sleep(0) + try: + http_client.post( + "http://{0}/machine?comp=health".format(self.endpoint), + data=document, + extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, + ) + except Exception as e: + report_diagnostic_event("exception while reporting ready: %s" % e) + raise + LOG.info('Reported ready to Azure fabric.') @@ -467,4 +593,22 @@ def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, finally: shim.clean_up() + +class EphemeralDHCPv4WithReporting(object): + def __init__(self, reporter, nic=None): + self.reporter = reporter + self.ephemeralDHCPv4 = EphemeralDHCPv4(iface=nic) + + def __enter__(self): + with events.ReportEventStack( + name="obtain-dhcp-lease", + description="obtain dhcp lease", + parent=self.reporter): + return self.ephemeralDHCPv4.__enter__() + + def __exit__(self, excp_type, excp_value, excp_traceback): + self.ephemeralDHCPv4.__exit__( + excp_type, excp_value, excp_traceback) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 4d57cebc..3547dd94 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -181,7 +181,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): self.logs.getvalue()) @mock.patch(MOCKPATH + 'readurl') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') @mock.patch(MOCKPATH + 'net.is_up') def test_get_metadata_performs_dhcp_when_network_is_down( self, m_net_is_up, m_dhcp, m_readurl): @@ -195,7 +195,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): dsaz.get_metadata_from_imds('eth9', retries=2)) m_net_is_up.assert_called_with('eth9') - m_dhcp.assert_called_with('eth9') + m_dhcp.assert_called_with(mock.ANY, 'eth9') self.assertIn( "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time self.logs.getvalue()) @@ -552,7 +552,8 @@ scbus-1 on xpt0 bus 0 dsrc.crawl_metadata() self.assertEqual(str(cm.exception), error_msg) - @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') + @mock.patch( + 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') @mock.patch( 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') @@ -1308,7 +1309,9 @@ class TestAzureBounce(CiTestCase): self.assertEqual(initial_host_name, self.set_hostname.call_args_list[-1][0][0]) - def test_environment_correct_for_bounce_command(self): + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_environment_correct_for_bounce_command( + self, mock_get_boot_telemetry): interface = 'int0' hostname = 'my-new-host' old_hostname = 'my-old-host' @@ -1324,7 +1327,9 @@ class TestAzureBounce(CiTestCase): self.assertEqual(hostname, bounce_env['hostname']) self.assertEqual(old_hostname, bounce_env['old_hostname']) - def test_default_bounce_command_ifup_used_by_default(self): + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_default_bounce_command_ifup_used_by_default( + self, mock_get_boot_telemetry): cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py old mode 100755 new mode 100644 index d01ed5b3..640895a4 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -7,9 +7,12 @@ import json import os import struct 
import time +import re +import mock from cloudinit import util from cloudinit.tests.helpers import CiTestCase +from cloudinit.sources.helpers import azure class TestKvpEncoding(CiTestCase): @@ -126,3 +129,65 @@ class TextKvpReporter(CiTestCase): reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) kvps = list(reporter._iterate_kvps(0)) self.assertEqual(0, len(kvps)) + + @mock.patch('cloudinit.distros.uses_systemd') + @mock.patch('cloudinit.util.subp') + def test_get_boot_telemetry(self, m_subp, m_sysd): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + datetime_pattern = r"\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]" + r"\d:[0-5]\d\.\d+([+-][0-2]\d:[0-5]\d|Z)" + + # get_boot_telemetry makes two subp calls to systemctl. We provide + # a list of values that the subp calls should return + m_subp.side_effect = [ + ('UserspaceTimestampMonotonic=1844838', ''), + ('InactiveExitTimestampMonotonic=3068203', '')] + m_sysd.return_value = True + + reporter.publish_event(azure.get_boot_telemetry()) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + + evt_msg = kvps[0]['value'] + if not re.search("kernel_start=" + datetime_pattern, evt_msg): + raise AssertionError("missing kernel_start timestamp") + if not re.search("user_start=" + datetime_pattern, evt_msg): + raise AssertionError("missing user_start timestamp") + if not re.search("cloudinit_activation=" + datetime_pattern, + evt_msg): + raise AssertionError( + "missing cloudinit_activation timestamp") + + def test_get_system_info(self): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + pattern = r"[^=\s]+" + + reporter.publish_event(azure.get_system_info()) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + evt_msg = kvps[0]['value'] + + # the most important information is cloudinit version, + # kernel_version, and the distro variant. It is ok if + # if the rest is not available + if not re.search("cloudinit_version=" + pattern, evt_msg): + raise AssertionError("missing cloudinit_version string") + if not re.search("kernel_version=" + pattern, evt_msg): + raise AssertionError("missing kernel_version string") + if not re.search("variant=" + pattern, evt_msg): + raise AssertionError("missing distro variant string") + + def test_report_diagnostic_event(self): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + + reporter.publish_event( + azure.report_diagnostic_event("test_diagnostic")) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + evt_msg = kvps[0]['value'] + + if "test_diagnostic" not in evt_msg: + raise AssertionError("missing expected diagnostic message") -- cgit v1.2.3 From e1b4b8c903fed3b69e57ec08c17ce94097d55901 Mon Sep 17 00:00:00 2001 From: Sam Eiderman Date: Tue, 29 Oct 2019 23:00:36 +0000 Subject: azure: Do not lock user on instance id change After initial boot ovf-env.xml is copied to agent dir (/var/lib/waagent/) with REDACTED password. On subsequent boots DataSourceAzure loads with a configuration where the user specified in /var/lib/waagent/ovf-env.xml is locked. If instance id changes, cc_users_groups action will lock the user. Fix this behavior by not locking the user if its password is REDACTED. 
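A condensed sketch of the resulting default-user logic (DEF_PASSWD_REDACTION is the placeholder value Azure writes into the cached ovf-env.xml; the lambda stands in for the real encrypt_pass helper):

    DEF_PASSWD_REDACTION = 'REDACTED'

    def default_user_cfg(username, password, encrypt_pass=lambda p: '<hash>'):
        defuser = {'name': username} if username else {}
        if password:
            # any password, even the redacted placeholder, keeps the account unlocked
            defuser['lock_passwd'] = False
            if password != DEF_PASSWD_REDACTION:
                # only a real (non-redacted) password is hashed and set
                defuser['passwd'] = encrypt_pass(password)
        return defuser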
LP: #1849677 --- cloudinit/sources/DataSourceAzure.py | 5 +++-- tests/unittests/test_datasource/test_azure.py | 16 ++++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources/DataSourceAzure.py') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 4984fa84..cdf49d36 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1193,9 +1193,10 @@ def read_azure_ovf(contents): defuser = {} if username: defuser['name'] = username - if password and DEF_PASSWD_REDACTION != password: - defuser['passwd'] = encrypt_pass(password) + if password: defuser['lock_passwd'] = False + if DEF_PASSWD_REDACTION != password: + defuser['passwd'] = encrypt_pass(password) if defuser: cfg['system_info'] = {'default_user': defuser} diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 3547dd94..80c6f019 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -769,6 +769,22 @@ scbus-1 on xpt0 bus 0 crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos])) + def test_user_not_locked_if_password_redacted(self): + odata = {'HostName': "myhost", 'UserName': "myuser", + 'UserPassword': dsaz.DEF_PASSWD_REDACTION} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue('default_user' in dsrc.cfg['system_info']) + defuser = dsrc.cfg['system_info']['default_user'] + + # default user should be updated username and should not be locked. + self.assertEqual(defuser['name'], odata['UserName']) + self.assertIn('lock_passwd', defuser) + self.assertFalse(defuser['lock_passwd']) + def test_userdata_plain(self): mydata = "FOOBAR" odata = {'UserData': {'text': mydata, 'encoding': 'plain'}} -- cgit v1.2.3 From 02f07b666adc62d70c4f1a98c2ae80cb6629fa9a Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 4 Nov 2019 22:11:37 +0000 Subject: azure: support matching dhcp route-metrics for dual-stack ipv4 ipv6 Network v2 configuration for Azure will set both dhcp4 and dhcp6 to False by default. When IPv6 privateIpAddresses are present for an interface in Azure's Instance Metadata Service (IMDS), set dhcp6: True and provide a route-metric value that will match the corresponding dhcp4 route-metric. The route-metric value will increase by 100 for each additional interface present to ensure the primary interface has a route to IMDS. Also fix dhcp route-metric rendering for eni and sysconfig distros. 
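The interface-indexed metric scheme described above, as a tiny sketch mirroring the (idx + 1) * 100 expression in the patch; sharing one overrides dict keeps the dhcp4 and dhcp6 metrics identical per nic:

    def dhcp_overrides_for(idx):
        # lower metric wins, so the primary nic (idx 0) keeps the default route
        return {'route-metric': (idx + 1) * 100}

    assert dhcp_overrides_for(0) == {'route-metric': 100}
    assert dhcp_overrides_for(2) == {'route-metric': 300}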
LP: #1850308 --- cloudinit/net/network_state.py | 17 ++++- cloudinit/net/sysconfig.py | 6 +- cloudinit/sources/DataSourceAzure.py | 10 ++- tests/unittests/test_datasource/test_azure.py | 101 ++++++++++++++++++++++++++ tests/unittests/test_net.py | 54 ++++++++++++++ 5 files changed, 178 insertions(+), 10 deletions(-) (limited to 'cloudinit/sources/DataSourceAzure.py') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index ba85c69e..20b7716b 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -22,8 +22,9 @@ NETWORK_STATE_REQUIRED_KEYS = { 1: ['version', 'config', 'network_state'], } NETWORK_V2_KEY_FILTER = [ - 'addresses', 'dhcp4', 'dhcp6', 'gateway4', 'gateway6', 'interfaces', - 'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan' + 'addresses', 'dhcp4', 'dhcp4-overrides', 'dhcp6', 'dhcp6-overrides', + 'gateway4', 'gateway6', 'interfaces', 'match', 'mtu', 'nameservers', + 'renderer', 'set-name', 'wakeonlan' ] NET_CONFIG_TO_V2 = { @@ -747,12 +748,20 @@ class NetworkStateInterpreter(object): def _v2_to_v1_ipcfg(self, cfg): """Common ipconfig extraction from v2 to v1 subnets array.""" + def _add_dhcp_overrides(overrides, subnet): + if 'route-metric' in overrides: + subnet['metric'] = overrides['route-metric'] + subnets = [] if cfg.get('dhcp4'): - subnets.append({'type': 'dhcp4'}) + subnet = {'type': 'dhcp4'} + _add_dhcp_overrides(cfg.get('dhcp4-overrides', {}), subnet) + subnets.append(subnet) if cfg.get('dhcp6'): + subnet = {'type': 'dhcp6'} self.use_ipv6 = True - subnets.append({'type': 'dhcp6'}) + _add_dhcp_overrides(cfg.get('dhcp6-overrides', {}), subnet) + subnets.append(subnet) gateway4 = None gateway6 = None diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 6717d924..fe0c67ca 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -395,6 +395,9 @@ class Renderer(renderer.Renderer): ipv6_index = -1 for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): subnet_type = subnet.get('type') + # metric may apply to both dhcp and static config + if 'metric' in subnet: + iface_cfg['METRIC'] = subnet['metric'] if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']: if has_default_route and iface_cfg['BOOTPROTO'] != 'none': iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False @@ -426,9 +429,6 @@ class Renderer(renderer.Renderer): else: iface_cfg['GATEWAY'] = subnet['gateway'] - if 'metric' in subnet: - iface_cfg['METRIC'] = subnet['metric'] - if 'dns_search' in subnet: iface_cfg['DOMAIN'] = ' '.join(subnet['dns_search']) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index cdf49d36..44cca210 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1322,7 +1322,8 @@ def parse_network_config(imds_metadata): network_metadata = imds_metadata['network'] for idx, intf in enumerate(network_metadata['interface']): nicname = 'eth{idx}'.format(idx=idx) - dev_config = {} + dev_config = {'dhcp4': False, 'dhcp6': False} + dhcp_override = {'route-metric': (idx + 1) * 100} for addr4 in intf['ipv4']['ipAddress']: privateIpv4 = addr4['privateIpAddress'] if privateIpv4: @@ -1340,12 +1341,15 @@ def parse_network_config(imds_metadata): # non-primary interfaces should have a higher # route-metric (cost) so default routes prefer # primary nic due to lower route-metric value - dev_config['dhcp4-overrides'] = { - 'route-metric': (idx + 1) * 100} + dev_config['dhcp4-overrides'] = dhcp_override for addr6 in 
intf['ipv6']['ipAddress']: privateIpv6 = addr6['privateIpAddress'] if privateIpv6: dev_config['dhcp6'] = True + # non-primary interfaces should have a higher + # route-metric (cost) so default routes prefer + # primary nic due to lower route-metric value + dev_config['dhcp6-overrides'] = dhcp_override break if dev_config: mac = ':'.join(re.findall(r'..', intf['macAddress'])) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 80c6f019..d92d7b2f 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -153,6 +153,102 @@ SECONDARY_INTERFACE = { MOCKPATH = 'cloudinit.sources.DataSourceAzure.' +class TestParseNetworkConfig(CiTestCase): + + maxDiff = None + + def test_single_ipv4_nic_configuration(self): + """parse_network_config emits dhcp on single nic with ipv4""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) + + def test_increases_route_metric_for_non_primary_nics(self): + """parse_network_config increases route-metric for each nic""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + imds_data['network']['interface'].append(third_intf) + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv4_and_ipv6_route_metrics_match_for_nics(self): + """parse_network_config emits matching ipv4 and ipv6 route-metrics.""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp6': False, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + third_intf['ipv6'] = { + "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + } + 
imds_data['network']['interface'].append(third_intf) + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv4_secondary_ips_will_be_static_addrs(self): + """parse_network_config emits primary ipv4 as dhcp others are static""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + # Secondary ipv6 addresses currently ignored/unconfigured + nic1['ipv6'] = { + "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}] + } + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + class TestGetMetadataFromIMDS(HttprettyTestCase): with_logs = True @@ -641,6 +737,7 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'set-name': 'eth0', 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}}}, 'version': 2} @@ -658,14 +755,17 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'set-name': 'eth0', 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}}, 'eth1': {'set-name': 'eth1', 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200}}, 'eth2': {'set-name': 'eth2', 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} @@ -999,6 +1099,7 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, 'match': {'macaddress': '00:0d:3a:04:75:98'}, 'set-name': 'eth0'}}, 'version': 2} diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 6f83ad73..35ce55d2 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3101,6 +3101,36 @@ USERCTL=no self._compare_files_to_expected( expected, self._render_and_read(network_config=v2data)) + def test_from_v2_route_metric(self): + """verify route-metric gets rendered on nic when source is netplan.""" + overrides = {'route-metric': 100} + v2base = { + 'version': 2, + 'ethernets': { + 'eno1': {'dhcp4': True, + 'match': {'macaddress': '07-1c-c6-75-a4-be'}}}} + expected = { + 'ifcfg-eno1': textwrap.dedent("""\ + BOOTPROTO=dhcp + DEVICE=eno1 + HWADDR=07-1c-c6-75-a4-be + METRIC=100 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + } + for dhcp_ver in ('dhcp4', 'dhcp6'): + v2data = copy.deepcopy(v2base) + if dhcp_ver == 'dhcp6': + expected['ifcfg-eno1'] += "IPV6INIT=yes\nDHCPV6C=yes\n" + v2data['ethernets']['eno1'].update( + {dhcp_ver: True, '{0}-overrides'.format(dhcp_ver): overrides}) + self._compare_files_to_expected( + expected, self._render_and_read(network_config=v2data)) + class TestOpenSuseSysConfigRendering(CiTestCase): @@ -3466,6 +3496,30 @@ iface eth0 inet dhcp self.assertEqual( expected, dir2dict(tmp_dir)['/etc/network/interfaces']) + def test_v2_route_metric_to_eni(self): + """Network v2 route-metric overrides are preserved in eni output""" + tmp_dir = self.tmp_dir() + renderer = eni.Renderer() + expected_tmpl = 
textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto eth0 + iface eth0 inet{suffix} dhcp + metric 100 + """) + for dhcp_ver in ('dhcp4', 'dhcp6'): + suffix = '6' if dhcp_ver == 'dhcp6' else '' + dhcp_cfg = { + dhcp_ver: True, + '{ver}-overrides'.format(ver=dhcp_ver): {'route-metric': 100}} + v2_input = {'version': 2, 'ethernets': {'eth0': dhcp_cfg}} + ns = network_state.parse_net_config_data(v2_input) + renderer.render_network_state(ns, target=tmp_dir) + self.assertEqual( + expected_tmpl.format(suffix=suffix), + dir2dict(tmp_dir)['/etc/network/interfaces']) + class TestNetplanNetRendering(CiTestCase): -- cgit v1.2.3 From 9478f0f2fa6935d685092f344b23f34b883149a5 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 13 Nov 2019 13:00:12 -0700 Subject: azure: support secondary ipv6 addresses (#33) Azure's Instance Metadata Service (IMDS) reports multiple IPv6 addresses, via the http://169.254.169.254/metadata/instance/network route. Any additional values after the first in 'ipAddresses' under the 'ipv6' interface key are extracted and configured as static IPs on the interface. --- cloudinit/sources/DataSourceAzure.py | 49 +++++++++++++-------------- tests/unittests/test_datasource/test_azure.py | 34 +++++++++++++++++-- 2 files changed, 56 insertions(+), 27 deletions(-) (limited to 'cloudinit/sources/DataSourceAzure.py') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 44cca210..87a848ce 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1321,36 +1321,35 @@ def parse_network_config(imds_metadata): LOG.debug('Azure: generating network configuration from IMDS') network_metadata = imds_metadata['network'] for idx, intf in enumerate(network_metadata['interface']): + # First IPv4 and/or IPv6 address will be obtained via DHCP. + # Any additional IPs of each type will be set as static + # addresses. 
nicname = 'eth{idx}'.format(idx=idx) - dev_config = {'dhcp4': False, 'dhcp6': False} dhcp_override = {'route-metric': (idx + 1) * 100} - for addr4 in intf['ipv4']['ipAddress']: - privateIpv4 = addr4['privateIpAddress'] - if privateIpv4: - if dev_config.get('dhcp4', False): - # Append static address config for ip > 1 - netPrefix = intf['ipv4']['subnet'][0].get( - 'prefix', '24') - if not dev_config.get('addresses'): - dev_config['addresses'] = [] - dev_config['addresses'].append( - '{ip}/{prefix}'.format( - ip=privateIpv4, prefix=netPrefix)) - else: - dev_config['dhcp4'] = True + dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override, + 'dhcp6': False} + for addr_type in ('ipv4', 'ipv6'): + addresses = intf.get(addr_type, {}).get('ipAddress', []) + if addr_type == 'ipv4': + default_prefix = '24' + else: + default_prefix = '128' + if addresses: + dev_config['dhcp6'] = True # non-primary interfaces should have a higher # route-metric (cost) so default routes prefer # primary nic due to lower route-metric value - dev_config['dhcp4-overrides'] = dhcp_override - for addr6 in intf['ipv6']['ipAddress']: - privateIpv6 = addr6['privateIpAddress'] - if privateIpv6: - dev_config['dhcp6'] = True - # non-primary interfaces should have a higher - # route-metric (cost) so default routes prefer - # primary nic due to lower route-metric value - dev_config['dhcp6-overrides'] = dhcp_override - break + dev_config['dhcp6-overrides'] = dhcp_override + for addr in addresses[1:]: + # Append static address config for ip > 1 + netPrefix = intf[addr_type]['subnet'][0].get( + 'prefix', default_prefix) + privateIp = addr['privateIpAddress'] + if not dev_config.get('addresses'): + dev_config['addresses'] = [] + dev_config['addresses'].append( + '{ip}/{prefix}'.format( + ip=privateIp, prefix=netPrefix)) if dev_config: mac = ':'.join(re.findall(r'..', intf['macAddress'])) dev_config.update( diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index d92d7b2f..59e351de 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -197,9 +197,11 @@ class TestParseNetworkConfig(CiTestCase): def test_ipv4_and_ipv6_route_metrics_match_for_nics(self): """parse_network_config emits matching ipv4 and ipv6 route-metrics.""" expected = {'ethernets': { - 'eth0': {'dhcp4': True, + 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/128'], + 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, 'match': {'macaddress': '00:0d:3a:04:75:98'}, 'set-name': 'eth0'}, 'eth1': {'set-name': 'eth1', @@ -214,6 +216,14 @@ class TestParseNetworkConfig(CiTestCase): 'dhcp6': True, 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2} imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + nic1['ipv6'] = { + "subnet": [{"address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}] + } imds_data['network']['interface'].append(SECONDARY_INTERFACE) third_intf = copy.deepcopy(SECONDARY_INTERFACE) third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') @@ -240,6 +250,26 @@ class TestParseNetworkConfig(CiTestCase): nic1 = imds_data['network']['interface'][0] nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + nic1['ipv6'] = { + "subnet": 
[{"prefix": "10", "address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + } + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv6_secondary_ips_will_be_static_cidrs(self): + """parse_network_config emits primary ipv6 as dhcp others are static""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/10'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + # Secondary ipv6 addresses currently ignored/unconfigured nic1['ipv6'] = { "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], -- cgit v1.2.3 From 129b1c4ea250619bd7caed7aaffacc796b0139f2 Mon Sep 17 00:00:00 2001 From: AOhassan <37305877+AOhassan@users.noreply.github.com> Date: Thu, 12 Dec 2019 13:51:42 -0800 Subject: azure: avoid re-running cloud-init when instance-id is byte-swapped (#84) Azure stores the instance ID with an incorrect byte ordering for the first three hyphen delimited parts. This results in invalid is_new_instance checks forcing Azure datasource to recrawl the metadata service. When persisting instance-id from the metadata service, swap the instance-id string byte order such that it is consistent with that returned by dmi information. Check whether the instance-id string is a byte-swapped match when determining correctly whether the Azure platform instance-id has actually changed. --- cloudinit/sources/DataSourceAzure.py | 16 ++++++++++--- cloudinit/sources/helpers/azure.py | 27 ++++++++++++++++++++++ tests/unittests/test_datasource/test_azure.py | 24 ++++++++++++++++--- .../unittests/test_datasource/test_azure_helper.py | 19 +++++++++++++++ 4 files changed, 80 insertions(+), 6 deletions(-) (limited to 'cloudinit/sources/DataSourceAzure.py') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 87a848ce..24f448c5 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -33,7 +33,8 @@ from cloudinit.sources.helpers.azure import ( get_boot_telemetry, get_system_info, report_diagnostic_event, - EphemeralDHCPv4WithReporting) + EphemeralDHCPv4WithReporting, + is_byte_swapped) LOG = logging.getLogger(__name__) @@ -471,8 +472,7 @@ class DataSourceAzure(sources.DataSource): seed = _get_random_seed() if seed: crawled_data['metadata']['random_seed'] = seed - crawled_data['metadata']['instance-id'] = util.read_dmi_data( - 'system-uuid') + crawled_data['metadata']['instance-id'] = self._iid() if perform_reprovision: LOG.info("Reporting ready to Azure after getting ReprovisionData") @@ -558,6 +558,16 @@ class DataSourceAzure(sources.DataSource): # quickly (local check only) if self.instance_id is still valid return sources.instance_id_matches_system_uuid(self.get_instance_id()) + def _iid(self, previous=None): + prev_iid_path = os.path.join( + self.paths.get_cpath('data'), 'instance-id') + iid = util.read_dmi_data('system-uuid') + if os.path.exists(prev_iid_path): + previous = util.load_file(prev_iid_path).strip() + if is_byte_swapped(previous, iid): + return previous + return iid + @azure_ds_telemetry_reporter def setup(self, is_new_instance): if self._negotiated is False: diff --git a/cloudinit/sources/helpers/azure.py 
b/cloudinit/sources/helpers/azure.py index f5cdb3fd..fc760581 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -7,6 +7,7 @@ import re import socket import struct import time +import textwrap from cloudinit.net import dhcp from cloudinit import stages @@ -48,6 +49,32 @@ def azure_ds_telemetry_reporter(func): return impl +def is_byte_swapped(previous_id, current_id): + """ + Azure stores the instance ID with an incorrect byte ordering for the + first three hyphen-delimited parts. Return True if previous_id matches + current_id once that byte order is corrected to be consistent with + what the metadata service returns. + """ + if previous_id == current_id: + return False + + def swap_bytestring(s, width=2): + dd = textwrap.wrap(s, width) + dd.reverse() + return ''.join(dd) + + parts = current_id.split('-') + swapped_id = '-'.join([ + swap_bytestring(parts[0]), + swap_bytestring(parts[1]), + swap_bytestring(parts[2]), + parts[3], + parts[4] + ]) + + return previous_id == swapped_id + + @azure_ds_telemetry_reporter def get_boot_telemetry(): """Report timestamps related to kernel initialization and systemd diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 59e351de..a809fd87 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -477,7 +477,7 @@ scbus-1 on xpt0 bus 0 'public-keys': [], }) - self.instance_id = 'test-instance-id' + self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' def _dmi_mocks(key): if key == 'system-uuid': @@ -645,7 +645,7 @@ scbus-1 on xpt0 bus 0 'azure_data': { 'configurationsettype': 'LinuxProvisioningConfiguration'}, 'imds': NETWORK_METADATA, - 'instance-id': 'test-instance-id', + 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', 'local-hostname': u'myhost', 'random_seed': 'wild'} @@ -1091,6 +1091,24 @@ scbus-1 on xpt0 bus 0 self.assertTrue(ret) self.assertEqual('value', dsrc.metadata['test']) + def test_instance_id_endianness(self): + """Return the previous iid when the dmi uuid is the byte-swapped iid.""" + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + # byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual( + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + # not byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual( + 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + def test_instance_id_from_dmidecode_used(self): ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) ds.get_data() @@ -1292,7 +1310,7 @@ class TestAzureBounce(CiTestCase): def _dmi_mocks(key): if key == 'system-uuid': - return 'test-instance-id' + return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' elif key == 'chassis-asset-tag': return '7783-7084-3265-9085-8269-3286-77' raise RuntimeError('should not get here') diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index bd17f636..007df09f 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -170,6 +170,25 @@ class TestGoalStateParsing(CiTestCase): goal_state = self._get_goal_state(instance_id=instance_id) self.assertEqual(instance_id, goal_state.instance_id) + def test_instance_id_byte_swap(self): + 
"""Return true when previous_iid is byteswapped current_iid""" + previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8" + self.assertTrue( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + + def test_instance_id_no_byte_swap_same_instance_id(self): + previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + self.assertFalse( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + + def test_instance_id_no_byte_swap_diff_instance_id(self): + previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + self.assertFalse( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + def test_certificates_xml_parsed_and_fetched_correctly(self): http_client = mock.MagicMock() certificates_url = 'TestCertificatesUrl' -- cgit v1.2.3 From 8116493950e7c47af0ce66fc1bb5d799ce5e477a Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 18 Dec 2019 16:22:02 -0500 Subject: cloud-init: fix capitalisation of SSH (#126) * cc_ssh: fix capitalisation of SSH * doc: fix capitalisation of SSH * cc_keys_to_console: fix capitalisation of SSH * ssh_util: fix capitalisation of SSH * DataSourceIBMCloud: fix capitalisation of SSH * DataSourceAzure: fix capitalisation of SSH * cs_utils: fix capitalisation of SSH * distros/__init__: fix capitalisation of SSH * cc_set_passwords: fix capitalisation of SSH * cc_ssh_import_id: fix capitalisation of SSH * cc_users_groups: fix capitalisation of SSH * cc_ssh_authkey_fingerprints: fix capitalisation of SSH --- cloudinit/config/cc_keys_to_console.py | 6 +++--- cloudinit/config/cc_set_passwords.py | 6 +++--- cloudinit/config/cc_ssh.py | 12 ++++++------ cloudinit/config/cc_ssh_authkey_fingerprints.py | 6 +++--- cloudinit/config/cc_ssh_import_id.py | 8 ++++---- cloudinit/config/cc_users_groups.py | 6 +++--- cloudinit/config/tests/test_set_passwords.py | 4 ++-- cloudinit/cs_utils.py | 2 +- cloudinit/distros/__init__.py | 4 ++-- cloudinit/sources/DataSourceAzure.py | 6 +++--- cloudinit/sources/DataSourceIBMCloud.py | 2 +- cloudinit/ssh_util.py | 8 ++++---- doc/examples/cloud-config-ssh-keys.txt | 4 +--- doc/rtd/topics/datasources.rst | 2 +- doc/rtd/topics/datasources/cloudstack.rst | 2 +- doc/rtd/topics/examples.rst | 2 +- doc/rtd/topics/format.rst | 2 +- doc/rtd/topics/instancedata.rst | 2 +- tests/unittests/test_distros/test_create_users.py | 2 +- 19 files changed, 42 insertions(+), 44 deletions(-) (limited to 'cloudinit/sources/DataSourceAzure.py') diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index 8f8735ce..3d2ded3d 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -9,10 +9,10 @@ """ Keys to Console --------------- -**Summary:** control which ssh keys may be written to console +**Summary:** control which SSH keys may be written to console -For security reasons it may be desirable not to write ssh fingerprints and keys -to the console. To avoid the fingerprint of types of ssh keys being written to +For security reasons it may be desirable not to write SSH fingerprints and keys +to the console. To avoid the fingerprint of types of SSH keys being written to console the ``ssh_fp_console_blacklist`` config key can be used. By default all types of keys will have their fingerprints written to console. 
To avoid keys of a key type being written to console the ``ssh_key_console_blacklist`` config diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index c3c5b0ff..e3b39d8b 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -112,7 +112,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): elif util.is_false(pw_auth): cfg_val = 'no' else: - bmsg = "Leaving ssh config '%s' unchanged." % cfg_name + bmsg = "Leaving SSH config '%s' unchanged." % cfg_name if pw_auth is None or pw_auth.lower() == 'unchanged': LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth) else: @@ -121,7 +121,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): updated = update_ssh_config({cfg_name: cfg_val}) if not updated: - LOG.debug("No need to restart ssh service, %s not updated.", cfg_name) + LOG.debug("No need to restart SSH service, %s not updated.", cfg_name) return if 'systemctl' in service_cmd: @@ -129,7 +129,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): else: cmd = list(service_cmd) + [service_name, "restart"] util.subp(cmd) - LOG.debug("Restarted the ssh daemon.") + LOG.debug("Restarted the SSH daemon.") def handle(_name, cfg, cloud, log, args): diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index bb26fb2b..163cce99 100755 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -9,9 +9,9 @@ """ SSH --- -**Summary:** configure ssh and ssh keys (host and authorized) +**Summary:** configure SSH and SSH keys (host and authorized) -This module handles most configuration for ssh and both host and authorized ssh +This module handles most configuration for SSH and both host and authorized SSH keys. Authorized Keys @@ -24,7 +24,7 @@ account's home directory. Authorized keys for the default user defined in should be specified as a list of public keys. .. note:: - see the ``cc_set_passwords`` module documentation to enable/disable ssh + see the ``cc_set_passwords`` module documentation to enable/disable SSH password authentication Root login can be enabled/disabled using the ``disable_root`` config key. Root @@ -39,7 +39,7 @@ Host Keys ^^^^^^^^^ Host keys are for authenticating a specific instance. Many images have default -host ssh keys, which can be removed using ``ssh_deletekeys``. This prevents +host SSH keys, which can be removed using ``ssh_deletekeys``. This prevents re-use of a private host key from an image on multiple machines. Since removing default host keys is usually the desired behavior this option is enabled by default. 
@@ -225,7 +225,7 @@ def handle(_name, cfg, cloud, log, _args): if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True): keys = cloud.get_public_ssh_keys() or [] else: - log.debug('Skipping import of publish ssh keys per ' + log.debug('Skipping import of public SSH keys per ' 'config setting: allow_public_ssh_keys=False') if "ssh_authorized_keys" in cfg: @@ -234,7 +234,7 @@ def handle(_name, cfg, cloud, log, _args): apply_credentials(keys, user, disable_root, disable_root_opts) except Exception: - util.logexc(log, "Applying ssh credentials failed!") + util.logexc(log, "Applying SSH credentials failed!") def apply_credentials(keys, user, disable_root, disable_root_opts): diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 98b0e665..dcf86fdc 100755 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -7,7 +7,7 @@ """ SSH Authkey Fingerprints ------------------------ -**Summary:** log fingerprints of user ssh keys +**Summary:** log fingerprints of user SSH keys Write fingerprints of authorized keys for each user to log. This is enabled by default, but can be disabled using ``no_ssh_fingerprints``. The hash type for @@ -68,7 +68,7 @@ def _is_printable_key(entry): def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', prefix='ci-info: '): if not key_entries: - message = ("%sno authorized ssh keys fingerprints found for user %s.\n" + message = ("%sno authorized SSH key fingerprints found for user %s.\n" % (prefix, user)) util.multi_log(message) return @@ -98,7 +98,7 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', def handle(name, cfg, cloud, log, _args): if util.is_true(cfg.get('no_ssh_fingerprints', False)): log.debug(("Skipping module named %s, " - "logging of ssh fingerprints disabled"), name) + "logging of SSH fingerprints disabled"), name) return hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py index 6b46dafe..63f87298 100755 --- a/cloudinit/config/cc_ssh_import_id.py +++ b/cloudinit/config/cc_ssh_import_id.py @@ -9,9 +9,9 @@ """ SSH Import Id ------------- -**Summary:** import ssh id +**Summary:** import SSH id -This module imports ssh keys from either a public keyserver, usually launchpad +This module imports SSH keys from either a public keyserver, usually launchpad or github using ``ssh-import-id``. Keys are referenced by the username they are associated with on the keyserver. The keyserver can be specified by prepending either ``lp:`` for launchpad or ``gh:`` for github to the username. @@ -98,12 +98,12 @@ def import_ssh_ids(ids, user, log): raise exc cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids - log.debug("Importing ssh ids for user %s.", user) + log.debug("Importing SSH ids for user %s.", user) try: util.subp(cmd, capture=False) except util.ProcessExecutionError as exc: - util.logexc(log, "Failed to run command to import %s ssh ids", user) + util.logexc(log, "Failed to run command to import %s SSH ids", user) raise exc # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index c32a743a..13764e60 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -51,14 +51,14 @@ config keys for an entry in ``users`` are as follows: a Snappy user through ``snap create-user``. 
If an Ubuntu SSO account is associated with the address, username and SSH keys will be requested from there. Default: none - - ``ssh_authorized_keys``: Optional. List of SSH keys to add to user's + authkeys file. Default: none. This key can not be combined with ``ssh_redirect_user``. - ``ssh_import_id``: Optional. SSH id to import for user. Default: none. This key can not be combined with ``ssh_redirect_user``. - ``ssh_redirect_user``: Optional. Boolean set to true to disable SSH - logins for this user. When specified, all cloud meta-data public ssh - keys will be set up in a disabled state for this username. Any ssh login + logins for this user. When specified, all cloud meta-data public SSH + keys will be set up in a disabled state for this username. Any SSH login as this username will timeout and prompt with a message to login instead as the configured <default_username> for this instance. Default: false. This key can not be combined with ``ssh_import_id`` or diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py index 639fb9ea..85e2f1fe 100644 --- a/cloudinit/config/tests/test_set_passwords.py +++ b/cloudinit/config/tests/test_set_passwords.py @@ -45,7 +45,7 @@ class TestHandleSshPwauth(CiTestCase): """If config is not updated, then no system restart should be done.""" setpass.handle_ssh_pwauth(True) m_subp.assert_not_called() - self.assertIn("No need to restart ssh", self.logs.getvalue()) + self.assertIn("No need to restart SSH", self.logs.getvalue()) @mock.patch(MODPATH + "update_ssh_config", return_value=True) @mock.patch(MODPATH + "util.subp") @@ -80,7 +80,7 @@ class TestSetPasswordsHandle(CiTestCase): setpass.handle( 'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[]) self.assertEqual( - "DEBUG: Leaving ssh config 'PasswordAuthentication' unchanged. " + "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. " 'ssh_pwauth=None\n', self.logs.getvalue()) diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py index 51c09582..8bac9c44 100644 --- a/cloudinit/cs_utils.py +++ b/cloudinit/cs_utils.py @@ -14,7 +14,7 @@ Having the server definition accessible by the VM can be useful in various ways. For example it is possible to easily determine from within the VM, which network interfaces are connected to public and which to private network. Another use is to pass some data to initial VM setup scripts, like setting the -hostname to the VM name or passing ssh public keys through server meta. +hostname to the VM name or passing SSH public keys through server meta. For more information take a look at the Server Context section of CloudSigma API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 2b559fe6..6d69e6ca 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -385,7 +385,7 @@ class Distro(object): Add a user to the system using standard GNU tools """ # XXX need to make add_user idempotent somehow as we - # still want to add groups or modify ssh keys on pre-existing + # still want to add groups or modify SSH keys on pre-existing # users in the image. 
if util.is_user(name): LOG.info("User %s already exists, skipping.", name) @@ -561,7 +561,7 @@ class Distro(object): cloud_keys = kwargs.get('cloud_public_ssh_keys', []) if not cloud_keys: LOG.warning( - 'Unable to disable ssh logins for %s given' + 'Unable to disable SSH logins for %s given' ' ssh_redirect_user: %s. No cloud public-keys present.', name, kwargs['ssh_redirect_user']) else: diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 24f448c5..61ec522a 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -355,16 +355,16 @@ class DataSourceAzure(sources.DataSource): for pk in self.cfg.get('_pubkeys', []): if pk.get('value', None): key_value = pk['value'] - LOG.debug("ssh authentication: using value from fabric") + LOG.debug("SSH authentication: using value from fabric") else: bname = str(pk['fingerprint'] + ".crt") fp_files += [os.path.join(ddir, bname)] - LOG.debug("ssh authentication: " + LOG.debug("SSH authentication: " "using fingerprint from fabric") with events.ReportEventStack( name="waiting-for-ssh-public-key", - description="wait for agents to retrieve ssh keys", + description="wait for agents to retrieve SSH keys", parent=azure_ds_reporter): # wait very long for public SSH keys to arrive # https://bugs.launchpad.net/cloud-init/+bug/1717611 diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py index 21e6ae6b..e0c714e8 100644 --- a/cloudinit/sources/DataSourceIBMCloud.py +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -83,7 +83,7 @@ creates 6 boot scenarios. There is no information available to identify this scenario. - The user will be able to ssh in as as root with their public keys that + The user will be able to SSH in as root with their public keys that have been installed into /root/ssh/.authorized_keys during the provisioning stage. diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index bcb23a5a..c3a9b5b7 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -17,7 +17,7 @@ LOG = logging.getLogger(__name__) # See: man sshd_config DEF_SSHD_CFG = "/etc/ssh/sshd_config" -# taken from openssh source openssh-7.3p1/sshkey.c: +# taken from OpenSSH source openssh-7.3p1/sshkey.c: # static const struct keytype keytypes[] = { ... } VALID_KEY_TYPES = ( "dsa", @@ -207,7 +207,7 @@ def update_authorized_keys(old_entries, keys): def users_ssh_info(username): pw_ent = pwd.getpwnam(username) if not pw_ent or not pw_ent.pw_dir: - raise RuntimeError("Unable to get ssh info for user %r" % (username)) + raise RuntimeError("Unable to get SSH info for user %r" % (username)) return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent) @@ -245,7 +245,7 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): except (IOError, OSError): # Give up and use a default key filename auth_key_fns[0] = default_authorizedkeys_file - util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in ssh " + util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in SSH " "config from %r, using 'AuthorizedKeysFile' file " "%r instead", DEF_SSHD_CFG, auth_key_fns[0]) @@ -349,7 +349,7 @@ def update_ssh_config(updates, fname=DEF_SSHD_CFG): def update_ssh_config_lines(lines, updates): - """Update the ssh config lines per updates. + """Update the SSH config lines per updates. @param lines: array of SshdConfigLine. This array is updated in place. 
@param updates: dictionary of desired values {Option: value} diff --git a/doc/examples/cloud-config-ssh-keys.txt b/doc/examples/cloud-config-ssh-keys.txt index 235a114f..aad8b683 100644 --- a/doc/examples/cloud-config-ssh-keys.txt +++ b/doc/examples/cloud-config-ssh-keys.txt @@ -6,7 +6,7 @@ ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies -# Send pre-generated ssh private keys to the server +# Send pre-generated SSH private keys to the server # If these are present, they will be written to /etc/ssh and # new random keys will not be generated # in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported @@ -42,5 +42,3 @@ ssh_keys: -----END DSA PRIVATE KEY----- dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost - - diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 0b3a385e..3d026143 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -140,7 +140,7 @@ The current interface that a datasource object must provide is the following: # because cloud-config content would be handled elsewhere def get_config_obj(self) - #returns a list of public ssh keys + # returns a list of public SSH keys def get_public_ssh_keys(self) # translates a device 'short' name into the actual physical device diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst index 95b95874..da183226 100644 --- a/doc/rtd/topics/datasources/cloudstack.rst +++ b/doc/rtd/topics/datasources/cloudstack.rst @@ -4,7 +4,7 @@ CloudStack ========== `Apache CloudStack`_ expose user-data, meta-data, user password and account -sshkey thru the Virtual-Router. The datasource obtains the VR address via +SSH key through the Virtual-Router. The datasource obtains the VR address via dhcp lease information given to the instance. For more details on meta-data and user-data, refer the `CloudStack Administrator Guide`_. diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst index 94e6ed18..81860f85 100644 --- a/doc/rtd/topics/examples.rst +++ b/doc/rtd/topics/examples.rst @@ -128,7 +128,7 @@ Reboot/poweroff when finished :language: yaml :linenos: -Configure instances ssh-keys +Configure instances SSH keys ============================ .. 
literalinclude:: ../../examples/cloud-config-ssh-keys.txt diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst index a6e9b44e..2b60bdd3 100644 --- a/doc/rtd/topics/format.rst +++ b/doc/rtd/topics/format.rst @@ -113,7 +113,7 @@ These things include: - apt upgrade should be run on first boot - a different apt mirror should be used - additional apt sources should be added -- certain ssh keys should be imported +- certain SSH keys should be imported - *and many more...* .. note:: diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index c17d0a0e..e7dd0d62 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst @@ -165,7 +165,7 @@ Examples output: v1.public_ssh_keys ------------------ -A list of ssh keys provided to the instance by the datasource metadata. +A list of SSH keys provided to the instance by the datasource metadata. Examples output: diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index 40624952..ef11784d 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -206,7 +206,7 @@ class TestCreateUser(CiTestCase): user = 'foouser' self.dist.create_user(user, ssh_redirect_user='someuser') self.assertIn( - 'WARNING: Unable to disable ssh logins for foouser given ' + 'WARNING: Unable to disable SSH logins for foouser given ' 'ssh_redirect_user: someuser. No cloud public-keys present.\n', self.logs.getvalue()) m_setup_user_keys.assert_not_called() -- cgit v1.2.3
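The byte-swap check introduced in commit 129b1c4e above can be exercised on its own. The following minimal sketch, assuming only the Python standard library, mirrors the helper's logic; it is an illustrative re-implementation, not the shipped cloudinit.sources.helpers.azure code:

    import textwrap

    def swap_bytestring(s, width=2):
        # Reverse the order of width-sized chunks within one hex group.
        return ''.join(reversed(textwrap.wrap(s, width)))

    def is_byte_swapped(previous_id, current_id):
        # True when current_id is previous_id with its first three
        # hyphen-delimited groups byte-swapped; identical IDs are not
        # considered swapped.
        if previous_id == current_id:
            return False
        parts = current_id.split('-')
        swapped_id = '-'.join(
            [swap_bytestring(p) for p in parts[:3]] + parts[3:])
        return previous_id == swapped_id

    # The UUID pair used by the tests above:
    assert is_byte_swapped('D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8',
                           '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')

Similarly, the secondary-address handling from commit 9478f0f2 reduces to: the first address per family is obtained via DHCP, and the rest become static CIDRs. A hedged sketch of just that extraction step, using the IMDS-style dict shapes shown in the tests (not the full parse_network_config):

    def static_secondary_addresses(intf):
        # Collect every IP after the first, per address family, as CIDRs.
        out = []
        for addr_type, default_prefix in (('ipv4', '24'), ('ipv6', '128')):
            addresses = intf.get(addr_type, {}).get('ipAddress', [])
            for addr in addresses[1:]:  # first address is handled by DHCP
                prefix = intf[addr_type]['subnet'][0].get(
                    'prefix', default_prefix)
                out.append('%s/%s' % (addr['privateIpAddress'], prefix))
        return out

    intf = {
        'ipv4': {'subnet': [{'prefix': '24', 'address': '10.0.0.0'}],
                 'ipAddress': [{'privateIpAddress': '10.0.0.4'},
                               {'privateIpAddress': '10.0.0.5'}]},
        'ipv6': {'subnet': [{'prefix': '10', 'address': '2001:dead:beef::16'}],
                 'ipAddress': [{'privateIpAddress': '2001:dead:beef::1'},
                               {'privateIpAddress': '2001:dead:beef::2'}]},
    }
    assert static_secondary_addresses(intf) == [
        '10.0.0.5/24', '2001:dead:beef::2/10']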