Diffstat (limited to 'cloudinit/sources/helpers')
24 files changed, 1582 insertions(+), 1357 deletions(-)
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index d3055d08..d07dc3c0 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -6,27 +6,28 @@ import os import re import socket import struct -import time import textwrap +import time import zlib -from errno import ENOENT - -from cloudinit.settings import CFG_BUILTIN -from cloudinit.net import dhcp -from cloudinit import stages -from cloudinit import temp_utils from contextlib import contextmanager +from datetime import datetime +from errno import ENOENT +from typing import List, Optional from xml.etree import ElementTree from xml.sax.saxutils import escape -from cloudinit import subp -from cloudinit import url_helper -from cloudinit import util -from cloudinit import version -from cloudinit import distros +from cloudinit import ( + distros, + stages, + subp, + temp_utils, + url_helper, + util, + version, +) +from cloudinit.net import dhcp from cloudinit.reporting import events -from cloudinit.net.dhcp import EphemeralDHCPv4 -from datetime import datetime +from cloudinit.settings import CFG_BUILTIN LOG = logging.getLogger(__name__) @@ -34,10 +35,10 @@ LOG = logging.getLogger(__name__) # value is applied if the endpoint can't be found within a lease file DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10" -BOOT_EVENT_TYPE = 'boot-telemetry' -SYSTEMINFO_EVENT_TYPE = 'system-info' -DIAGNOSTIC_EVENT_TYPE = 'diagnostic' -COMPRESSED_EVENT_TYPE = 'compressed' +BOOT_EVENT_TYPE = "boot-telemetry" +SYSTEMINFO_EVENT_TYPE = "system-info" +DIAGNOSTIC_EVENT_TYPE = "diagnostic" +COMPRESSED_EVENT_TYPE = "compressed" # Maximum number of bytes of the cloud-init.log file that can be dumped to KVP # at once. This number is based on the analysis done on a large sample of # cloud-init.log files where the P95 of the file sizes was 537KB and the time @@ -45,25 +46,29 @@ COMPRESSED_EVENT_TYPE = 'compressed' MAX_LOG_TO_KVP_LENGTH = 512000 # File to store the last byte of cloud-init.log that was pushed to KVP. This # file will be deleted with every VM reboot. -LOG_PUSHED_TO_KVP_INDEX_FILE = '/run/cloud-init/log_pushed_to_kvp_index' +LOG_PUSHED_TO_KVP_INDEX_FILE = "/run/cloud-init/log_pushed_to_kvp_index" azure_ds_reporter = events.ReportEventStack( name="azure-ds", description="initialize reporter for azure ds", - reporting_enabled=True) + reporting_enabled=True, +) DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE = ( - 'The VM encountered an error during deployment. ' - 'Please visit https://aka.ms/linuxprovisioningerror ' - 'for more information on remediation.') + "The VM encountered an error during deployment. " + "Please visit https://aka.ms/linuxprovisioningerror " + "for more information on remediation." 
+) def azure_ds_telemetry_reporter(func): def impl(*args, **kwargs): with events.ReportEventStack( - name=func.__name__, - description=func.__name__, - parent=azure_ds_reporter): + name=func.__name__, + description=func.__name__, + parent=azure_ds_reporter, + ): return func(*args, **kwargs) + return impl @@ -79,16 +84,16 @@ def is_byte_swapped(previous_id, current_id): def swap_bytestring(s, width=2): dd = [byte for byte in textwrap.wrap(s, 2)] dd.reverse() - return ''.join(dd) + return "".join(dd) - parts = current_id.split('-') - swapped_id = '-'.join( + parts = current_id.split("-") + swapped_id = "-".join( [ swap_bytestring(parts[0]), swap_bytestring(parts[1]), swap_bytestring(parts[2]), parts[3], - parts[4] + parts[4], ] ) @@ -98,31 +103,29 @@ def is_byte_swapped(previous_id, current_id): @azure_ds_telemetry_reporter def get_boot_telemetry(): """Report timestamps related to kernel initialization and systemd - activation of cloud-init""" + activation of cloud-init""" if not distros.uses_systemd(): - raise RuntimeError( - "distro not using systemd, skipping boot telemetry") + raise RuntimeError("distro not using systemd, skipping boot telemetry") LOG.debug("Collecting boot telemetry") try: kernel_start = float(time.time()) - float(util.uptime()) except ValueError as e: - raise RuntimeError( - "Failed to determine kernel start timestamp" - ) from e + raise RuntimeError("Failed to determine kernel start timestamp") from e try: - out, _ = subp.subp(['/bin/systemctl', - 'show', '-p', - 'UserspaceTimestampMonotonic'], - capture=True) + out, _ = subp.subp( + ["/bin/systemctl", "show", "-p", "UserspaceTimestampMonotonic"], + capture=True, + ) tsm = None - if out and '=' in out: + if out and "=" in out: tsm = out.split("=")[1] if not tsm: - raise RuntimeError("Failed to parse " - "UserspaceTimestampMonotonic from systemd") + raise RuntimeError( + "Failed to parse UserspaceTimestampMonotonic from systemd" + ) user_start = kernel_start + (float(tsm) / 1000000) except subp.ProcessExecutionError as e: @@ -135,16 +138,23 @@ def get_boot_telemetry(): ) from e try: - out, _ = subp.subp(['/bin/systemctl', 'show', - 'cloud-init-local', '-p', - 'InactiveExitTimestampMonotonic'], - capture=True) + out, _ = subp.subp( + [ + "/bin/systemctl", + "show", + "cloud-init-local", + "-p", + "InactiveExitTimestampMonotonic", + ], + capture=True, + ) tsm = None - if out and '=' in out: + if out and "=" in out: tsm = out.split("=")[1] if not tsm: - raise RuntimeError("Failed to parse " - "InactiveExitTimestampMonotonic from systemd") + raise RuntimeError( + "Failed to parse InactiveExitTimestampMonotonic from systemd" + ) cloudinit_activation = kernel_start + (float(tsm) / 1000000) except subp.ProcessExecutionError as e: @@ -158,12 +168,16 @@ def get_boot_telemetry(): ) from e evt = events.ReportingEvent( - BOOT_EVENT_TYPE, 'boot-telemetry', - "kernel_start=%s user_start=%s cloudinit_activation=%s" % - (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z', - datetime.utcfromtimestamp(user_start).isoformat() + 'Z', - datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'), - events.DEFAULT_EVENT_ORIGIN) + BOOT_EVENT_TYPE, + "boot-telemetry", + "kernel_start=%s user_start=%s cloudinit_activation=%s" + % ( + datetime.utcfromtimestamp(kernel_start).isoformat() + "Z", + datetime.utcfromtimestamp(user_start).isoformat() + "Z", + datetime.utcfromtimestamp(cloudinit_activation).isoformat() + "Z", + ), + events.DEFAULT_EVENT_ORIGIN, + ) events.report_event(evt) # return the event for unit testing purpose @@ 
-175,13 +189,22 @@ def get_system_info(): """Collect and report system information""" info = util.system_info() evt = events.ReportingEvent( - SYSTEMINFO_EVENT_TYPE, 'system information', + SYSTEMINFO_EVENT_TYPE, + "system information", "cloudinit_version=%s, kernel_version=%s, variant=%s, " "distro_name=%s, distro_version=%s, flavor=%s, " - "python_version=%s" % - (version.version_string(), info['release'], info['variant'], - info['dist'][0], info['dist'][1], info['dist'][2], - info['python']), events.DEFAULT_EVENT_ORIGIN) + "python_version=%s" + % ( + version.version_string(), + info["release"], + info["variant"], + info["dist"][0], + info["dist"][1], + info["dist"][2], + info["python"], + ), + events.DEFAULT_EVENT_ORIGIN, + ) events.report_event(evt) # return the event for unit testing purpose @@ -189,13 +212,17 @@ def get_system_info(): def report_diagnostic_event( - msg: str, *, logger_func=None) -> events.ReportingEvent: + msg: str, *, logger_func=None +) -> events.ReportingEvent: """Report a diagnostic event""" if callable(logger_func): logger_func(msg) evt = events.ReportingEvent( - DIAGNOSTIC_EVENT_TYPE, 'diagnostic message', - msg, events.DEFAULT_EVENT_ORIGIN) + DIAGNOSTIC_EVENT_TYPE, + "diagnostic message", + msg, + events.DEFAULT_EVENT_ORIGIN, + ) events.report_event(evt, excluded_handler_types={"log"}) # return the event for unit testing purpose @@ -205,21 +232,26 @@ def report_diagnostic_event( def report_compressed_event(event_name, event_content): """Report a compressed event""" compressed_data = base64.encodebytes(zlib.compress(event_content)) - event_data = {"encoding": "gz+b64", - "data": compressed_data.decode('ascii')} + event_data = { + "encoding": "gz+b64", + "data": compressed_data.decode("ascii"), + } evt = events.ReportingEvent( - COMPRESSED_EVENT_TYPE, event_name, + COMPRESSED_EVENT_TYPE, + event_name, json.dumps(event_data), - events.DEFAULT_EVENT_ORIGIN) - events.report_event(evt, - excluded_handler_types={"log", "print", "webhook"}) + events.DEFAULT_EVENT_ORIGIN, + ) + events.report_event( + evt, excluded_handler_types={"log", "print", "webhook"} + ) # return the event for unit testing purpose return evt @azure_ds_telemetry_reporter -def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']): +def push_log_to_kvp(file_name=CFG_BUILTIN["def_log_file"]): """Push a portion of cloud-init.log file or the whole file to KVP based on the file size. 
The first time this function is called after VM boot, It will push the last @@ -237,23 +269,26 @@ def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']): report_diagnostic_event( "Dumping last {0} bytes of cloud-init.log file to KVP starting" " from index: {1}".format(f.tell() - seek_index, seek_index), - logger_func=LOG.debug) + logger_func=LOG.debug, + ) f.seek(seek_index, os.SEEK_SET) report_compressed_event("cloud-init.log", f.read()) util.write_file(LOG_PUSHED_TO_KVP_INDEX_FILE, str(f.tell())) except Exception as ex: report_diagnostic_event( "Exception when dumping log file: %s" % repr(ex), - logger_func=LOG.warning) + logger_func=LOG.warning, + ) LOG.debug("Dumping dmesg log to KVP") try: - out, _ = subp.subp(['dmesg'], decode=False, capture=True) + out, _ = subp.subp(["dmesg"], decode=False, capture=True) report_compressed_event("dmesg", out) except Exception as ex: report_diagnostic_event( "Exception when dumping dmesg log: %s" % repr(ex), - logger_func=LOG.warning) + logger_func=LOG.warning, + ) @azure_ds_telemetry_reporter @@ -263,16 +298,20 @@ def get_last_log_byte_pushed_to_kvp_index(): return int(f.read()) except IOError as e: if e.errno != ENOENT: - report_diagnostic_event("Reading LOG_PUSHED_TO_KVP_INDEX_FILE" - " failed: %s." % repr(e), - logger_func=LOG.warning) + report_diagnostic_event( + "Reading LOG_PUSHED_TO_KVP_INDEX_FILE failed: %s." % repr(e), + logger_func=LOG.warning, + ) except ValueError as e: - report_diagnostic_event("Invalid value in LOG_PUSHED_TO_KVP_INDEX_FILE" - ": %s." % repr(e), - logger_func=LOG.warning) + report_diagnostic_event( + "Invalid value in LOG_PUSHED_TO_KVP_INDEX_FILE: %s." % repr(e), + logger_func=LOG.warning, + ) except Exception as e: - report_diagnostic_event("Failed to get the last log byte pushed to KVP" - ": %s." % repr(e), logger_func=LOG.warning) + report_diagnostic_event( + "Failed to get the last log byte pushed to KVP: %s." % repr(e), + logger_func=LOG.warning, + ) return 0 @@ -295,58 +334,97 @@ def _get_dhcp_endpoint_option_name(): @azure_ds_telemetry_reporter -def http_with_retries(url, **kwargs) -> str: +def http_with_retries(url, **kwargs) -> url_helper.UrlResponse: """Wrapper around url_helper.readurl() with custom telemetry logging that url_helper.readurl() does not provide. """ - exc = None - max_readurl_attempts = 240 default_readurl_timeout = 5 + sleep_duration_between_retries = 5 periodic_logging_attempts = 12 - if 'timeout' not in kwargs: - kwargs['timeout'] = default_readurl_timeout + if "timeout" not in kwargs: + kwargs["timeout"] = default_readurl_timeout # remove kwargs that cause url_helper.readurl to retry, # since we are already implementing our own retry logic. - if kwargs.pop('retries', None): + if kwargs.pop("retries", None): LOG.warning( - 'Ignoring retries kwarg passed in for ' - 'communication with Azure endpoint.') - if kwargs.pop('infinite', None): + "Ignoring retries kwarg passed in for " + "communication with Azure endpoint." + ) + if kwargs.pop("infinite", None): LOG.warning( - 'Ignoring infinite kwarg passed in for communication ' - 'with Azure endpoint.') + "Ignoring infinite kwarg passed in for communication " + "with Azure endpoint." 
+ ) for attempt in range(1, max_readurl_attempts + 1): try: ret = url_helper.readurl(url, **kwargs) report_diagnostic_event( - 'Successful HTTP request with Azure endpoint %s after ' - '%d attempts' % (url, attempt), - logger_func=LOG.debug) + "Successful HTTP request with Azure endpoint %s after " + "%d attempts" % (url, attempt), + logger_func=LOG.debug, + ) return ret except Exception as e: - exc = e if attempt % periodic_logging_attempts == 0: report_diagnostic_event( - 'Failed HTTP request with Azure endpoint %s during ' - 'attempt %d with exception: %s' % - (url, attempt, e), - logger_func=LOG.debug) - - raise exc + "Failed HTTP request with Azure endpoint %s during " + "attempt %d with exception: %s" % (url, attempt, e), + logger_func=LOG.debug, + ) + if attempt == max_readurl_attempts: + raise + + time.sleep(sleep_duration_between_retries) + + raise RuntimeError("Failed to return in http_with_retries") + + +def build_minimal_ovf( + username: str, hostname: str, disableSshPwd: str +) -> bytes: + OVF_ENV_TEMPLATE = textwrap.dedent( + """\ + <ns0:Environment xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1" + xmlns:ns1="http://schemas.microsoft.com/windowsazure" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> + <ns1:ProvisioningSection> + <ns1:Version>1.0</ns1:Version> + <ns1:LinuxProvisioningConfigurationSet> + <ns1:ConfigurationSetType>LinuxProvisioningConfiguration + </ns1:ConfigurationSetType> + <ns1:UserName>{username}</ns1:UserName> + <ns1:DisableSshPasswordAuthentication>{disableSshPwd} + </ns1:DisableSshPasswordAuthentication> + <ns1:HostName>{hostname}</ns1:HostName> + </ns1:LinuxProvisioningConfigurationSet> + </ns1:ProvisioningSection> + <ns1:PlatformSettingsSection> + <ns1:Version>1.0</ns1:Version> + <ns1:PlatformSettings> + <ns1:ProvisionGuestAgent>true</ns1:ProvisionGuestAgent> + </ns1:PlatformSettings> + </ns1:PlatformSettingsSection> + </ns0:Environment> + """ + ) + ret = OVF_ENV_TEMPLATE.format( + username=username, hostname=hostname, disableSshPwd=disableSshPwd + ) + return ret.encode("utf-8") class AzureEndpointHttpClient: headers = { - 'x-ms-agent-name': 'WALinuxAgent', - 'x-ms-version': '2012-11-30', + "x-ms-agent-name": "WALinuxAgent", + "x-ms-version": "2012-11-30", } def __init__(self, certificate): @@ -355,20 +433,21 @@ class AzureEndpointHttpClient: "x-ms-guest-agent-public-x509-cert": certificate, } - def get(self, url, secure=False): + def get(self, url, secure=False) -> url_helper.UrlResponse: headers = self.headers if secure: headers = self.headers.copy() headers.update(self.extra_secure_headers) return http_with_retries(url, headers=headers) - def post(self, url, data=None, extra_headers=None): + def post( + self, url, data=None, extra_headers=None + ) -> url_helper.UrlResponse: headers = self.headers if extra_headers is not None: headers = self.headers.copy() headers.update(extra_headers) - return http_with_retries( - url, data=data, headers=headers) + return http_with_retries(url, data=data, headers=headers) class InvalidGoalStateXMLException(Exception): @@ -376,12 +455,12 @@ class InvalidGoalStateXMLException(Exception): class GoalState: - def __init__( - self, - unparsed_xml: str, - azure_endpoint_client: AzureEndpointHttpClient, - need_certificate: bool = True) -> None: + self, + unparsed_xml: str, + azure_endpoint_client: AzureEndpointHttpClient, + need_certificate: bool = True, + ) -> None: """Parses a GoalState XML string and returns a GoalState object. @param unparsed_xml: string representing a GoalState XML. 
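The retry behaviour introduced in http_with_retries() above reduces to a simple pattern: a fixed attempt budget, a fixed sleep between attempts, and a re-raise of the final exception. The skeleton below is an illustrative sketch of that control flow, not code from this change; the helper name retry() and its parameters are invented for the sketch.

    # Illustrative reduction of the control flow in http_with_retries();
    # retry, fn, attempts and sleep_s are invented names for this sketch.
    import time

    def retry(fn, attempts=240, sleep_s=5.0):
        for attempt in range(1, attempts + 1):
            try:
                return fn()
            except Exception:
                if attempt == attempts:
                    raise  # final attempt: surface the last error to the caller
                time.sleep(sleep_s)
        raise RuntimeError("unreachable")  # defensive, mirrors the helper above
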
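build_minimal_ovf(), also added above, only interpolates three strings into a fixed OVF envelope and UTF-8 encodes the result. A usage sketch follows; the argument values are illustrative, not taken from this change.

    # Illustrative call with made-up values. Note that disableSshPwd is a
    # string interpolated verbatim into the XML, not a Python bool.
    ovf_xml = build_minimal_ovf(
        username="azureuser",
        hostname="examplehost",
        disableSshPwd="true",
    )
    assert ovf_xml.startswith(b"<ns0:Environment")
    assert b"<ns1:HostName>examplehost</ns1:HostName>" in ovf_xml
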
@@ -395,36 +474,41 @@ class GoalState: self.root = ElementTree.fromstring(unparsed_xml) except ElementTree.ParseError as e: report_diagnostic_event( - 'Failed to parse GoalState XML: %s' % e, - logger_func=LOG.warning) + "Failed to parse GoalState XML: %s" % e, + logger_func=LOG.warning, + ) raise - self.container_id = self._text_from_xpath('./Container/ContainerId') + self.container_id = self._text_from_xpath("./Container/ContainerId") self.instance_id = self._text_from_xpath( - './Container/RoleInstanceList/RoleInstance/InstanceId') - self.incarnation = self._text_from_xpath('./Incarnation') + "./Container/RoleInstanceList/RoleInstance/InstanceId" + ) + self.incarnation = self._text_from_xpath("./Incarnation") for attr in ("container_id", "instance_id", "incarnation"): if getattr(self, attr) is None: - msg = 'Missing %s in GoalState XML' % attr + msg = "Missing %s in GoalState XML" % attr report_diagnostic_event(msg, logger_func=LOG.warning) raise InvalidGoalStateXMLException(msg) self.certificates_xml = None url = self._text_from_xpath( - './Container/RoleInstanceList/RoleInstance' - '/Configuration/Certificates') + "./Container/RoleInstanceList/RoleInstance" + "/Configuration/Certificates" + ) if url is not None and need_certificate: with events.ReportEventStack( - name="get-certificates-xml", - description="get certificates xml", - parent=azure_ds_reporter): - self.certificates_xml = \ - self.azure_endpoint_client.get( - url, secure=True).contents + name="get-certificates-xml", + description="get certificates xml", + parent=azure_ds_reporter, + ): + self.certificates_xml = self.azure_endpoint_client.get( + url, secure=True + ).contents if self.certificates_xml is None: raise InvalidGoalStateXMLException( - 'Azure endpoint returned empty certificates xml.') + "Azure endpoint returned empty certificates xml." 
+ ) def _text_from_xpath(self, xpath): element = self.root.find(xpath) @@ -436,8 +520,8 @@ class GoalState: class OpenSSLManager: certificate_names = { - 'private_key': 'TransportPrivate.pem', - 'certificate': 'TransportCert.pem', + "private_key": "TransportPrivate.pem", + "certificate": "TransportCert.pem", } def __init__(self): @@ -458,35 +542,47 @@ class OpenSSLManager: @azure_ds_telemetry_reporter def generate_certificate(self): - LOG.debug('Generating certificate for communication with fabric...') + LOG.debug("Generating certificate for communication with fabric...") if self.certificate is not None: - LOG.debug('Certificate already generated.') + LOG.debug("Certificate already generated.") return with cd(self.tmpdir): - subp.subp([ - 'openssl', 'req', '-x509', '-nodes', '-subj', - '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048', - '-keyout', self.certificate_names['private_key'], - '-out', self.certificate_names['certificate'], - ]) - certificate = '' - for line in open(self.certificate_names['certificate']): + subp.subp( + [ + "openssl", + "req", + "-x509", + "-nodes", + "-subj", + "/CN=LinuxTransport", + "-days", + "32768", + "-newkey", + "rsa:2048", + "-keyout", + self.certificate_names["private_key"], + "-out", + self.certificate_names["certificate"], + ] + ) + certificate = "" + for line in open(self.certificate_names["certificate"]): if "CERTIFICATE" not in line: certificate += line.rstrip() self.certificate = certificate - LOG.debug('New certificate generated.') + LOG.debug("New certificate generated.") @staticmethod @azure_ds_telemetry_reporter def _run_x509_action(action, cert): - cmd = ['openssl', 'x509', '-noout', action] + cmd = ["openssl", "x509", "-noout", action] result, _ = subp.subp(cmd, data=cert) return result @azure_ds_telemetry_reporter def _get_ssh_key_from_cert(self, certificate): - pub_key = self._run_x509_action('-pubkey', certificate) - keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] + pub_key = self._run_x509_action("-pubkey", certificate) + keygen_cmd = ["ssh-keygen", "-i", "-m", "PKCS8", "-f", "/dev/stdin"] ssh_key, _ = subp.subp(keygen_cmd, data=pub_key) return ssh_key @@ -499,48 +595,50 @@ class OpenSSLManager: Azure control plane passes that fingerprint as so: '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' """ - raw_fp = self._run_x509_action('-fingerprint', certificate) - eq = raw_fp.find('=') - octets = raw_fp[eq+1:-1].split(':') - return ''.join(octets) + raw_fp = self._run_x509_action("-fingerprint", certificate) + eq = raw_fp.find("=") + octets = raw_fp[eq + 1 : -1].split(":") + return "".join(octets) @azure_ds_telemetry_reporter def _decrypt_certs_from_xml(self, certificates_xml): """Decrypt the certificates XML document using the our private key; - return the list of certs and private keys contained in the doc. + return the list of certs and private keys contained in the doc. 
""" - tag = ElementTree.fromstring(certificates_xml).find('.//Data') + tag = ElementTree.fromstring(certificates_xml).find(".//Data") certificates_content = tag.text lines = [ - b'MIME-Version: 1.0', + b"MIME-Version: 1.0", b'Content-Disposition: attachment; filename="Certificates.p7m"', b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"', - b'Content-Transfer-Encoding: base64', - b'', - certificates_content.encode('utf-8'), + b"Content-Transfer-Encoding: base64", + b"", + certificates_content.encode("utf-8"), ] with cd(self.tmpdir): out, _ = subp.subp( - 'openssl cms -decrypt -in /dev/stdin -inkey' - ' {private_key} -recip {certificate} | openssl pkcs12 -nodes' - ' -password pass:'.format(**self.certificate_names), - shell=True, data=b'\n'.join(lines)) + "openssl cms -decrypt -in /dev/stdin -inkey" + " {private_key} -recip {certificate} | openssl pkcs12 -nodes" + " -password pass:".format(**self.certificate_names), + shell=True, + data=b"\n".join(lines), + ) return out @azure_ds_telemetry_reporter def parse_certificates(self, certificates_xml): """Given the Certificates XML document, return a dictionary of - fingerprints and associated SSH keys derived from the certs.""" + fingerprints and associated SSH keys derived from the certs.""" out = self._decrypt_certs_from_xml(certificates_xml) current = [] keys = {} for line in out.splitlines(): current.append(line) - if re.match(r'[-]+END .*?KEY[-]+$', line): + if re.match(r"[-]+END .*?KEY[-]+$", line): # ignore private_keys current = [] - elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line): - certificate = '\n'.join(current) + elif re.match(r"[-]+END .*?CERTIFICATE[-]+$", line): + certificate = "\n".join(current) ssh_key = self._get_ssh_key_from_cert(certificate) fingerprint = self._get_fingerprint_from_cert(certificate) keys[fingerprint] = ssh_key @@ -550,7 +648,8 @@ class OpenSSLManager: class GoalStateHealthReporter: - HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent('''\ + HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent( + """\ <?xml version="1.0" encoding="utf-8"?> <Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema"> @@ -568,25 +667,30 @@ class GoalStateHealthReporter: </RoleInstanceList> </Container> </Health> - ''') + """ + ) - HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent('''\ + HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent( + """\ <Details> <SubStatus>{health_substatus}</SubStatus> <Description>{health_description}</Description> </Details> - ''') + """ + ) - PROVISIONING_SUCCESS_STATUS = 'Ready' - PROVISIONING_NOT_READY_STATUS = 'NotReady' - PROVISIONING_FAILURE_SUBSTATUS = 'ProvisioningFailed' + PROVISIONING_SUCCESS_STATUS = "Ready" + PROVISIONING_NOT_READY_STATUS = "NotReady" + PROVISIONING_FAILURE_SUBSTATUS = "ProvisioningFailed" HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512 def __init__( - self, goal_state: GoalState, - azure_endpoint_client: AzureEndpointHttpClient, - endpoint: str) -> None: + self, + goal_state: GoalState, + azure_endpoint_client: AzureEndpointHttpClient, + endpoint: str, + ) -> None: """Creates instance that will report provisioning status to an endpoint @param goal_state: An instance of class GoalState that contains @@ -608,17 +712,19 @@ class GoalStateHealthReporter: incarnation=self._goal_state.incarnation, container_id=self._goal_state.container_id, instance_id=self._goal_state.instance_id, - status=self.PROVISIONING_SUCCESS_STATUS) - LOG.debug('Reporting ready to Azure fabric.') + status=self.PROVISIONING_SUCCESS_STATUS, + ) + 
LOG.debug("Reporting ready to Azure fabric.") try: self._post_health_report(document=document) except Exception as e: report_diagnostic_event( "exception while reporting ready: %s" % e, - logger_func=LOG.error) + logger_func=LOG.error, + ) raise - LOG.info('Reported ready to Azure fabric.') + LOG.info("Reported ready to Azure fabric.") @azure_ds_telemetry_reporter def send_failure_signal(self, description: str) -> None: @@ -628,7 +734,8 @@ class GoalStateHealthReporter: instance_id=self._goal_state.instance_id, status=self.PROVISIONING_NOT_READY_STATUS, substatus=self.PROVISIONING_FAILURE_SUBSTATUS, - description=description) + description=description, + ) try: self._post_health_report(document=document) except Exception as e: @@ -636,24 +743,33 @@ class GoalStateHealthReporter: report_diagnostic_event(msg, logger_func=LOG.error) raise - LOG.warning('Reported failure to Azure fabric.') + LOG.warning("Reported failure to Azure fabric.") def build_report( - self, incarnation: str, container_id: str, instance_id: str, - status: str, substatus=None, description=None) -> str: - health_detail = '' + self, + incarnation: str, + container_id: str, + instance_id: str, + status: str, + substatus=None, + description=None, + ) -> str: + health_detail = "" if substatus is not None: health_detail = self.HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format( health_substatus=escape(substatus), health_description=escape( - description[:self.HEALTH_REPORT_DESCRIPTION_TRIM_LEN])) + description[: self.HEALTH_REPORT_DESCRIPTION_TRIM_LEN] + ), + ) health_report = self.HEALTH_REPORT_XML_TEMPLATE.format( incarnation=escape(str(incarnation)), container_id=escape(container_id), instance_id=escape(instance_id), health_status=escape(status), - health_detail_subsection=health_detail) + health_detail_subsection=health_detail, + ) return health_report @@ -681,20 +797,22 @@ class GoalStateHealthReporter: # reporting handler that writes to the special KVP files. 
time.sleep(0) - LOG.debug('Sending health report to Azure fabric.') + LOG.debug("Sending health report to Azure fabric.") url = "http://{}/machine?comp=health".format(self._endpoint) self._azure_endpoint_client.post( url, data=document, - extra_headers={'Content-Type': 'text/xml; charset=utf-8'}) - LOG.debug('Successfully sent health report to Azure fabric') + extra_headers={"Content-Type": "text/xml; charset=utf-8"}, + ) + LOG.debug("Successfully sent health report to Azure fabric") class WALinuxAgentShim: - def __init__(self, fallback_lease_file=None, dhcp_options=None): - LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s', - fallback_lease_file) + LOG.debug( + "WALinuxAgentShim instantiated, fallback_lease_file=%s", + fallback_lease_file, + ) self.dhcpoptions = dhcp_options self._endpoint = None self.openssl_manager = None @@ -713,30 +831,33 @@ class WALinuxAgentShim: @property def endpoint(self): if self._endpoint is None: - self._endpoint = self.find_endpoint(self.lease_file, - self.dhcpoptions) + self._endpoint = self.find_endpoint( + self.lease_file, self.dhcpoptions + ) return self._endpoint @staticmethod def get_ip_from_lease_value(fallback_lease_value): - unescaped_value = fallback_lease_value.replace('\\', '') + unescaped_value = fallback_lease_value.replace("\\", "") if len(unescaped_value) > 4: - hex_string = '' - for hex_pair in unescaped_value.split(':'): + hex_string = "" + for hex_pair in unescaped_value.split(":"): if len(hex_pair) == 1: - hex_pair = '0' + hex_pair + hex_pair = "0" + hex_pair hex_string += hex_pair packed_bytes = struct.pack( - '>L', int(hex_string.replace(':', ''), 16)) + ">L", int(hex_string.replace(":", ""), 16) + ) else: - packed_bytes = unescaped_value.encode('utf-8') + packed_bytes = unescaped_value.encode("utf-8") return socket.inet_ntoa(packed_bytes) @staticmethod @azure_ds_telemetry_reporter def _networkd_get_value_from_leases(leases_d=None): return dhcp.networkd_get_option_from_leases( - 'OPTION_245', leases_d=leases_d) + "OPTION_245", leases_d=leases_d + ) @staticmethod @azure_ds_telemetry_reporter @@ -754,7 +875,7 @@ class WALinuxAgentShim: if option_name in line: # Example line from Ubuntu # option unknown-245 a8:3f:81:10; - leases.append(line.strip(' ').split(' ', 2)[-1].strip(';\n"')) + leases.append(line.strip(" ").split(" ", 2)[-1].strip(';\n"')) # Return the "most recent" one in the list if len(leases) < 1: return None @@ -769,15 +890,16 @@ class WALinuxAgentShim: if not os.path.exists(hooks_dir): LOG.debug("%s not found.", hooks_dir) return None - hook_files = [os.path.join(hooks_dir, x) - for x in os.listdir(hooks_dir)] + hook_files = [ + os.path.join(hooks_dir, x) for x in os.listdir(hooks_dir) + ] for hook_file in hook_files: try: - name = os.path.basename(hook_file).replace('.json', '') + name = os.path.basename(hook_file).replace(".json", "") dhcp_options[name] = json.loads(util.load_file((hook_file))) except ValueError as e: raise ValueError( - '{_file} is not valid JSON data'.format(_file=hook_file) + "{_file} is not valid JSON data".format(_file=hook_file) ) from e return dhcp_options @@ -789,7 +911,7 @@ class WALinuxAgentShim: # the MS endpoint server is given to us as DHPC option 245 _value = None for interface in dhcp_options: - _value = dhcp_options[interface].get('unknown_245', None) + _value = dhcp_options[interface].get("unknown_245", None) if _value is not None: LOG.debug("Endpoint server found in dhclient options") break @@ -819,51 +941,73 @@ class WALinuxAgentShim: LOG.debug("Using Azure Endpoint from 
dhcp options") if value is None: report_diagnostic_event( - 'No Azure endpoint from dhcp options. ' - 'Finding Azure endpoint from networkd...', - logger_func=LOG.debug) + "No Azure endpoint from dhcp options. " + "Finding Azure endpoint from networkd...", + logger_func=LOG.debug, + ) value = WALinuxAgentShim._networkd_get_value_from_leases() if value is None: # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json # a dhclient exit hook that calls cloud-init-dhclient-hook report_diagnostic_event( - 'No Azure endpoint from networkd. ' - 'Finding Azure endpoint from hook json...', - logger_func=LOG.debug) + "No Azure endpoint from networkd. " + "Finding Azure endpoint from hook json...", + logger_func=LOG.debug, + ) dhcp_options = WALinuxAgentShim._load_dhclient_json() value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options) if value is None: # Fallback and check the leases file if unsuccessful report_diagnostic_event( - 'No Azure endpoint from dhclient logs. ' - 'Unable to find endpoint in dhclient logs. ' - 'Falling back to check lease files', - logger_func=LOG.debug) + "No Azure endpoint from dhclient logs. " + "Unable to find endpoint in dhclient logs. " + "Falling back to check lease files", + logger_func=LOG.debug, + ) if fallback_lease_file is None: report_diagnostic_event( - 'No fallback lease file was specified.', - logger_func=LOG.warning) + "No fallback lease file was specified.", + logger_func=LOG.warning, + ) value = None else: report_diagnostic_event( - 'Looking for endpoint in lease file %s' - % fallback_lease_file, logger_func=LOG.debug) + "Looking for endpoint in lease file %s" + % fallback_lease_file, + logger_func=LOG.debug, + ) value = WALinuxAgentShim._get_value_from_leases_file( - fallback_lease_file) + fallback_lease_file + ) if value is None: value = DEFAULT_WIRESERVER_ENDPOINT report_diagnostic_event( - 'No lease found; using default endpoint: %s' % value, - logger_func=LOG.warning) + "No lease found; using default endpoint: %s" % value, + logger_func=LOG.warning, + ) endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) report_diagnostic_event( - 'Azure endpoint found at %s' % endpoint_ip_address, - logger_func=LOG.debug) + "Azure endpoint found at %s" % endpoint_ip_address, + logger_func=LOG.debug, + ) return endpoint_ip_address @azure_ds_telemetry_reporter - def register_with_azure_and_fetch_data(self, pubkey_info=None) -> dict: + def eject_iso(self, iso_dev) -> None: + try: + LOG.debug("Ejecting the provisioning iso") + subp.subp(["eject", iso_dev]) + except Exception as e: + report_diagnostic_event( + "Failed ejecting the provisioning iso: %s" % e, + logger_func=LOG.debug, + ) + + @azure_ds_telemetry_reporter + def register_with_azure_and_fetch_data( + self, pubkey_info=None, iso_dev=None + ) -> Optional[List[str]]: """Gets the VM's GoalState from Azure, uses the GoalState information to report ready/send the ready signal/provisioning complete signal to Azure, and then uses pubkey_info to filter and obtain the user's @@ -880,7 +1024,8 @@ class WALinuxAgentShim: http_client_certificate = self.openssl_manager.certificate if self.azure_endpoint_client is None: self.azure_endpoint_client = AzureEndpointHttpClient( - http_client_certificate) + http_client_certificate + ) goal_state = self._fetch_goal_state_from_azure( need_certificate=http_client_certificate is not None ) @@ -888,9 +1033,14 @@ class WALinuxAgentShim: if pubkey_info is not None: ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info) health_reporter = 
GoalStateHealthReporter( - goal_state, self.azure_endpoint_client, self.endpoint) + goal_state, self.azure_endpoint_client, self.endpoint + ) + + if iso_dev is not None: + self.eject_iso(iso_dev) + health_reporter.send_ready_signal() - return {'public-keys': ssh_keys} + return ssh_keys @azure_ds_telemetry_reporter def register_with_azure_and_report_failure(self, description: str) -> None: @@ -903,13 +1053,14 @@ class WALinuxAgentShim: self.azure_endpoint_client = AzureEndpointHttpClient(None) goal_state = self._fetch_goal_state_from_azure(need_certificate=False) health_reporter = GoalStateHealthReporter( - goal_state, self.azure_endpoint_client, self.endpoint) + goal_state, self.azure_endpoint_client, self.endpoint + ) health_reporter.send_failure_signal(description=description) @azure_ds_telemetry_reporter def _fetch_goal_state_from_azure( - self, - need_certificate: bool) -> GoalState: + self, need_certificate: bool + ) -> GoalState: """Fetches the GoalState XML from the Azure endpoint, parses the XML, and returns a GoalState object. @@ -918,8 +1069,7 @@ class WALinuxAgentShim: """ unparsed_goal_state_xml = self._get_raw_goal_state_xml_from_azure() return self._parse_raw_goal_state_xml( - unparsed_goal_state_xml, - need_certificate + unparsed_goal_state_xml, need_certificate ) @azure_ds_telemetry_reporter @@ -930,27 +1080,29 @@ class WALinuxAgentShim: @return: GoalState XML string """ - LOG.info('Registering with Azure...') - url = 'http://{}/machine/?comp=goalstate'.format(self.endpoint) + LOG.info("Registering with Azure...") + url = "http://{}/machine/?comp=goalstate".format(self.endpoint) try: with events.ReportEventStack( - name="goalstate-retrieval", - description="retrieve goalstate", - parent=azure_ds_reporter): + name="goalstate-retrieval", + description="retrieve goalstate", + parent=azure_ds_reporter, + ): response = self.azure_endpoint_client.get(url) except Exception as e: report_diagnostic_event( - 'failed to register with Azure and fetch GoalState XML: %s' - % e, logger_func=LOG.warning) + "failed to register with Azure and fetch GoalState XML: %s" + % e, + logger_func=LOG.warning, + ) raise - LOG.debug('Successfully fetched GoalState XML.') + LOG.debug("Successfully fetched GoalState XML.") return response.contents @azure_ds_telemetry_reporter def _parse_raw_goal_state_xml( - self, - unparsed_goal_state_xml: str, - need_certificate: bool) -> GoalState: + self, unparsed_goal_state_xml: str, need_certificate: bool + ) -> GoalState: """Parses a GoalState XML string and returns a GoalState object. 
@param unparsed_goal_state_xml: GoalState XML string @@ -961,23 +1113,28 @@ class WALinuxAgentShim: goal_state = GoalState( unparsed_goal_state_xml, self.azure_endpoint_client, - need_certificate + need_certificate, ) except Exception as e: report_diagnostic_event( - 'Error processing GoalState XML: %s' % e, - logger_func=LOG.warning) + "Error processing GoalState XML: %s" % e, + logger_func=LOG.warning, + ) raise - msg = ', '.join([ - 'GoalState XML container id: %s' % goal_state.container_id, - 'GoalState XML instance id: %s' % goal_state.instance_id, - 'GoalState XML incarnation: %s' % goal_state.incarnation]) + msg = ", ".join( + [ + "GoalState XML container id: %s" % goal_state.container_id, + "GoalState XML instance id: %s" % goal_state.instance_id, + "GoalState XML incarnation: %s" % goal_state.incarnation, + ] + ) report_diagnostic_event(msg, logger_func=LOG.debug) return goal_state @azure_ds_telemetry_reporter def _get_user_pubkeys( - self, goal_state: GoalState, pubkey_info: list) -> list: + self, goal_state: GoalState, pubkey_info: list + ) -> list: """Gets and filters the VM admin user's authorized pubkeys. The admin user in this case is the username specified as "admin" @@ -1005,15 +1162,16 @@ class WALinuxAgentShim: """ ssh_keys = [] if goal_state.certificates_xml is not None and pubkey_info is not None: - LOG.debug('Certificate XML found; parsing out public keys.') + LOG.debug("Certificate XML found; parsing out public keys.") keys_by_fingerprint = self.openssl_manager.parse_certificates( - goal_state.certificates_xml) + goal_state.certificates_xml + ) ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info) return ssh_keys @staticmethod def _filter_pubkeys(keys_by_fingerprint: dict, pubkey_info: list) -> list: - """ Filter and return only the user's actual pubkeys. + """Filter and return only the user's actual pubkeys. @param keys_by_fingerprint: pubkey fingerprint -> pubkey value dict that was obtained from GoalState Certificates XML. 
May contain @@ -1026,70 +1184,65 @@ class WALinuxAgentShim: """ keys = [] for pubkey in pubkey_info: - if 'value' in pubkey and pubkey['value']: - keys.append(pubkey['value']) - elif 'fingerprint' in pubkey and pubkey['fingerprint']: - fingerprint = pubkey['fingerprint'] + if "value" in pubkey and pubkey["value"]: + keys.append(pubkey["value"]) + elif "fingerprint" in pubkey and pubkey["fingerprint"]: + fingerprint = pubkey["fingerprint"] if fingerprint in keys_by_fingerprint: keys.append(keys_by_fingerprint[fingerprint]) else: - LOG.warning("ovf-env.xml specified PublicKey fingerprint " - "%s not found in goalstate XML", fingerprint) + LOG.warning( + "ovf-env.xml specified PublicKey fingerprint " + "%s not found in goalstate XML", + fingerprint, + ) else: - LOG.warning("ovf-env.xml specified PublicKey with neither " - "value nor fingerprint: %s", pubkey) + LOG.warning( + "ovf-env.xml specified PublicKey with neither " + "value nor fingerprint: %s", + pubkey, + ) return keys @azure_ds_telemetry_reporter -def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, - pubkey_info=None): - shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, - dhcp_options=dhcp_opts) +def get_metadata_from_fabric( + fallback_lease_file=None, dhcp_opts=None, pubkey_info=None, iso_dev=None +): + shim = WALinuxAgentShim( + fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts + ) try: - return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) + return shim.register_with_azure_and_fetch_data( + pubkey_info=pubkey_info, iso_dev=iso_dev + ) finally: shim.clean_up() @azure_ds_telemetry_reporter -def report_failure_to_fabric(fallback_lease_file=None, dhcp_opts=None, - description=None): - shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, - dhcp_options=dhcp_opts) +def report_failure_to_fabric( + fallback_lease_file=None, dhcp_opts=None, description=None +): + shim = WALinuxAgentShim( + fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts + ) if not description: description = DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE try: - shim.register_with_azure_and_report_failure( - description=description) + shim.register_with_azure_and_report_failure(description=description) finally: shim.clean_up() def dhcp_log_cb(out, err): report_diagnostic_event( - "dhclient output stream: %s" % out, logger_func=LOG.debug) + "dhclient output stream: %s" % out, logger_func=LOG.debug + ) report_diagnostic_event( - "dhclient error stream: %s" % err, logger_func=LOG.debug) - - -class EphemeralDHCPv4WithReporting: - def __init__(self, reporter, nic=None): - self.reporter = reporter - self.ephemeralDHCPv4 = EphemeralDHCPv4( - iface=nic, dhcp_log_func=dhcp_log_cb) - - def __enter__(self): - with events.ReportEventStack( - name="obtain-dhcp-lease", - description="obtain dhcp lease", - parent=self.reporter): - return self.ephemeralDHCPv4.__enter__() - - def __exit__(self, excp_type, excp_value, excp_traceback): - self.ephemeralDHCPv4.__exit__( - excp_type, excp_value, excp_traceback) + "dhclient error stream: %s" % err, logger_func=LOG.debug + ) # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py index f9be4ecb..72515caf 100644 --- a/cloudinit/sources/helpers/digitalocean.py +++ b/cloudinit/sources/helpers/digitalocean.py @@ -8,20 +8,18 @@ import random from cloudinit import dmi from cloudinit import log as logging from cloudinit import net as cloudnet -from cloudinit import url_helper -from cloudinit import 
subp -from cloudinit import util +from cloudinit import subp, url_helper, util -NIC_MAP = {'public': 'eth0', 'private': 'eth1'} +NIC_MAP = {"public": "eth0", "private": "eth1"} LOG = logging.getLogger(__name__) def assign_ipv4_link_local(distro, nic=None): - """Bring up NIC using an address using link-local (ip4LL) IPs. On - DigitalOcean, the link-local domain is per-droplet routed, so there - is no risk of collisions. However, to be more safe, the ip4LL - address is random. + """Bring up NIC using an address using link-local (ip4LL) IPs. + On DigitalOcean, the link-local domain is per-droplet routed, so there + is no risk of collisions. However, to be more safe, the ip4LL + address is random. """ if not nic: @@ -29,18 +27,22 @@ def assign_ipv4_link_local(distro, nic=None): LOG.debug("selected interface '%s' for reading metadata", nic) if not nic: - raise RuntimeError("unable to find interfaces to access the" - "meta-data server. This droplet is broken.") + raise RuntimeError( + "unable to find interfaces to access the" + "meta-data server. This droplet is broken." + ) - addr = "169.254.{0}.{1}/16".format(random.randint(1, 168), - random.randint(0, 255)) + addr = "169.254.{0}.{1}/16".format( + random.randint(1, 168), random.randint(0, 255) + ) - ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic] - ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up'] + ip_addr_cmd = ["ip", "addr", "add", addr, "dev", nic] + ip_link_cmd = ["ip", "link", "set", "dev", nic, "up"] - if not subp.which('ip'): - raise RuntimeError("No 'ip' command available to configure ip4LL " - "address") + if not subp.which("ip"): + raise RuntimeError( + "No 'ip' command available to configure ip4LL address" + ) try: subp.subp(ip_addr_cmd) @@ -48,8 +50,13 @@ def assign_ipv4_link_local(distro, nic=None): subp.subp(ip_link_cmd) LOG.debug("brought device '%s' up", nic) except Exception: - util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed." - " Droplet networking will be broken", addr, nic) + util.logexc( + LOG, + "ip4LL address assignment of '%s' to '%s' failed." + " Droplet networking will be broken", + addr, + nic, + ) raise return nic @@ -63,21 +70,23 @@ def get_link_local_nic(distro): ] if not nics: return None - return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex')) + return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, "ifindex")) def del_ipv4_link_local(nic=None): """Remove the ip4LL address. While this is not necessary, the ip4LL - address is extraneous and confusing to users. + address is extraneous and confusing to users. """ if not nic: - LOG.debug("no link_local address interface defined, skipping link " - "local address cleanup") + LOG.debug( + "no link_local address interface defined, skipping link " + "local address cleanup" + ) return LOG.debug("cleaning up ipv4LL address") - ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic] + ip_addr_cmd = ["ip", "addr", "flush", "dev", nic] try: subp.subp(ip_addr_cmd) @@ -89,44 +98,47 @@ def del_ipv4_link_local(nic=None): def convert_network_configuration(config, dns_servers): """Convert the DigitalOcean Network description into Cloud-init's netconfig - format. 
- - Example JSON: - {'public': [ - {'mac': '04:01:58:27:7f:01', - 'ipv4': {'gateway': '45.55.32.1', - 'netmask': '255.255.224.0', - 'ip_address': '45.55.50.93'}, - 'anchor_ipv4': { - 'gateway': '10.17.0.1', - 'netmask': '255.255.0.0', - 'ip_address': '10.17.0.9'}, - 'type': 'public', - 'ipv6': {'gateway': '....', - 'ip_address': '....', - 'cidr': 64}} - ], - 'private': [ - {'mac': '04:01:58:27:7f:02', - 'ipv4': {'gateway': '10.132.0.1', - 'netmask': '255.255.0.0', - 'ip_address': '10.132.75.35'}, - 'type': 'private'} - ] - } + format. + + Example JSON: + {'public': [ + {'mac': '04:01:58:27:7f:01', + 'ipv4': {'gateway': '45.55.32.1', + 'netmask': '255.255.224.0', + 'ip_address': '45.55.50.93'}, + 'anchor_ipv4': { + 'gateway': '10.17.0.1', + 'netmask': '255.255.0.0', + 'ip_address': '10.17.0.9'}, + 'type': 'public', + 'ipv6': {'gateway': '....', + 'ip_address': '....', + 'cidr': 64}} + ], + 'private': [ + {'mac': '04:01:58:27:7f:02', + 'ipv4': {'gateway': '10.132.0.1', + 'netmask': '255.255.0.0', + 'ip_address': '10.132.75.35'}, + 'type': 'private'} + ] + } """ def _get_subnet_part(pcfg): - subpart = {'type': 'static', - 'control': 'auto', - 'address': pcfg.get('ip_address'), - 'gateway': pcfg.get('gateway')} - - if ":" in pcfg.get('ip_address'): - subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'), - pcfg.get('cidr')) + subpart = { + "type": "static", + "control": "auto", + "address": pcfg.get("ip_address"), + "gateway": pcfg.get("gateway"), + } + + if ":" in pcfg.get("ip_address"): + subpart["address"] = "{0}/{1}".format( + pcfg.get("ip_address"), pcfg.get("cidr") + ) else: - subpart['netmask'] = pcfg.get('netmask') + subpart["netmask"] = pcfg.get("netmask") return subpart @@ -138,54 +150,66 @@ def convert_network_configuration(config, dns_servers): nic = config[n][0] LOG.debug("considering %s", nic) - mac_address = nic.get('mac') + mac_address = nic.get("mac") if mac_address not in macs_to_nics: - raise RuntimeError("Did not find network interface on system " - "with mac '%s'. Cannot apply configuration: %s" - % (mac_address, nic)) + raise RuntimeError( + "Did not find network interface on system " + "with mac '%s'. Cannot apply configuration: %s" + % (mac_address, nic) + ) sysfs_name = macs_to_nics.get(mac_address) - nic_type = nic.get('type', 'unknown') + nic_type = nic.get("type", "unknown") if_name = NIC_MAP.get(nic_type, sysfs_name) if if_name != sysfs_name: - LOG.debug("Found %s interface '%s' on '%s', assigned name of '%s'", - nic_type, mac_address, sysfs_name, if_name) + LOG.debug( + "Found %s interface '%s' on '%s', assigned name of '%s'", + nic_type, + mac_address, + sysfs_name, + if_name, + ) else: - msg = ("Found interface '%s' on '%s', which is not a public " - "or private interface. Using default system naming.") + msg = ( + "Found interface '%s' on '%s', which is not a public " + "or private interface. Using default system naming." 
+ ) LOG.debug(msg, mac_address, sysfs_name) - ncfg = {'type': 'physical', - 'mac_address': mac_address, - 'name': if_name} + ncfg = { + "type": "physical", + "mac_address": mac_address, + "name": if_name, + } subnets = [] - for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'): + for netdef in ("ipv4", "ipv6", "anchor_ipv4", "anchor_ipv6"): raw_subnet = nic.get(netdef, None) if not raw_subnet: continue sub_part = _get_subnet_part(raw_subnet) if nic_type != "public" or "anchor" in netdef: - del sub_part['gateway'] + del sub_part["gateway"] subnets.append(sub_part) - ncfg['subnets'] = subnets + ncfg["subnets"] = subnets nic_configs.append(ncfg) LOG.debug("nic '%s' configuration: %s", if_name, ncfg) if dns_servers: LOG.debug("added dns servers: %s", dns_servers) - nic_configs.append({'type': 'nameserver', 'address': dns_servers}) + nic_configs.append({"type": "nameserver", "address": dns_servers}) - return {'version': 1, 'config': nic_configs} + return {"version": 1, "config": nic_configs} def read_metadata(url, timeout=2, sec_between=2, retries=30): - response = url_helper.readurl(url, timeout=timeout, - sec_between=sec_between, retries=retries) + response = url_helper.readurl( + url, timeout=timeout, sec_between=sec_between, retries=retries + ) if not response.ok(): raise RuntimeError("unable to read metadata at %s" % url) return json.loads(response.contents.decode()) @@ -202,16 +226,21 @@ def read_sysinfo(): droplet_id = dmi.read_dmi_data("system-serial-number") if droplet_id: - LOG.debug("system identified via SMBIOS as DigitalOcean Droplet: %s", - droplet_id) + LOG.debug( + "system identified via SMBIOS as DigitalOcean Droplet: %s", + droplet_id, + ) else: - msg = ("system identified via SMBIOS as a DigitalOcean " - "Droplet, but did not provide an ID. Please file a " - "support ticket at: " - "https://cloud.digitalocean.com/support/tickets/new") + msg = ( + "system identified via SMBIOS as a DigitalOcean " + "Droplet, but did not provide an ID. Please file a " + "support ticket at: " + "https://cloud.digitalocean.com/support/tickets/new" + ) LOG.critical(msg) raise RuntimeError(msg) return (True, droplet_id) + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py index 33dc4c53..592ae80b 100644 --- a/cloudinit/sources/helpers/hetzner.py +++ b/cloudinit/sources/helpers/hetzner.py @@ -3,24 +3,25 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit import url_helper -from cloudinit import util - import base64 import binascii +from cloudinit import url_helper, util + def read_metadata(url, timeout=2, sec_between=2, retries=30): - response = url_helper.readurl(url, timeout=timeout, - sec_between=sec_between, retries=retries) + response = url_helper.readurl( + url, timeout=timeout, sec_between=sec_between, retries=retries + ) if not response.ok(): raise RuntimeError("unable to read metadata at %s" % url) return util.load_yaml(response.contents.decode()) def read_userdata(url, timeout=2, sec_between=2, retries=30): - response = url_helper.readurl(url, timeout=timeout, - sec_between=sec_between, retries=retries) + response = url_helper.readurl( + url, timeout=timeout, sec_between=sec_between, retries=retries + ) if not response.ok(): raise RuntimeError("unable to read userdata at %s" % url) return response.contents diff --git a/cloudinit/sources/helpers/netlink.py b/cloudinit/sources/helpers/netlink.py index e13d6834..2953e858 100644 --- a/cloudinit/sources/helpers/netlink.py +++ b/cloudinit/sources/helpers/netlink.py @@ -2,14 +2,14 @@ # # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit import log as logging -from cloudinit import util -from collections import namedtuple - import os import select import socket import struct +from collections import namedtuple + +from cloudinit import log as logging +from cloudinit import util LOG = logging.getLogger(__name__) @@ -47,29 +47,30 @@ OPER_TESTING = 4 OPER_DORMANT = 5 OPER_UP = 6 -RTAAttr = namedtuple('RTAAttr', ['length', 'rta_type', 'data']) -InterfaceOperstate = namedtuple('InterfaceOperstate', ['ifname', 'operstate']) -NetlinkHeader = namedtuple('NetlinkHeader', ['length', 'type', 'flags', 'seq', - 'pid']) +RTAAttr = namedtuple("RTAAttr", ["length", "rta_type", "data"]) +InterfaceOperstate = namedtuple("InterfaceOperstate", ["ifname", "operstate"]) +NetlinkHeader = namedtuple( + "NetlinkHeader", ["length", "type", "flags", "seq", "pid"] +) class NetlinkCreateSocketError(RuntimeError): - '''Raised if netlink socket fails during create or bind.''' + """Raised if netlink socket fails during create or bind.""" def create_bound_netlink_socket(): - '''Creates netlink socket and bind on netlink group to catch interface + """Creates netlink socket and bind on netlink group to catch interface down/up events. The socket will bound only on RTMGRP_LINK (which only includes RTM_NEWLINK/RTM_DELLINK/RTM_GETLINK events). The socket is set to non-blocking mode since we're only receiving messages. 
:returns: netlink socket in non-blocking mode :raises: NetlinkCreateSocketError - ''' + """ try: - netlink_socket = socket.socket(socket.AF_NETLINK, - socket.SOCK_RAW, - socket.NETLINK_ROUTE) + netlink_socket = socket.socket( + socket.AF_NETLINK, socket.SOCK_RAW, socket.NETLINK_ROUTE + ) netlink_socket.bind((os.getpid(), RTMGRP_LINK)) netlink_socket.setblocking(0) except socket.error as e: @@ -80,7 +81,7 @@ def create_bound_netlink_socket(): def get_netlink_msg_header(data): - '''Gets netlink message type and length + """Gets netlink message type and length :param: data read from netlink socket :returns: netlink message type @@ -92,18 +93,20 @@ def get_netlink_msg_header(data): __u32 nlmsg_seq; /* Sequence number */ __u32 nlmsg_pid; /* Sender port ID */ }; - ''' - assert (data is not None), ("data is none") - assert (len(data) >= NLMSGHDR_SIZE), ( - "data is smaller than netlink message header") - msg_len, msg_type, flags, seq, pid = struct.unpack(NLMSGHDR_FMT, - data[:MSG_TYPE_OFFSET]) + """ + assert data is not None, "data is none" + assert ( + len(data) >= NLMSGHDR_SIZE + ), "data is smaller than netlink message header" + msg_len, msg_type, flags, seq, pid = struct.unpack( + NLMSGHDR_FMT, data[:MSG_TYPE_OFFSET] + ) LOG.debug("Got netlink msg of type %d", msg_type) return NetlinkHeader(msg_len, msg_type, flags, seq, pid) def read_netlink_socket(netlink_socket, timeout=None): - '''Select and read from the netlink socket if ready. + """Select and read from the netlink socket if ready. :param: netlink_socket: specify which socket object to read from :param: timeout: specify a timeout value (integer) to wait while reading, @@ -111,8 +114,8 @@ def read_netlink_socket(netlink_socket, timeout=None): :returns: string of data read (max length = <MAX_SIZE>) from socket, if no data read, returns None :raises: AssertionError if netlink_socket is None - ''' - assert (netlink_socket is not None), ("netlink socket is none") + """ + assert netlink_socket is not None, "netlink socket is none" read_set, _, _ = select.select([netlink_socket], [], [], timeout) # Incase of timeout,read_set doesn't contain netlink socket. # just return from this function @@ -126,32 +129,33 @@ def read_netlink_socket(netlink_socket, timeout=None): def unpack_rta_attr(data, offset): - '''Unpack a single rta attribute. + """Unpack a single rta attribute. :param: data: string of data read from netlink socket :param: offset: starting offset of RTA Attribute :return: RTAAttr object with length, type and data. On error, return None. :raises: AssertionError if data is None or offset is not integer. - ''' - assert (data is not None), ("data is none") - assert (type(offset) == int), ("offset is not integer") - assert (offset >= RTATTR_START_OFFSET), ( - "rta offset is less than expected length") + """ + assert data is not None, "data is none" + assert type(offset) == int, "offset is not integer" + assert ( + offset >= RTATTR_START_OFFSET + ), "rta offset is less than expected length" length = rta_type = 0 attr_data = None try: length = struct.unpack_from("H", data, offset=offset)[0] - rta_type = struct.unpack_from("H", data, offset=offset+2)[0] + rta_type = struct.unpack_from("H", data, offset=offset + 2)[0] except struct.error: return None # Should mean our offset is >= remaining data # Unpack just the attribute's data. 
Offset by 4 to skip length/type header - attr_data = data[offset+RTA_DATA_START_OFFSET:offset+length] + attr_data = data[offset + RTA_DATA_START_OFFSET : offset + length] return RTAAttr(length, rta_type, attr_data) def read_rta_oper_state(data): - '''Reads Interface name and operational state from RTA Data. + """Reads Interface name and operational state from RTA Data. :param: data: string of data read from netlink socket :returns: InterfaceOperstate object containing if_name and oper_state. @@ -159,10 +163,11 @@ def read_rta_oper_state(data): IFLA_IFNAME messages. :raises: AssertionError if data is None or length of data is smaller than RTATTR_START_OFFSET. - ''' - assert (data is not None), ("data is none") - assert (len(data) > RTATTR_START_OFFSET), ( - "length of data is smaller than RTATTR_START_OFFSET") + """ + assert data is not None, "data is none" + assert ( + len(data) > RTATTR_START_OFFSET + ), "length of data is smaller than RTATTR_START_OFFSET" ifname = operstate = None offset = RTATTR_START_OFFSET while offset <= len(data): @@ -170,15 +175,16 @@ def read_rta_oper_state(data): if not attr or attr.length == 0: break # Each attribute is 4-byte aligned. Determine pad length. - padlen = (PAD_ALIGNMENT - - (attr.length % PAD_ALIGNMENT)) % PAD_ALIGNMENT + padlen = ( + PAD_ALIGNMENT - (attr.length % PAD_ALIGNMENT) + ) % PAD_ALIGNMENT offset += attr.length + padlen if attr.rta_type == IFLA_OPERSTATE: operstate = ord(attr.data) elif attr.rta_type == IFLA_IFNAME: - interface_name = util.decode_binary(attr.data, 'utf-8') - ifname = interface_name.strip('\0') + interface_name = util.decode_binary(attr.data, "utf-8") + ifname = interface_name.strip("\0") if not ifname or operstate is None: return None LOG.debug("rta attrs: ifname %s operstate %d", ifname, operstate) @@ -186,12 +192,12 @@ def read_rta_oper_state(data): def wait_for_nic_attach_event(netlink_socket, existing_nics): - '''Block until a single nic is attached. + """Block until a single nic is attached. :param: netlink_socket: netlink_socket to receive events :param: existing_nics: List of existing nics so that we can skip them. :raises: AssertionError if netlink_socket is none. - ''' + """ LOG.debug("Preparing to wait for nic attach.") ifname = None @@ -204,19 +210,21 @@ def wait_for_nic_attach_event(netlink_socket, existing_nics): # We can return even if the operational state of the new nic is DOWN # because we set it to UP before doing dhcp. - read_netlink_messages(netlink_socket, - None, - [RTM_NEWLINK], - [OPER_UP, OPER_DOWN], - should_continue_cb) + read_netlink_messages( + netlink_socket, + None, + [RTM_NEWLINK], + [OPER_UP, OPER_DOWN], + should_continue_cb, + ) return ifname def wait_for_nic_detach_event(netlink_socket): - '''Block until a single nic is detached and its operational state is down. + """Block until a single nic is detached and its operational state is down. :param: netlink_socket: netlink_socket to receive events. - ''' + """ LOG.debug("Preparing to wait for nic detach.") ifname = None @@ -225,16 +233,14 @@ def wait_for_nic_detach_event(netlink_socket): ifname = iname return False - read_netlink_messages(netlink_socket, - None, - [RTM_DELLINK], - [OPER_DOWN], - should_continue_cb) + read_netlink_messages( + netlink_socket, None, [RTM_DELLINK], [OPER_DOWN], should_continue_cb + ) return ifname def wait_for_media_disconnect_connect(netlink_socket, ifname): - '''Block until media disconnect and connect has happened on an interface. + """Block until media disconnect and connect has happened on an interface. 
Listens on netlink socket to receive netlink events and when the carrier changes from 0 to 1, it considers event has happened and return from this function @@ -242,10 +248,10 @@ def wait_for_media_disconnect_connect(netlink_socket, ifname): :param: netlink_socket: netlink_socket to receive events :param: ifname: Interface name to lookout for netlink events :raises: AssertionError if netlink_socket is None or ifname is None. - ''' - assert (netlink_socket is not None), ("netlink socket is none") - assert (ifname is not None), ("interface name is none") - assert (len(ifname) > 0), ("interface name cannot be empty") + """ + assert netlink_socket is not None, "netlink socket is none" + assert ifname is not None, "interface name is none" + assert len(ifname) > 0, "interface name cannot be empty" def should_continue_cb(iname, carrier, prevCarrier): # check for carrier down, up sequence @@ -256,19 +262,23 @@ def wait_for_media_disconnect_connect(netlink_socket, ifname): return True LOG.debug("Wait for media disconnect and reconnect to happen") - read_netlink_messages(netlink_socket, - ifname, - [RTM_NEWLINK, RTM_DELLINK], - [OPER_UP, OPER_DOWN], - should_continue_cb) - - -def read_netlink_messages(netlink_socket, - ifname_filter, - rtm_types, - operstates, - should_continue_callback): - ''' Reads from the netlink socket until the condition specified by + read_netlink_messages( + netlink_socket, + ifname, + [RTM_NEWLINK, RTM_DELLINK], + [OPER_UP, OPER_DOWN], + should_continue_cb, + ) + + +def read_netlink_messages( + netlink_socket, + ifname_filter, + rtm_types, + operstates, + should_continue_callback, +): + """Reads from the netlink socket until the condition specified by the continuation callback is met. :param: netlink_socket: netlink_socket to receive events. @@ -276,7 +286,7 @@ def read_netlink_messages(netlink_socket, :param: rtm_types: Type of netlink events to listen for. :param: operstates: Operational states to listen. :param: should_continue_callback: Specifies when to stop listening. - ''' + """ if netlink_socket is None: raise RuntimeError("Netlink socket is none") data = bytes() @@ -286,9 +296,9 @@ def read_netlink_messages(netlink_socket, recv_data = read_netlink_socket(netlink_socket, SELECT_TIMEOUT) if recv_data is None: continue - LOG.debug('read %d bytes from socket', len(recv_data)) + LOG.debug("read %d bytes from socket", len(recv_data)) data += recv_data - LOG.debug('Length of data after concat %d', len(data)) + LOG.debug("Length of data after concat %d", len(data)) offset = 0 datalen = len(data) while offset < datalen: @@ -300,30 +310,37 @@ def read_netlink_messages(netlink_socket, if len(nl_msg) < nlheader.length: LOG.debug("Partial data. Smaller than netlink message") break - padlen = (nlheader.length+PAD_ALIGNMENT-1) & ~(PAD_ALIGNMENT-1) + padlen = (nlheader.length + PAD_ALIGNMENT - 1) & ~( + PAD_ALIGNMENT - 1 + ) offset = offset + padlen - LOG.debug('offset to next netlink message: %d', offset) + LOG.debug("offset to next netlink message: %d", offset) # Continue if we are not interested in this message. if nlheader.type not in rtm_types: continue interface_state = read_rta_oper_state(nl_msg) if interface_state is None: - LOG.debug('Failed to read rta attributes: %s', interface_state) + LOG.debug("Failed to read rta attributes: %s", interface_state) continue - if (ifname_filter is not None and - interface_state.ifname != ifname_filter): + if ( + ifname_filter is not None + and interface_state.ifname != ifname_filter + ): LOG.debug( "Ignored netlink event on interface %s. 
Waiting for %s.", - interface_state.ifname, ifname_filter) + interface_state.ifname, + ifname_filter, + ) continue if interface_state.operstate not in operstates: continue prevCarrier = carrier carrier = interface_state.operstate - if not should_continue_callback(interface_state.ifname, - carrier, - prevCarrier): + if not should_continue_callback( + interface_state.ifname, carrier, prevCarrier + ): return data = data[offset:] + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 3e6365f1..a42543e4 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -14,11 +14,7 @@ import os from cloudinit import ec2_utils from cloudinit import log as logging -from cloudinit import net -from cloudinit import sources -from cloudinit import subp -from cloudinit import url_helper -from cloudinit import util +from cloudinit import net, sources, subp, url_helper, util from cloudinit.sources import BrokenMetadata # See https://docs.openstack.org/user-guide/cli-config-drive.html @@ -27,30 +23,30 @@ LOG = logging.getLogger(__name__) FILES_V1 = { # Path <-> (metadata key name, translator function, default value) - 'etc/network/interfaces': ('network_config', lambda x: x, ''), - 'meta.js': ('meta_js', util.load_json, {}), - "root/.ssh/authorized_keys": ('authorized_keys', lambda x: x, ''), + "etc/network/interfaces": ("network_config", lambda x: x, ""), + "meta.js": ("meta_js", util.load_json, {}), + "root/.ssh/authorized_keys": ("authorized_keys", lambda x: x, ""), } KEY_COPIES = ( # Cloud-init metadata names <-> (metadata key, is required) - ('local-hostname', 'hostname', False), - ('instance-id', 'uuid', True), + ("local-hostname", "hostname", False), + ("instance-id", "uuid", True), ) # Versions and names taken from nova source nova/api/metadata/base.py -OS_LATEST = 'latest' -OS_FOLSOM = '2012-08-10' -OS_GRIZZLY = '2013-04-04' -OS_HAVANA = '2013-10-17' -OS_LIBERTY = '2015-10-15' +OS_LATEST = "latest" +OS_FOLSOM = "2012-08-10" +OS_GRIZZLY = "2013-04-04" +OS_HAVANA = "2013-10-17" +OS_LIBERTY = "2015-10-15" # NEWTON_ONE adds 'devices' to md (sriov-pf-passthrough-neutron-port-vlan) -OS_NEWTON_ONE = '2016-06-30' +OS_NEWTON_ONE = "2016-06-30" # NEWTON_TWO adds vendor_data2.json (vendordata-reboot) -OS_NEWTON_TWO = '2016-10-06' +OS_NEWTON_TWO = "2016-10-06" # OS_OCATA adds 'vif' field to devices (sriov-pf-passthrough-neutron-port-vlan) -OS_OCATA = '2017-02-22' +OS_OCATA = "2017-02-22" # OS_ROCKY adds a vf_trusted field to devices (sriov-trusted-vfs) -OS_ROCKY = '2018-08-27' +OS_ROCKY = "2018-08-27" # keep this in chronological order. new supported versions go at the end. @@ -67,18 +63,18 @@ OS_VERSIONS = ( KNOWN_PHYSICAL_TYPES = ( None, - 'bgpovs', # not present in OpenStack upstream but used on OVH cloud. - 'bridge', - 'cascading', # not present in OpenStack upstream, used on OpenTelekomCloud - 'dvs', - 'ethernet', - 'hw_veb', - 'hyperv', - 'ovs', - 'phy', - 'tap', - 'vhostuser', - 'vif', + "bgpovs", # not present in OpenStack upstream but used on OVH cloud. 
+ "bridge", + "cascading", # not present in OpenStack upstream, used on OpenTelekomCloud + "dvs", + "ethernet", + "hw_veb", + "hyperv", + "ovs", + "phy", + "tap", + "vhostuser", + "vif", ) @@ -90,7 +86,7 @@ class SourceMixin(object): def _ec2_name_to_device(self, name): if not self.ec2_metadata: return None - bdm = self.ec2_metadata.get('block-device-mapping', {}) + bdm = self.ec2_metadata.get("block-device-mapping", {}) for (ent_name, device) in bdm.items(): if name == ent_name: return device @@ -105,9 +101,9 @@ class SourceMixin(object): def _os_name_to_device(self, name): device = None try: - criteria = 'LABEL=%s' % (name) - if name == 'swap': - criteria = 'TYPE=%s' % (name) + criteria = "LABEL=%s" % (name) + if name == "swap": + criteria = "TYPE=%s" % (name) dev_entries = util.find_devs_with(criteria) if dev_entries: device = dev_entries[0] @@ -135,10 +131,10 @@ class SourceMixin(object): return None # Try the ec2 mapping first names = [name] - if name == 'root': - names.insert(0, 'ami') - if name == 'ami': - names.append('root') + if name == "root": + names.insert(0, "ami") + if name == "ami": + names.append("root") device = None LOG.debug("Using ec2 style lookup to find device %s", names) for n in names: @@ -163,7 +159,6 @@ class SourceMixin(object): class BaseReader(metaclass=abc.ABCMeta): - def __init__(self, base_path): self.base_path = base_path @@ -187,8 +182,11 @@ class BaseReader(metaclass=abc.ABCMeta): try: versions_available = self._fetch_available_versions() except Exception as e: - LOG.debug("Unable to read openstack versions from %s due to: %s", - self.base_path, e) + LOG.debug( + "Unable to read openstack versions from %s due to: %s", + self.base_path, + e, + ) versions_available = [] # openstack.OS_VERSIONS is stored in chronological order, so @@ -202,12 +200,15 @@ class BaseReader(metaclass=abc.ABCMeta): selected_version = potential_version break - LOG.debug("Selected version '%s' from %s", selected_version, - versions_available) + LOG.debug( + "Selected version '%s' from %s", + selected_version, + versions_available, + ) return selected_version def _read_content_path(self, item, decode=False): - path = item.get('content_path', '').lstrip("/") + path = item.get("content_path", "").lstrip("/") path_pieces = path.split("/") valid_pieces = [p for p in path_pieces if len(p)] if not valid_pieces: @@ -225,38 +226,44 @@ class BaseReader(metaclass=abc.ABCMeta): """ load_json_anytype = functools.partial( - util.load_json, root_types=(dict, list, str)) + util.load_json, root_types=(dict, list, str) + ) def datafiles(version): files = {} - files['metadata'] = ( + files["metadata"] = ( # File path to read - self._path_join("openstack", version, 'meta_data.json'), + self._path_join("openstack", version, "meta_data.json"), # Is it required? 
True, # Translator function (applied after loading) util.load_json, ) - files['userdata'] = ( - self._path_join("openstack", version, 'user_data'), + files["userdata"] = ( + self._path_join("openstack", version, "user_data"), False, lambda x: x, ) - files['vendordata'] = ( - self._path_join("openstack", version, 'vendor_data.json'), + files["vendordata"] = ( + self._path_join("openstack", version, "vendor_data.json"), + False, + load_json_anytype, + ) + files["vendordata2"] = ( + self._path_join("openstack", version, "vendor_data2.json"), False, load_json_anytype, ) - files['networkdata'] = ( - self._path_join("openstack", version, 'network_data.json'), + files["networkdata"] = ( + self._path_join("openstack", version, "network_data.json"), False, load_json_anytype, ) return files results = { - 'userdata': '', - 'version': 2, + "userdata": "", + "version": 2, } data = datafiles(self._find_working_version()) for (name, (path, required, translator)) in data.items(): @@ -267,11 +274,13 @@ class BaseReader(metaclass=abc.ABCMeta): data = self._path_read(path) except IOError as e: if not required: - LOG.debug("Failed reading optional path %s due" - " to: %s", path, e) + LOG.debug( + "Failed reading optional path %s due to: %s", path, e + ) else: - LOG.debug("Failed reading mandatory path %s due" - " to: %s", path, e) + LOG.debug( + "Failed reading mandatory path %s due to: %s", path, e + ) else: found = True if required and not found: @@ -286,11 +295,11 @@ class BaseReader(metaclass=abc.ABCMeta): if found: results[name] = data - metadata = results['metadata'] - if 'random_seed' in metadata: - random_seed = metadata['random_seed'] + metadata = results["metadata"] + if "random_seed" in metadata: + random_seed = metadata["random_seed"] try: - metadata['random_seed'] = base64.b64decode(random_seed) + metadata["random_seed"] = base64.b64decode(random_seed) except (ValueError, TypeError) as e: raise BrokenMetadata( "Badly formatted metadata random_seed entry: %s" % e @@ -298,18 +307,18 @@ class BaseReader(metaclass=abc.ABCMeta): # load any files that were provided files = {} - metadata_files = metadata.get('files', []) + metadata_files = metadata.get("files", []) for item in metadata_files: - if 'path' not in item: + if "path" not in item: continue - path = item['path'] + path = item["path"] try: files[path] = self._read_content_path(item) except Exception as e: raise BrokenMetadata( "Failed to read provided file %s: %s" % (path, e) ) from e - results['files'] = files + results["files"] = files # The 'network_config' item in metadata is a content pointer # to the network config that should be applied. It is just a @@ -318,7 +327,7 @@ class BaseReader(metaclass=abc.ABCMeta): if net_item: try: content = self._read_content_path(net_item, decode=True) - results['network_config'] = content + results["network_config"] = content except IOError as e: raise BrokenMetadata( "Failed to read network configuration: %s" % (e) @@ -329,12 +338,12 @@ class BaseReader(metaclass=abc.ABCMeta): # if they specify 'dsmode' they're indicating the mode that they intend # for this datasource to operate in. try: - results['dsmode'] = metadata['meta']['dsmode'] + results["dsmode"] = metadata["meta"]["dsmode"] except KeyError: pass # Read any ec2-metadata (if applicable) - results['ec2-metadata'] = self._read_ec2_metadata() + results["ec2-metadata"] = self._read_ec2_metadata() # Perform some misc. metadata key renames... 
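# NOTE: illustrative sketch, not applied by this patch. The loop that
# follows copies OpenStack-native keys onto the names cloud-init expects,
# per KEY_COPIES above ('uuid' -> 'instance-id', 'hostname' ->
# 'local-hostname'). A condensed sketch of that rename pass; the error
# type raised for a missing required key is an assumption here:
KEY_COPIES = (
    ("local-hostname", "hostname", False),
    ("instance-id", "uuid", True),
)

def apply_key_copies(md):
    for target_key, source_key, is_required in KEY_COPIES:
        if is_required and source_key not in md:
            raise ValueError("missing required key: %s" % source_key)
        if source_key in md:
            md[target_key] = md[source_key]
    return md

# apply_key_copies({"uuid": "i-123", "hostname": "vm1"}) adds
# "instance-id" and "local-hostname" alongside the original keys.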
for (target_key, source_key, is_required) in KEY_COPIES: @@ -359,15 +368,19 @@ class ConfigDriveReader(BaseReader): def _fetch_available_versions(self): if self._versions is None: - path = self._path_join(self.base_path, 'openstack') - found = [d for d in os.listdir(path) - if os.path.isdir(os.path.join(path))] + path = self._path_join(self.base_path, "openstack") + found = [ + d + for d in os.listdir(path) + if os.path.isdir(os.path.join(path)) + ] self._versions = sorted(found) return self._versions def _read_ec2_metadata(self): - path = self._path_join(self.base_path, - 'ec2', 'latest', 'meta-data.json') + path = self._path_join( + self.base_path, "ec2", "latest", "meta-data.json" + ) if not os.path.exists(path): return {} else: @@ -414,14 +427,14 @@ class ConfigDriveReader(BaseReader): else: md[key] = copy.deepcopy(default) - keydata = md['authorized_keys'] - meta_js = md['meta_js'] + keydata = md["authorized_keys"] + meta_js = md["meta_js"] # keydata in meta_js is preferred over "injected" - keydata = meta_js.get('public-keys', keydata) + keydata = meta_js.get("public-keys", keydata) if keydata: lines = keydata.splitlines() - md['public-keys'] = [ + md["public-keys"] = [ line for line in lines if len(line) and not line.startswith("#") @@ -429,25 +442,25 @@ class ConfigDriveReader(BaseReader): # config-drive-v1 has no way for openstack to provide the instance-id # so we copy that into metadata from the user input - if 'instance-id' in meta_js: - md['instance-id'] = meta_js['instance-id'] + if "instance-id" in meta_js: + md["instance-id"] = meta_js["instance-id"] results = { - 'version': 1, - 'metadata': md, + "version": 1, + "metadata": md, } # allow the user to specify 'dsmode' in a meta tag - if 'dsmode' in meta_js: - results['dsmode'] = meta_js['dsmode'] + if "dsmode" in meta_js: + results["dsmode"] = meta_js["dsmode"] # config-drive-v1 has no way of specifying user-data, so the user has # to cheat and stuff it in a meta tag also. - results['userdata'] = meta_js.get('user-data', '') + results["userdata"] = meta_js.get("user-data", "") # this implementation does not support files other than # network/interfaces and authorized_keys... 
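# NOTE: illustrative values, not applied by this patch. A successful
# config-drive v1 read returns a structure of roughly this shape:
example_v1_results = {
    "version": 1,
    "metadata": {
        "instance-id": "i-abc123",  # copied from meta.js when present
        "public-keys": ["ssh-rsa AAAA... user@host"],
    },
    "userdata": "",  # v1 can only carry user-data via a meta.js tag
    "files": {},     # only interfaces/authorized_keys are supported
}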
- results['files'] = {} + results["files"] = {} return results @@ -476,7 +489,6 @@ class MetadataReader(BaseReader): return self._versions def _path_read(self, path, decode=False): - def should_retry_cb(_request_args, cause): try: code = int(cause.code) @@ -487,11 +499,13 @@ class MetadataReader(BaseReader): pass return True - response = url_helper.readurl(path, - retries=self.retries, - ssl_details=self.ssl_details, - timeout=self.timeout, - exception_cb=should_retry_cb) + response = url_helper.readurl( + path, + retries=self.retries, + ssl_details=self.ssl_details, + timeout=self.timeout, + exception_cb=should_retry_cb, + ) if decode: return response.contents.decode() else: @@ -501,9 +515,11 @@ class MetadataReader(BaseReader): return url_helper.combine_url(base, *add_ons) def _read_ec2_metadata(self): - return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details, - timeout=self.timeout, - retries=self.retries) + return ec2_utils.get_instance_metadata( + ssl_details=self.ssl_details, + timeout=self.timeout, + retries=self.retries, + ) # Convert OpenStack ConfigDrive NetworkData json to network_config yaml @@ -539,32 +555,32 @@ def convert_net_json(network_json=None, known_macs=None): # dict of network_config key for filtering network_json valid_keys = { - 'physical': [ - 'name', - 'type', - 'mac_address', - 'subnets', - 'params', - 'mtu', + "physical": [ + "name", + "type", + "mac_address", + "subnets", + "params", + "mtu", ], - 'subnet': [ - 'type', - 'address', - 'netmask', - 'broadcast', - 'metric', - 'gateway', - 'pointopoint', - 'scope', - 'dns_nameservers', - 'dns_search', - 'routes', + "subnet": [ + "type", + "address", + "netmask", + "broadcast", + "metric", + "gateway", + "pointopoint", + "scope", + "dns_nameservers", + "dns_search", + "routes", ], } - links = network_json.get('links', []) - networks = network_json.get('networks', []) - services = network_json.get('services', []) + links = network_json.get("links", []) + networks = network_json.get("networks", []) + services = network_json.get("services", []) link_updates = [] link_id_info = {} @@ -573,65 +589,77 @@ def convert_net_json(network_json=None, known_macs=None): config = [] for link in links: subnets = [] - cfg = dict((k, v) for k, v in link.items() - if k in valid_keys['physical']) + cfg = dict( + (k, v) for k, v in link.items() if k in valid_keys["physical"] + ) # 'name' is not in openstack spec yet, but we will support it if it is # present. The 'id' in the spec is currently implemented as the host # nic's name, meaning something like 'tap-adfasdffd'. We do not want # to name guest devices with such ugly names. 
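# NOTE: illustrative sketch, not applied by this patch. The cfg built for
# each link keeps only the keys cloud-init's v1 schema knows, dropping
# OpenStack-only fields such as "vif_id". An equivalent standalone filter:
def filter_keys(d, allowed):
    return {k: v for k, v in d.items() if k in allowed}

link = {"id": "tapcd9f6d46-4a", "type": "bridge", "mtu": None,
        "ethernet_mac_address": "fa:16:3e:9c:bf:3d", "vif_id": "cd9f6d46"}
physical_keys = ["name", "type", "mac_address", "subnets", "params", "mtu"]
assert filter_keys(link, physical_keys) == {"type": "bridge", "mtu": None}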
- if 'name' in link: - cfg['name'] = link['name'] + if "name" in link: + cfg["name"] = link["name"] link_mac_addr = None - if link.get('ethernet_mac_address'): - link_mac_addr = link.get('ethernet_mac_address').lower() - link_id_info[link['id']] = link_mac_addr - - curinfo = {'name': cfg.get('name'), 'mac': link_mac_addr, - 'id': link['id'], 'type': link['type']} - - for network in [n for n in networks - if n['link'] == link['id']]: - subnet = dict((k, v) for k, v in network.items() - if k in valid_keys['subnet']) - - if network['type'] == 'ipv4_dhcp': - subnet.update({'type': 'dhcp4'}) - elif network['type'] == 'ipv6_dhcp': - subnet.update({'type': 'dhcp6'}) - elif network['type'] in ['ipv6_slaac', 'ipv6_dhcpv6-stateless', - 'ipv6_dhcpv6-stateful']: - subnet.update({'type': network['type']}) - elif network['type'] in ['ipv4', 'static']: - subnet.update({ - 'type': 'static', - 'address': network.get('ip_address'), - }) - elif network['type'] in ['ipv6', 'static6']: - cfg.update({'accept-ra': False}) - subnet.update({ - 'type': 'static6', - 'address': network.get('ip_address'), - }) + if link.get("ethernet_mac_address"): + link_mac_addr = link.get("ethernet_mac_address").lower() + link_id_info[link["id"]] = link_mac_addr + + curinfo = { + "name": cfg.get("name"), + "mac": link_mac_addr, + "id": link["id"], + "type": link["type"], + } + + for network in [n for n in networks if n["link"] == link["id"]]: + subnet = dict( + (k, v) for k, v in network.items() if k in valid_keys["subnet"] + ) + + if network["type"] == "ipv4_dhcp": + subnet.update({"type": "dhcp4"}) + elif network["type"] == "ipv6_dhcp": + subnet.update({"type": "dhcp6"}) + elif network["type"] in [ + "ipv6_slaac", + "ipv6_dhcpv6-stateless", + "ipv6_dhcpv6-stateful", + ]: + subnet.update({"type": network["type"]}) + elif network["type"] in ["ipv4", "static"]: + subnet.update( + { + "type": "static", + "address": network.get("ip_address"), + } + ) + elif network["type"] in ["ipv6", "static6"]: + cfg.update({"accept-ra": False}) + subnet.update( + { + "type": "static6", + "address": network.get("ip_address"), + } + ) # Enable accept_ra for stateful and legacy ipv6_dhcp types - if network['type'] in ['ipv6_dhcpv6-stateful', 'ipv6_dhcp']: - cfg.update({'accept-ra': True}) + if network["type"] in ["ipv6_dhcpv6-stateful", "ipv6_dhcp"]: + cfg.update({"accept-ra": True}) - if network['type'] == 'ipv4': - subnet['ipv4'] = True - if network['type'] == 'ipv6': - subnet['ipv6'] = True + if network["type"] == "ipv4": + subnet["ipv4"] = True + if network["type"] == "ipv6": + subnet["ipv6"] = True subnets.append(subnet) - cfg.update({'subnets': subnets}) - if link['type'] in ['bond']: + cfg.update({"subnets": subnets}) + if link["type"] in ["bond"]: params = {} if link_mac_addr: - params['mac_address'] = link_mac_addr + params["mac_address"] = link_mac_addr for k, v in link.items(): - if k == 'bond_links': + if k == "bond_links": continue - elif k.startswith('bond'): + elif k.startswith("bond"): params.update({k: v}) # openstack does not provide a name for the bond. @@ -644,35 +672,45 @@ def convert_net_json(network_json=None, known_macs=None): # to the network config by their nic name. # store that in bond_links_needed, and update these later. 
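# NOTE: worked example, not applied by this patch. Bond links carry their
# options as flat "bond_*" keys; everything except bond_links becomes a
# bond parameter, exactly as in the loop above:
link = {"type": "bond", "bond_links": ["nic0", "nic1"],
        "bond_mode": "802.3ad", "bond_miimon": 100}
params = {}
for k, v in link.items():
    if k == "bond_links":
        continue  # deferred; resolved to real nic names via link_updates
    elif k.startswith("bond"):
        params[k] = v
assert params == {"bond_mode": "802.3ad", "bond_miimon": 100}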
link_updates.append( - (cfg, 'bond_interfaces', '%s', - copy.deepcopy(link['bond_links'])) + ( + cfg, + "bond_interfaces", + "%s", + copy.deepcopy(link["bond_links"]), + ) + ) + cfg.update({"params": params, "name": link_name}) + + curinfo["name"] = link_name + elif link["type"] in ["vlan"]: + name = "%s.%s" % (link["vlan_link"], link["vlan_id"]) + cfg.update( + { + "name": name, + "vlan_id": link["vlan_id"], + "mac_address": link["vlan_mac_address"], + } ) - cfg.update({'params': params, 'name': link_name}) - - curinfo['name'] = link_name - elif link['type'] in ['vlan']: - name = "%s.%s" % (link['vlan_link'], link['vlan_id']) - cfg.update({ - 'name': name, - 'vlan_id': link['vlan_id'], - 'mac_address': link['vlan_mac_address'], - }) - link_updates.append((cfg, 'vlan_link', '%s', link['vlan_link'])) - link_updates.append((cfg, 'name', "%%s.%s" % link['vlan_id'], - link['vlan_link'])) - curinfo.update({'mac': link['vlan_mac_address'], - 'name': name}) + link_updates.append((cfg, "vlan_link", "%s", link["vlan_link"])) + link_updates.append( + (cfg, "name", "%%s.%s" % link["vlan_id"], link["vlan_link"]) + ) + curinfo.update({"mac": link["vlan_mac_address"], "name": name}) else: - if link['type'] not in KNOWN_PHYSICAL_TYPES: - LOG.warning('Unknown network_data link type (%s); treating as' - ' physical', link['type']) - cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) + if link["type"] not in KNOWN_PHYSICAL_TYPES: + LOG.warning( + "Unknown network_data link type (%s); treating as" + " physical", + link["type"], + ) + cfg.update({"type": "physical", "mac_address": link_mac_addr}) config.append(cfg) - link_id_info[curinfo['id']] = curinfo + link_id_info[curinfo["id"]] = curinfo - need_names = [d for d in config - if d.get('type') == 'physical' and 'name' not in d] + need_names = [ + d for d in config if d.get("type") == "physical" and "name" not in d + ] if need_names or link_updates: if known_macs is None: @@ -680,26 +718,26 @@ def convert_net_json(network_json=None, known_macs=None): # go through and fill out the link_id_info with names for _link_id, info in link_id_info.items(): - if info.get('name'): + if info.get("name"): continue - if info.get('mac') in known_macs: - info['name'] = known_macs[info['mac']] + if info.get("mac") in known_macs: + info["name"] = known_macs[info["mac"]] for d in need_names: - mac = d.get('mac_address') + mac = d.get("mac_address") if not mac: raise ValueError("No mac_address or name entry for %s" % d) if mac not in known_macs: raise ValueError("Unable to find a system nic for %s" % d) - d['name'] = known_macs[mac] + d["name"] = known_macs[mac] for cfg, key, fmt, targets in link_updates: if isinstance(targets, (list, tuple)): cfg[key] = [ - fmt % link_id_info[target]['name'] for target in targets + fmt % link_id_info[target]["name"] for target in targets ] else: - cfg[key] = fmt % link_id_info[targets]['name'] + cfg[key] = fmt % link_id_info[targets]["name"] # Infiniband interfaces may be referenced in network_data.json by a 6 byte # Ethernet MAC-style address, and we use that address to look up the @@ -708,15 +746,16 @@ def convert_net_json(network_json=None, known_macs=None): ib_known_hwaddrs = net.get_ib_hwaddrs_by_interface() if ib_known_hwaddrs: for cfg in config: - if cfg['name'] in ib_known_hwaddrs: - cfg['mac_address'] = ib_known_hwaddrs[cfg['name']] - cfg['type'] = 'infiniband' + if cfg["name"] in ib_known_hwaddrs: + cfg["mac_address"] = ib_known_hwaddrs[cfg["name"]] + cfg["type"] = "infiniband" for service in services: cfg = service - 
cfg.update({'type': 'nameserver'}) + cfg.update({"type": "nameserver"}) config.append(cfg) - return {'version': 1, 'config': config} + return {"version": 1, "config": config} + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/cloudinit/sources/helpers/tests/test_netlink.py deleted file mode 100644 index cafe3961..00000000 --- a/cloudinit/sources/helpers/tests/test_netlink.py +++ /dev/null @@ -1,480 +0,0 @@ -# Author: Tamilmani Manoharan <tamanoha@microsoft.com> -# -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.tests.helpers import CiTestCase, mock -import socket -import struct -import codecs -from cloudinit.sources.helpers.netlink import ( - NetlinkCreateSocketError, create_bound_netlink_socket, read_netlink_socket, - read_rta_oper_state, unpack_rta_attr, wait_for_media_disconnect_connect, - wait_for_nic_attach_event, wait_for_nic_detach_event, - OPER_DOWN, OPER_UP, OPER_DORMANT, OPER_LOWERLAYERDOWN, OPER_NOTPRESENT, - OPER_TESTING, OPER_UNKNOWN, RTATTR_START_OFFSET, RTM_NEWLINK, RTM_DELLINK, - RTM_SETLINK, RTM_GETLINK, MAX_SIZE) - - -def int_to_bytes(i): - '''convert integer to binary: eg: 1 to \x01''' - hex_value = '{0:x}'.format(i) - hex_value = '0' * (len(hex_value) % 2) + hex_value - return codecs.decode(hex_value, 'hex_codec') - - -class TestCreateBoundNetlinkSocket(CiTestCase): - - @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') - def test_socket_error_on_create(self, m_socket): - '''create_bound_netlink_socket catches socket creation exception''' - - """NetlinkCreateSocketError is raised when socket creation errors.""" - m_socket.side_effect = socket.error("Fake socket failure") - with self.assertRaises(NetlinkCreateSocketError) as ctx_mgr: - create_bound_netlink_socket() - self.assertEqual( - 'Exception during netlink socket create: Fake socket failure', - str(ctx_mgr.exception)) - - -class TestReadNetlinkSocket(CiTestCase): - - @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') - @mock.patch('cloudinit.sources.helpers.netlink.select.select') - def test_read_netlink_socket(self, m_select, m_socket): - '''read_netlink_socket able to receive data''' - data = 'netlinktest' - m_select.return_value = [m_socket], None, None - m_socket.recv.return_value = data - recv_data = read_netlink_socket(m_socket, 2) - m_select.assert_called_with([m_socket], [], [], 2) - m_socket.recv.assert_called_with(MAX_SIZE) - self.assertIsNotNone(recv_data) - self.assertEqual(recv_data, data) - - @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') - @mock.patch('cloudinit.sources.helpers.netlink.select.select') - def test_netlink_read_timeout(self, m_select, m_socket): - '''read_netlink_socket should timeout if nothing to read''' - m_select.return_value = [], None, None - data = read_netlink_socket(m_socket, 1) - m_select.assert_called_with([m_socket], [], [], 1) - self.assertEqual(m_socket.recv.call_count, 0) - self.assertIsNone(data) - - def test_read_invalid_socket(self): - '''read_netlink_socket raises assert error if socket is invalid''' - socket = None - with self.assertRaises(AssertionError) as context: - read_netlink_socket(socket, 1) - self.assertTrue('netlink socket is none' in str(context.exception)) - - -class TestParseNetlinkMessage(CiTestCase): - - def test_read_rta_oper_state(self): - '''read_rta_oper_state could parse netlink message and extract data''' - ifname = "eth0" - bytes = ifname.encode("utf-8") - buf = bytearray(48) - struct.pack_into("HH4sHHc", buf, 
RTATTR_START_OFFSET, 8, 3, bytes, 5, - 16, int_to_bytes(OPER_DOWN)) - interface_state = read_rta_oper_state(buf) - self.assertEqual(interface_state.ifname, ifname) - self.assertEqual(interface_state.operstate, OPER_DOWN) - - def test_read_none_data(self): - '''read_rta_oper_state raises assert error if data is none''' - data = None - with self.assertRaises(AssertionError) as context: - read_rta_oper_state(data) - self.assertEqual('data is none', str(context.exception)) - - def test_read_invalid_rta_operstate_none(self): - '''read_rta_oper_state returns none if operstate is none''' - ifname = "eth0" - buf = bytearray(40) - bytes = ifname.encode("utf-8") - struct.pack_into("HH4s", buf, RTATTR_START_OFFSET, 8, 3, bytes) - interface_state = read_rta_oper_state(buf) - self.assertIsNone(interface_state) - - def test_read_invalid_rta_ifname_none(self): - '''read_rta_oper_state returns none if ifname is none''' - buf = bytearray(40) - struct.pack_into("HHc", buf, RTATTR_START_OFFSET, 5, 16, - int_to_bytes(OPER_DOWN)) - interface_state = read_rta_oper_state(buf) - self.assertIsNone(interface_state) - - def test_read_invalid_data_len(self): - '''raise assert error if data size is smaller than required size''' - buf = bytearray(32) - with self.assertRaises(AssertionError) as context: - read_rta_oper_state(buf) - self.assertTrue('length of data is smaller than RTATTR_START_OFFSET' in - str(context.exception)) - - def test_unpack_rta_attr_none_data(self): - '''unpack_rta_attr raises assert error if data is none''' - data = None - with self.assertRaises(AssertionError) as context: - unpack_rta_attr(data, RTATTR_START_OFFSET) - self.assertTrue('data is none' in str(context.exception)) - - def test_unpack_rta_attr_invalid_offset(self): - '''unpack_rta_attr raises assert error if offset is invalid''' - data = bytearray(48) - with self.assertRaises(AssertionError) as context: - unpack_rta_attr(data, "offset") - self.assertTrue('offset is not integer' in str(context.exception)) - with self.assertRaises(AssertionError) as context: - unpack_rta_attr(data, 31) - self.assertTrue('rta offset is less than expected length' in - str(context.exception)) - - -@mock.patch('cloudinit.sources.helpers.netlink.socket.socket') -@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket') -class TestNicAttachDetach(CiTestCase): - with_logs = True - - def _media_switch_data(self, ifname, msg_type, operstate): - '''construct netlink data with specified fields''' - if ifname and operstate is not None: - data = bytearray(48) - bytes = ifname.encode("utf-8") - struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(operstate)) - elif ifname: - data = bytearray(40) - bytes = ifname.encode("utf-8") - struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes) - elif operstate: - data = bytearray(40) - struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16, - int_to_bytes(operstate)) - struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0) - return data - - def test_nic_attached_oper_down(self, m_read_netlink_socket, m_socket): - '''Test for a new nic attached''' - ifname = "eth0" - data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) - m_read_netlink_socket.side_effect = [data_op_down] - ifread = wait_for_nic_attach_event(m_socket, []) - self.assertEqual(m_read_netlink_socket.call_count, 1) - self.assertEqual(ifname, ifread) - - def test_nic_attached_oper_up(self, m_read_netlink_socket, m_socket): - '''Test for a new nic attached''' - ifname = "eth0" - 
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - m_read_netlink_socket.side_effect = [data_op_up] - ifread = wait_for_nic_attach_event(m_socket, []) - self.assertEqual(m_read_netlink_socket.call_count, 1) - self.assertEqual(ifname, ifread) - - def test_nic_attach_ignore_existing(self, m_read_netlink_socket, m_socket): - '''Test that we read only the interfaces we are interested in.''' - data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN) - data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN) - m_read_netlink_socket.side_effect = [data_eth0, data_eth1] - ifread = wait_for_nic_attach_event(m_socket, ["eth0"]) - self.assertEqual(m_read_netlink_socket.call_count, 2) - self.assertEqual("eth1", ifread) - - def test_nic_attach_read_first(self, m_read_netlink_socket, m_socket): - '''Test that we read only the interfaces we are interested in.''' - data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN) - data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN) - m_read_netlink_socket.side_effect = [data_eth0, data_eth1] - ifread = wait_for_nic_attach_event(m_socket, ["eth1"]) - self.assertEqual(m_read_netlink_socket.call_count, 1) - self.assertEqual("eth0", ifread) - - def test_nic_detached(self, m_read_netlink_socket, m_socket): - '''Test for an existing nic detached''' - ifname = "eth0" - data_op_down = self._media_switch_data(ifname, RTM_DELLINK, OPER_DOWN) - m_read_netlink_socket.side_effect = [data_op_down] - ifread = wait_for_nic_detach_event(m_socket) - self.assertEqual(m_read_netlink_socket.call_count, 1) - self.assertEqual(ifname, ifread) - - -@mock.patch('cloudinit.sources.helpers.netlink.socket.socket') -@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket') -class TestWaitForMediaDisconnectConnect(CiTestCase): - with_logs = True - - def _media_switch_data(self, ifname, msg_type, operstate): - '''construct netlink data with specified fields''' - if ifname and operstate is not None: - data = bytearray(48) - bytes = ifname.encode("utf-8") - struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(operstate)) - elif ifname: - data = bytearray(40) - bytes = ifname.encode("utf-8") - struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes) - elif operstate: - data = bytearray(40) - struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16, - int_to_bytes(operstate)) - struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0) - return data - - def test_media_down_up_scenario(self, m_read_netlink_socket, - m_socket): - '''Test for media down up sequence for required interface name''' - ifname = "eth0" - # construct data for Oper State down - data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) - # construct data for Oper State up - data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - m_read_netlink_socket.side_effect = [data_op_down, data_op_up] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 2) - - def test_wait_for_media_switch_diff_interface(self, m_read_netlink_socket, - m_socket): - '''wait_for_media_disconnect_connect ignores unexpected interfaces. - - The first two messages are for other interfaces and last two are for - expected interface. 
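# NOTE: illustrative sketch, not part of the removed tests. The
# _media_switch_data helpers hand-build netlink messages: a 16-byte
# nlmsghdr ("=LHHLL": length, type, flags, seq, pid) followed by rtattr
# records. The constant values below (RTM_NEWLINK == 16,
# RTATTR_START_OFFSET == 32, OPER_DOWN == 2) are assumptions matching the
# module under test:
import struct

RTM_NEWLINK, RTATTR_START_OFFSET, OPER_DOWN = 16, 32, 2

def fake_link_event(ifname, operstate, msg_type=RTM_NEWLINK):
    buf = bytearray(48)
    name = ifname.encode("utf-8")
    # IFLA_IFNAME (type 3) followed by IFLA_OPERSTATE (type 16)
    struct.pack_into("HH4sHHc", buf, RTATTR_START_OFFSET,
                     8, 3, name, 5, 16, bytes([operstate]))
    struct.pack_into("=LHHLL", buf, 0, len(buf), msg_type, 0, 0, 0)
    return buf

event = fake_link_event("eth0", OPER_DOWN)  # one OPER_DOWN message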
So the function exit only after receiving last - 2 messages and therefore the call count for m_read_netlink_socket - has to be 4 - ''' - other_ifname = "eth1" - expected_ifname = "eth0" - data_op_down_eth1 = self._media_switch_data( - other_ifname, RTM_NEWLINK, OPER_DOWN - ) - data_op_up_eth1 = self._media_switch_data( - other_ifname, RTM_NEWLINK, OPER_UP - ) - data_op_down_eth0 = self._media_switch_data( - expected_ifname, RTM_NEWLINK, OPER_DOWN - ) - data_op_up_eth0 = self._media_switch_data( - expected_ifname, RTM_NEWLINK, OPER_UP) - m_read_netlink_socket.side_effect = [ - data_op_down_eth1, - data_op_up_eth1, - data_op_down_eth0, - data_op_up_eth0 - ] - wait_for_media_disconnect_connect(m_socket, expected_ifname) - self.assertIn('Ignored netlink event on interface %s' % other_ifname, - self.logs.getvalue()) - self.assertEqual(m_read_netlink_socket.call_count, 4) - - def test_invalid_msgtype_getlink(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect ignores GETLINK events. - - The first two messages are for oper down and up for RTM_GETLINK type - which netlink module will ignore. The last 2 messages are RTM_NEWLINK - with oper state down and up messages. Therefore the call count for - m_read_netlink_socket has to be 4 ignoring first 2 messages - of RTM_GETLINK - ''' - ifname = "eth0" - data_getlink_down = self._media_switch_data( - ifname, RTM_GETLINK, OPER_DOWN - ) - data_getlink_up = self._media_switch_data( - ifname, RTM_GETLINK, OPER_UP - ) - data_newlink_down = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_DOWN - ) - data_newlink_up = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_UP - ) - m_read_netlink_socket.side_effect = [ - data_getlink_down, - data_getlink_up, - data_newlink_down, - data_newlink_up - ] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 4) - - def test_invalid_msgtype_setlink(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect ignores SETLINK events. - - The first two messages are for oper down and up for RTM_GETLINK type - which it will ignore. 3rd and 4th messages are RTM_NEWLINK with down - and up messages. This function should exit after 4th messages since it - sees down->up scenario. 
So the call count for m_read_netlink_socket - has to be 4 ignoring first 2 messages of RTM_GETLINK and - last 2 messages of RTM_NEWLINK - ''' - ifname = "eth0" - data_setlink_down = self._media_switch_data( - ifname, RTM_SETLINK, OPER_DOWN - ) - data_setlink_up = self._media_switch_data( - ifname, RTM_SETLINK, OPER_UP - ) - data_newlink_down = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_DOWN - ) - data_newlink_up = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_UP - ) - m_read_netlink_socket.side_effect = [ - data_setlink_down, - data_setlink_up, - data_newlink_down, - data_newlink_up, - data_newlink_down, - data_newlink_up - ] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 4) - - def test_netlink_invalid_switch_scenario(self, m_read_netlink_socket, - m_socket): - '''returns only if it receives UP event after a DOWN event''' - ifname = "eth0" - data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) - data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - data_op_dormant = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_DORMANT - ) - data_op_notpresent = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_NOTPRESENT - ) - data_op_lowerdown = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN - ) - data_op_testing = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_TESTING - ) - data_op_unknown = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_UNKNOWN - ) - m_read_netlink_socket.side_effect = [ - data_op_up, data_op_up, - data_op_dormant, data_op_up, - data_op_notpresent, data_op_up, - data_op_lowerdown, data_op_up, - data_op_testing, data_op_up, - data_op_unknown, data_op_up, - data_op_down, data_op_up - ] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 14) - - def test_netlink_valid_inbetween_transitions(self, m_read_netlink_socket, - m_socket): - '''wait_for_media_disconnect_connect handles in between transitions''' - ifname = "eth0" - data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) - data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - data_op_dormant = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_DORMANT) - data_op_unknown = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_UNKNOWN) - m_read_netlink_socket.side_effect = [ - data_op_down, data_op_dormant, - data_op_unknown, data_op_up - ] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 4) - - def test_netlink_invalid_operstate(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect should handle invalid operstates. - - The function should not fail and return even if it receives invalid - operstates. It always should wait for down up sequence. 
- ''' - ifname = "eth0" - data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) - data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7) - m_read_netlink_socket.side_effect = [ - data_op_invalid, data_op_up, - data_op_down, data_op_invalid, - data_op_up - ] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 5) - - def test_wait_invalid_socket(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect handle none netlink socket.''' - socket = None - ifname = "eth0" - with self.assertRaises(AssertionError) as context: - wait_for_media_disconnect_connect(socket, ifname) - self.assertTrue('netlink socket is none' in str(context.exception)) - - def test_wait_invalid_ifname(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect handle none interface name''' - ifname = None - with self.assertRaises(AssertionError) as context: - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertTrue('interface name is none' in str(context.exception)) - ifname = "" - with self.assertRaises(AssertionError) as context: - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertTrue('interface name cannot be empty' in - str(context.exception)) - - def test_wait_invalid_rta_attr(self, m_read_netlink_socket, m_socket): - ''' wait_for_media_disconnect_connect handles invalid rta data''' - ifname = "eth0" - data_invalid1 = self._media_switch_data(None, RTM_NEWLINK, OPER_DOWN) - data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None) - data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) - data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - m_read_netlink_socket.side_effect = [ - data_invalid1, data_invalid2, data_op_down, data_op_up - ] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 4) - - def test_read_multiple_netlink_msgs(self, m_read_netlink_socket, m_socket): - '''Read multiple messages in single receive call''' - ifname = "eth0" - bytes = ifname.encode("utf-8") - data = bytearray(96) - struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into( - "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(OPER_DOWN) - ) - struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into( - "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8, - 3, bytes, 5, 16, int_to_bytes(OPER_UP) - ) - m_read_netlink_socket.return_value = data - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 1) - - def test_read_partial_netlink_msgs(self, m_read_netlink_socket, m_socket): - '''Read partial messages in receive call''' - ifname = "eth0" - bytes = ifname.encode("utf-8") - data1 = bytearray(112) - data2 = bytearray(32) - struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into( - "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(OPER_DOWN) - ) - struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into( - "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN) - ) - struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into( - "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP) - ) - m_read_netlink_socket.side_effect = [data1, data2] - wait_for_media_disconnect_connect(m_socket, ifname) - 
self.assertEqual(m_read_netlink_socket.call_count, 2) diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py deleted file mode 100644 index 2bde1e3f..00000000 --- a/cloudinit/sources/helpers/tests/test_openstack.py +++ /dev/null @@ -1,44 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -# ./cloudinit/sources/helpers/tests/test_openstack.py - -from cloudinit.sources.helpers import openstack -from cloudinit.tests import helpers as test_helpers - - -class TestConvertNetJson(test_helpers.CiTestCase): - - def test_phy_types(self): - """Verify the different known physical types are handled.""" - # network_data.json example from - # https://docs.openstack.org/nova/latest/user/metadata.html - mac0 = "fa:16:3e:9c:bf:3d" - net_json = { - "links": [ - {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a", - "mtu": None, "type": "bridge", - "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"} - ], - "networks": [ - {"id": "network0", "link": "tapcd9f6d46-4a", - "network_id": "99e88329-f20d-4741-9593-25bf07847b16", - "type": "ipv4_dhcp"} - ], - "services": [{"address": "8.8.8.8", "type": "dns"}] - } - macs = {mac0: 'eth0'} - - expected = { - 'version': 1, - 'config': [ - {'mac_address': 'fa:16:3e:9c:bf:3d', - 'mtu': None, 'name': 'eth0', - 'subnets': [{'type': 'dhcp4'}], - 'type': 'physical'}, - {'address': '8.8.8.8', 'type': 'nameserver'}]} - - for t in openstack.KNOWN_PHYSICAL_TYPES: - net_json["links"][0]["type"] = t - self.assertEqual( - expected, - openstack.convert_net_json(network_json=net_json, - known_macs=macs)) diff --git a/cloudinit/sources/helpers/upcloud.py b/cloudinit/sources/helpers/upcloud.py new file mode 100644 index 00000000..e7b95a5e --- /dev/null +++ b/cloudinit/sources/helpers/upcloud.py @@ -0,0 +1,229 @@ +# Author: Antti Myyrä <antti.myyra@upcloud.com> +# +# This file is part of cloud-init. See LICENSE file for license information. + +import json + +from cloudinit import dmi +from cloudinit import log as logging +from cloudinit import net as cloudnet +from cloudinit import url_helper + +LOG = logging.getLogger(__name__) + + +def convert_to_network_config_v1(config): + """ + Convert the UpCloud network metadata description into + Cloud-init's version 1 netconfig format. 
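Illustrative aside (interface name assumed): for the first public
interface in the example below, this converter would emit roughly

    {
        "type": "physical",
        "name": "eth0",              # resolved via get_interfaces_by_mac
        "mac_address": "32:d5:ba:4a:36:e7",
        "subnets": [
            {"type": "dhcp"},
            {"type": "static", "control": "auto",
             "address": "94.237.105.50", "netmask": "32"},
        ],
    }

given that the MAC maps to eth0 on the running system.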
+ + Example JSON: + { + "interfaces": [ + { + "index": 1, + "ip_addresses": [ + { + "address": "94.237.105.53", + "dhcp": true, + "dns": [ + "94.237.127.9", + "94.237.40.9" + ], + "family": "IPv4", + "floating": false, + "gateway": "94.237.104.1", + "network": "94.237.104.0/22" + }, + { + "address": "94.237.105.50", + "dhcp": false, + "dns": [], + "family": "IPv4", + "floating": true, + "gateway": "", + "network": "94.237.105.50/32" + } + ], + "mac": "32:d5:ba:4a:36:e7", + "network_id": "031457f4-0f8c-483c-96f2-eccede02909c", + "type": "public" + }, + { + "index": 2, + "ip_addresses": [ + { + "address": "10.6.3.27", + "dhcp": true, + "dns": [], + "family": "IPv4", + "floating": false, + "gateway": "10.6.0.1", + "network": "10.6.0.0/22" + } + ], + "mac": "32:d5:ba:4a:84:cc", + "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1", + "type": "utility" + }, + { + "index": 3, + "ip_addresses": [ + { + "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7", + "dhcp": true, + "dns": [ + "2a04:3540:53::1", + "2a04:3544:53::1" + ], + "family": "IPv6", + "floating": false, + "gateway": "2a04:3545:1000:720::1", + "network": "2a04:3545:1000:720::/64" + } + ], + "mac": "32:d5:ba:4a:63:e7", + "network_id": "03000000-0000-4000-8046-000000000000", + "type": "public" + }, + { + "index": 4, + "ip_addresses": [ + { + "address": "172.30.1.10", + "dhcp": true, + "dns": [], + "family": "IPv4", + "floating": false, + "gateway": "172.30.1.1", + "network": "172.30.1.0/24" + } + ], + "mac": "32:d5:ba:4a:8a:e1", + "network_id": "035a0a4a-77b4-4de5-820d-189fc8135714", + "type": "private" + } + ], + "dns": [ + "94.237.127.9", + "94.237.40.9" + ] + } + """ + + def _get_subnet_config(ip_addr, dns): + if ip_addr.get("dhcp"): + dhcp_type = "dhcp" + if ip_addr.get("family") == "IPv6": + # UpCloud currently passes IPv6 addresses via + # StateLess Address Auto Configuration (SLAAC) + dhcp_type = "ipv6_dhcpv6-stateless" + return {"type": dhcp_type} + + static_type = "static" + if ip_addr.get("family") == "IPv6": + static_type = "static6" + subpart = { + "type": static_type, + "control": "auto", + "address": ip_addr.get("address"), + } + + if ip_addr.get("gateway"): + subpart["gateway"] = ip_addr.get("gateway") + + if "/" in ip_addr.get("network"): + subpart["netmask"] = ip_addr.get("network").split("/")[1] + + if dns != ip_addr.get("dns") and ip_addr.get("dns"): + subpart["dns_nameservers"] = ip_addr.get("dns") + + return subpart + + nic_configs = [] + macs_to_interfaces = cloudnet.get_interfaces_by_mac() + LOG.debug("NIC mapping: %s", macs_to_interfaces) + + for raw_iface in config.get("interfaces"): + LOG.debug("Considering %s", raw_iface) + + mac_address = raw_iface.get("mac") + if mac_address not in macs_to_interfaces: + raise RuntimeError( + "Did not find network interface on system " + "with mac '%s'. 
Cannot apply configuration: %s" + % (mac_address, raw_iface) + ) + + iface_type = raw_iface.get("type") + sysfs_name = macs_to_interfaces.get(mac_address) + + LOG.debug( + "Found %s interface '%s' with address '%s' (index %d)", + iface_type, + sysfs_name, + mac_address, + raw_iface.get("index"), + ) + + interface = { + "type": "physical", + "name": sysfs_name, + "mac_address": mac_address, + } + + subnets = [] + for ip_address in raw_iface.get("ip_addresses"): + sub_part = _get_subnet_config(ip_address, config.get("dns")) + subnets.append(sub_part) + + interface["subnets"] = subnets + nic_configs.append(interface) + + if config.get("dns"): + LOG.debug("Setting DNS nameservers to %s", config.get("dns")) + nic_configs.append( + {"type": "nameserver", "address": config.get("dns")} + ) + + return {"version": 1, "config": nic_configs} + + +def convert_network_config(config): + return convert_to_network_config_v1(config) + + +def read_metadata(url, timeout=2, sec_between=2, retries=30): + response = url_helper.readurl( + url, timeout=timeout, sec_between=sec_between, retries=retries + ) + if not response.ok(): + raise RuntimeError("unable to read metadata at %s" % url) + return json.loads(response.contents.decode()) + + +def read_sysinfo(): + # UpCloud embeds vendor ID and server UUID in the + # SMBIOS information + + # Detect if we are on UpCloud and return the UUID + + vendor_name = dmi.read_dmi_data("system-manufacturer") + if vendor_name != "UpCloud": + return False, None + + server_uuid = dmi.read_dmi_data("system-uuid") + if server_uuid: + LOG.debug( + "system identified via SMBIOS as UpCloud server: %s", server_uuid + ) + else: + msg = ( + "system identified via SMBIOS as a UpCloud server, but " + "did not provide an ID. Please contact support via" + "https://hub.upcloud.com or via email with support@upcloud.com" + ) + LOG.critical(msg) + raise RuntimeError(msg) + + return True, server_uuid diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py index 9a07eafa..a5c67bb7 100644 --- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py +++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py @@ -9,7 +9,8 @@ class BootProtoEnum(object): """Specifies the NIC Boot Settings.""" - DHCP = 'dhcp' - STATIC = 'static' + DHCP = "dhcp" + STATIC = "static" + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py index 7109aef3..39dacee0 100644 --- a/cloudinit/sources/helpers/vmware/imc/config.py +++ b/cloudinit/sources/helpers/vmware/imc/config.py @@ -15,18 +15,20 @@ class Config(object): Specification file. 
""" - CUSTOM_SCRIPT = 'CUSTOM-SCRIPT|SCRIPT-NAME' - DNS = 'DNS|NAMESERVER|' - DOMAINNAME = 'NETWORK|DOMAINNAME' - HOSTNAME = 'NETWORK|HOSTNAME' - MARKERID = 'MISC|MARKER-ID' - PASS = 'PASSWORD|-PASS' - RESETPASS = 'PASSWORD|RESET' - SUFFIX = 'DNS|SUFFIX|' - TIMEZONE = 'DATETIME|TIMEZONE' - UTC = 'DATETIME|UTC' - POST_GC_STATUS = 'MISC|POST-GC-STATUS' - DEFAULT_RUN_POST_SCRIPT = 'MISC|DEFAULT-RUN-POST-CUST-SCRIPT' + CUSTOM_SCRIPT = "CUSTOM-SCRIPT|SCRIPT-NAME" + DNS = "DNS|NAMESERVER|" + DOMAINNAME = "NETWORK|DOMAINNAME" + HOSTNAME = "NETWORK|HOSTNAME" + MARKERID = "MISC|MARKER-ID" + PASS = "PASSWORD|-PASS" + RESETPASS = "PASSWORD|RESET" + SUFFIX = "DNS|SUFFIX|" + TIMEZONE = "DATETIME|TIMEZONE" + UTC = "DATETIME|UTC" + POST_GC_STATUS = "MISC|POST-GC-STATUS" + DEFAULT_RUN_POST_SCRIPT = "MISC|DEFAULT-RUN-POST-CUST-SCRIPT" + CLOUDINIT_META_DATA = "CLOUDINIT|METADATA" + CLOUDINIT_USER_DATA = "CLOUDINIT|USERDATA" def __init__(self, configFile): self._configFile = configFile @@ -82,8 +84,8 @@ class Config(object): def nics(self): """Return the list of associated NICs.""" res = [] - nics = self._configFile['NIC-CONFIG|NICS'] - for nic in nics.split(','): + nics = self._configFile["NIC-CONFIG|NICS"] + for nic in nics.split(","): res.append(Nic(nic, self._configFile)) return res @@ -91,11 +93,11 @@ class Config(object): @property def reset_password(self): """Retreives if the root password needs to be reset.""" - resetPass = self._configFile.get(Config.RESETPASS, 'no') + resetPass = self._configFile.get(Config.RESETPASS, "no") resetPass = resetPass.lower() - if resetPass not in ('yes', 'no'): - raise ValueError('ResetPassword value should be yes/no') - return resetPass == 'yes' + if resetPass not in ("yes", "no"): + raise ValueError("ResetPassword value should be yes/no") + return resetPass == "yes" @property def marker_id(self): @@ -110,11 +112,11 @@ class Config(object): @property def post_gc_status(self): """Return whether to post guestinfo.gc.status VMX property.""" - postGcStatus = self._configFile.get(Config.POST_GC_STATUS, 'no') + postGcStatus = self._configFile.get(Config.POST_GC_STATUS, "no") postGcStatus = postGcStatus.lower() - if postGcStatus not in ('yes', 'no'): - raise ValueError('PostGcStatus value should be yes/no') - return postGcStatus == 'yes' + if postGcStatus not in ("yes", "no"): + raise ValueError("PostGcStatus value should be yes/no") + return postGcStatus == "yes" @property def default_run_post_script(self): @@ -123,11 +125,22 @@ class Config(object): is absent in VM Tools configuration """ defaultRunPostScript = self._configFile.get( - Config.DEFAULT_RUN_POST_SCRIPT, - 'no') + Config.DEFAULT_RUN_POST_SCRIPT, "no" + ) defaultRunPostScript = defaultRunPostScript.lower() - if defaultRunPostScript not in ('yes', 'no'): - raise ValueError('defaultRunPostScript value should be yes/no') - return defaultRunPostScript == 'yes' + if defaultRunPostScript not in ("yes", "no"): + raise ValueError("defaultRunPostScript value should be yes/no") + return defaultRunPostScript == "yes" + + @property + def meta_data_name(self): + """Return the name of cloud-init meta data.""" + return self._configFile.get(Config.CLOUDINIT_META_DATA, None) + + @property + def user_data_name(self): + """Return the name of cloud-init user data.""" + return self._configFile.get(Config.CLOUDINIT_USER_DATA, None) + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py index 2ab22de9..8240ea8f 100644 --- 
a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py +++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py @@ -9,8 +9,7 @@ import logging import os import stat -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util LOG = logging.getLogger(__name__) @@ -24,8 +23,7 @@ class CustomScriptConstant(object): # The user defined custom script CUSTOM_SCRIPT_NAME = "customize.sh" - CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR, - CUSTOM_SCRIPT_NAME) + CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR, CUSTOM_SCRIPT_NAME) POST_CUSTOM_PENDING_MARKER = "/.guest-customization-post-reboot-pending" # The cc_scripts_per_instance script to launch custom script POST_CUSTOM_SCRIPT_NAME = "post-customize-guest.sh" @@ -39,22 +37,25 @@ class RunCustomScript(object): def prepare_script(self): if not os.path.exists(self.scriptpath): - raise CustomScriptNotFound("Script %s not found!! " - "Cannot execute custom script!" - % self.scriptpath) + raise CustomScriptNotFound( + "Script %s not found!! Cannot execute custom script!" + % self.scriptpath + ) util.ensure_dir(CustomScriptConstant.CUSTOM_TMP_DIR) - LOG.debug("Copying custom script to %s", - CustomScriptConstant.CUSTOM_SCRIPT) + LOG.debug( + "Copying custom script to %s", CustomScriptConstant.CUSTOM_SCRIPT + ) util.copy(self.scriptpath, CustomScriptConstant.CUSTOM_SCRIPT) # Strip any CR characters from the decoded script - content = util.load_file( - CustomScriptConstant.CUSTOM_SCRIPT).replace("\r", "") - util.write_file(CustomScriptConstant.CUSTOM_SCRIPT, - content, - mode=0o544) + content = util.load_file(CustomScriptConstant.CUSTOM_SCRIPT).replace( + "\r", "" + ) + util.write_file( + CustomScriptConstant.CUSTOM_SCRIPT, content, mode=0o544 + ) class PreCustomScript(RunCustomScript): @@ -70,8 +71,8 @@ class PostCustomScript(RunCustomScript): super(PostCustomScript, self).__init__(scriptname, directory) self.ccScriptsDir = ccScriptsDir self.ccScriptPath = os.path.join( - ccScriptsDir, - CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME) + ccScriptsDir, CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME + ) def execute(self): """ @@ -81,15 +82,17 @@ class PostCustomScript(RunCustomScript): """ self.prepare_script() - LOG.debug("Copying post customize run script to %s", - self.ccScriptPath) + LOG.debug("Copying post customize run script to %s", self.ccScriptPath) util.copy( - os.path.join(self.directory, - CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME), - self.ccScriptPath) + os.path.join( + self.directory, CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME + ), + self.ccScriptPath, + ) st = os.stat(self.ccScriptPath) os.chmod(self.ccScriptPath, st.st_mode | stat.S_IEXEC) LOG.info("Creating post customization pending marker") util.ensure_file(CustomScriptConstant.POST_CUSTOM_PENDING_MARKER) + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py index fc034c95..845294ec 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_file.py +++ b/cloudinit/sources/helpers/vmware/imc/config_file.py @@ -35,7 +35,7 @@ class ConfigFile(ConfigSource, dict): key = key.strip() val = val.strip() - if key.startswith('-') or '|-' in key: + if key.startswith("-") or "|-" in key: canLog = False else: canLog = True @@ -59,7 +59,7 @@ class ConfigFile(ConfigSource, dict): Keyword arguments: filename - The full path to the config file. 
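# NOTE: illustrative sketch, not applied by this patch. A condensed,
# standalone equivalent of this loader: parse with a case-preserving
# configparser and flatten every entry to "CATEGORY|key":
import configparser

def load_flat(filename):
    config = configparser.ConfigParser()
    config.optionxform = str  # keep key case, e.g. "HOSTNAME"
    config.read(filename)
    flat = {}
    for category in config.sections():
        for key, value in config.items(category):
            flat[category + "|" + key] = value
    return flat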
""" - logger.info('Parsing the config file %s.', filename) + logger.info("Parsing the config file %s.", filename) config = configparser.ConfigParser() config.optionxform = str @@ -71,7 +71,7 @@ class ConfigFile(ConfigSource, dict): logger.debug("FOUND CATEGORY = '%s'", category) for (key, value) in config.items(category): - self._insertKey(category + '|' + key, value) + self._insertKey(category + "|" + key, value) def should_keep_current_value(self, key): """ @@ -115,4 +115,5 @@ class ConfigFile(ConfigSource, dict): """ return len([key for key in self if key.startswith(prefix)]) + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py index 5899d8f7..3b3b2d5a 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py +++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py @@ -11,4 +11,5 @@ from .config_source import ConfigSource class ConfigNamespace(ConfigSource): """Specifies the Config Namespace.""" + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index 9cd2c0c0..6c135f48 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -9,9 +9,8 @@ import logging import os import re -from cloudinit.net.network_state import mask_to_net_prefix -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util +from cloudinit.net.network_state import ipv4_mask_to_net_prefix logger = logging.getLogger(__name__) @@ -63,8 +62,10 @@ class NicConfigurator(object): if not primary_nics: return None elif len(primary_nics) > 1: - raise Exception('There can only be one primary nic', - [nic.mac for nic in primary_nics]) + raise Exception( + "There can only be one primary nic", + [nic.mac for nic in primary_nics], + ) else: return primary_nics[0] @@ -73,17 +74,17 @@ class NicConfigurator(object): Create the mac2Name dictionary The mac address(es) are in the lower case """ - cmd = ['ip', 'addr', 'show'] + cmd = ["ip", "addr", "show"] output, _err = subp.subp(cmd) - sections = re.split(r'\n\d+: ', '\n' + output)[1:] + sections = re.split(r"\n\d+: ", "\n" + output)[1:] - macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))' + macPat = r"link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))" for section in sections: match = re.search(macPat, section) if not match: # Only keep info about nics continue mac = match.group(1).lower() - name = section.split(':', 1)[0] + name = section.split(":", 1)[0] self.mac2Name[mac] = name def gen_one_nic(self, nic): @@ -95,11 +96,11 @@ class NicConfigurator(object): mac = nic.mac.lower() name = self.mac2Name.get(mac) if not name: - raise ValueError('No known device has MACADDR: %s' % nic.mac) + raise ValueError("No known device has MACADDR: %s" % nic.mac) nics_cfg_list = [] - cfg = {'type': 'physical', 'name': name, 'mac_address': mac} + cfg = {"type": "physical", "name": name, "mac_address": mac} subnet_list = [] route_list = [] @@ -114,7 +115,7 @@ class NicConfigurator(object): subnet_list.extend(subnets) route_list.extend(routes) - cfg.update({'subnets': subnet_list}) + cfg.update({"subnets": subnet_list}) nics_cfg_list.append(cfg) if route_list: @@ -135,17 +136,17 @@ class NicConfigurator(object): route_list = [] if nic.onboot: - subnet.update({'control': 'auto'}) + subnet.update({"control": "auto"}) bootproto = nic.bootProto.lower() - if nic.ipv4_mode.lower() == 'disabled': - 
bootproto = 'manual' + if nic.ipv4_mode.lower() == "disabled": + bootproto = "manual" - if bootproto != 'static': - subnet.update({'type': 'dhcp'}) + if bootproto != "static": + subnet.update({"type": "dhcp"}) return ([subnet], route_list) else: - subnet.update({'type': 'static'}) + subnet.update({"type": "static"}) # Static Ipv4 addrs = nic.staticIpv4 @@ -154,20 +155,21 @@ class NicConfigurator(object): v4 = addrs[0] if v4.ip: - subnet.update({'address': v4.ip}) + subnet.update({"address": v4.ip}) if v4.netmask: - subnet.update({'netmask': v4.netmask}) + subnet.update({"netmask": v4.netmask}) # Add the primary gateway if nic.primary and v4.gateways: self.ipv4PrimaryGateway = v4.gateways[0] - subnet.update({'gateway': self.ipv4PrimaryGateway}) + subnet.update({"gateway": self.ipv4PrimaryGateway}) return ([subnet], route_list) # Add routes if there is no primary nic if not self._primaryNic and v4.gateways: subnet.update( - {'routes': self.gen_ipv4_route(nic, v4.gateways, v4.netmask)}) + {"routes": self.gen_ipv4_route(nic, v4.gateways, v4.netmask)} + ) return ([subnet], route_list) @@ -180,14 +182,18 @@ class NicConfigurator(object): """ route_list = [] - cidr = mask_to_net_prefix(netmask) + cidr = ipv4_mask_to_net_prefix(netmask) for gateway in gateways: destination = "%s/%d" % (gen_subnet(gateway, netmask), cidr) - route_list.append({'destination': destination, - 'type': 'route', - 'gateway': gateway, - 'metric': 10000}) + route_list.append( + { + "destination": destination, + "type": "route", + "gateway": gateway, + "metric": 10000, + } + ) return route_list @@ -208,9 +214,11 @@ class NicConfigurator(object): addrs = nic.staticIpv6 for addr in addrs: - subnet = {'type': 'static6', - 'address': addr.ip, - 'netmask': addr.netmask} + subnet = { + "type": "static6", + "address": addr.ip, + "netmask": addr.netmask, + } subnet_list.append(subnet) # TODO: Add the primary gateway @@ -226,9 +234,9 @@ class NicConfigurator(object): route_list = [] for addr in addrs: - route_list.append({'type': 'route', - 'gateway': addr.gateway, - 'metric': 10000}) + route_list.append( + {"type": "route", "gateway": addr.gateway, "metric": 10000} + ) return route_list @@ -246,7 +254,7 @@ class NicConfigurator(object): return nics_cfg_list def clear_dhcp(self): - logger.info('Clearing DHCP leases') + logger.info("Clearing DHCP leases") # Ignore the return code 1. subp.subp(["pkill", "dhclient"], rcs=[0, 1]) @@ -262,11 +270,12 @@ class NicConfigurator(object): logger.info("Debian OS not detected. 
Skipping the configure step") return - containingDir = '/etc/network' + containingDir = "/etc/network" - interfaceFile = os.path.join(containingDir, 'interfaces') - originalFile = os.path.join(containingDir, - 'interfaces.before_vmware_customization') + interfaceFile = os.path.join(containingDir, "interfaces") + originalFile = os.path.join( + containingDir, "interfaces.before_vmware_customization" + ) if not os.path.exists(originalFile) and os.path.exists(interfaceFile): os.rename(interfaceFile, originalFile) @@ -274,12 +283,13 @@ class NicConfigurator(object): lines = [ "# DO NOT EDIT THIS FILE BY HAND --" " AUTOMATICALLY GENERATED BY cloud-init", - "source /etc/network/interfaces.d/*.cfg", + "source /etc/network/interfaces.d/*", "source-directory /etc/network/interfaces.d", ] - util.write_file(interfaceFile, content='\n'.join(lines)) + util.write_file(interfaceFile, content="\n".join(lines)) self.clear_dhcp() + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py index d16a7690..4d3967a1 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py +++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py @@ -9,8 +9,7 @@ import logging import os -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util LOG = logging.getLogger(__name__) @@ -20,6 +19,7 @@ class PasswordConfigurator(object): Class for changing configurations related to passwords in a VM. Includes setting and expiring passwords. """ + def configure(self, passwd, resetPasswd, distro): """ Main method to perform all functionalities based on configuration file @@ -28,25 +28,25 @@ class PasswordConfigurator(object): @param resetPasswd: boolean to determine if password needs to be reset. @return cfg: dict to be used by cloud-init set_passwd code. """ - LOG.info('Starting password configuration') + LOG.info("Starting password configuration") if passwd: passwd = util.b64d(passwd) allRootUsers = [] - for line in open('/etc/passwd', 'r'): - if line.split(':')[2] == '0': - allRootUsers.append(line.split(':')[0]) + for line in open("/etc/passwd", "r"): + if line.split(":")[2] == "0": + allRootUsers.append(line.split(":")[0]) # read shadow file and check for each user, if it's uid 0 or root. uidUsersList = [] - for line in open('/etc/shadow', 'r'): - user = line.split(':')[0] + for line in open("/etc/shadow", "r"): + user = line.split(":")[0] if user in allRootUsers: uidUsersList.append(user) if passwd: - LOG.info('Setting admin password') - distro.set_passwd('root', passwd) + LOG.info("Setting admin password") + distro.set_passwd("root", passwd) if resetPasswd: self.reset_password(uidUsersList) - LOG.info('Configure Password completed!') + LOG.info("Configure Password completed!") def reset_password(self, uidUserList): """ @@ -54,15 +54,19 @@ class PasswordConfigurator(object): not succeeded using passwd command. Log failure message otherwise. @param: list of users for which to expire password.
""" - LOG.info('Expiring password.') + LOG.info("Expiring password.") for user in uidUserList: try: - subp.subp(['passwd', '--expire', user]) + subp.subp(["passwd", "--expire", user]) except subp.ProcessExecutionError as e: - if os.path.exists('/usr/bin/chage'): - subp.subp(['chage', '-d', '0', user]) + if os.path.exists("/usr/bin/chage"): + subp.subp(["chage", "-d", "0", user]) else: - LOG.warning('Failed to expire password for %s with error: ' - '%s', user, e) + LOG.warning( + "Failed to expire password for %s with error: %s", + user, + e, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py index 7ec06a9c..e99f9b43 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_source.py +++ b/cloudinit/sources/helpers/vmware/imc/config_source.py @@ -9,4 +9,5 @@ class ConfigSource(object): """Specifies a source for the Config Content.""" + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py index 65ae7390..eda84cfb 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py @@ -11,5 +11,7 @@ class GuestCustErrorEnum(object): GUESTCUST_ERROR_SUCCESS = 0 GUESTCUST_ERROR_SCRIPT_DISABLED = 6 + GUESTCUST_ERROR_WRONG_META_FORMAT = 9 + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py index e84c1cb0..33169a7e 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py @@ -14,4 +14,5 @@ class GuestCustEventEnum(object): GUESTCUST_EVENT_ENABLE_NICS = 103 GUESTCUST_EVENT_QUERY_NICS = 104 + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py index a8211dea..c74fbc8b 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py @@ -12,4 +12,5 @@ class GuestCustStateEnum(object): GUESTCUST_STATE_RUNNING = 4 GUESTCUST_STATE_DONE = 5 + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index d919f693..08763e62 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -73,7 +73,7 @@ def get_nics_to_enable(nicsfilepath): if not os.path.exists(nicsfilepath): return None - with open(nicsfilepath, 'r') as fp: + with open(nicsfilepath, "r") as fp: nics = fp.read(NICS_SIZE) return nics @@ -95,7 +95,8 @@ def enable_nics(nics): (out, _err) = set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS, - nics) + nics, + ) if not out: time.sleep(enableNicsWaitCount * enableNicsWaitSeconds) continue @@ -108,32 +109,36 @@ def enable_nics(nics): (out, _err) = set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS, - nics) + nics, + ) if out and out == NICS_STATUS_CONNECTED: logger.info("NICS are connected on %d second", count) return time.sleep(enableNicsWaitSeconds) - logger.warning("Can't connect network interfaces after %d attempts", - enableNicsWaitRetries) + logger.warning( + "Can't connect network interfaces after %d attempts", + 
enableNicsWaitRetries, + ) def get_tools_config(section, key, defaultVal): - """ Return the value of [section] key from VMTools configuration. + """Return the value of [section] key from VMTools configuration. - @param section: String of section to read from VMTools config - @returns: String value from key in [section] or defaultVal if - [section] is not present or vmware-toolbox-cmd is - not installed. + @param section: String of section to read from VMTools config + @returns: String value from key in [section] or defaultVal if + [section] is not present or vmware-toolbox-cmd is + not installed. """ - if not subp.which('vmware-toolbox-cmd'): + if not subp.which("vmware-toolbox-cmd"): logger.debug( - 'vmware-toolbox-cmd not installed, returning default value') + "vmware-toolbox-cmd not installed, returning default value" + ) return defaultVal - cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key] + cmd = ["vmware-toolbox-cmd", "config", "get", section, key] try: (outText, _) = subp.subp(cmd) @@ -141,22 +146,27 @@ def get_tools_config(section, key, defaultVal): if e.exit_code == 69: logger.debug( "vmware-toolbox-cmd returned 69 (unavailable) for cmd: %s." - " Return default value: %s", " ".join(cmd), defaultVal) + " Return default value: %s", + " ".join(cmd), + defaultVal, + ) else: logger.error("Failed running %s[%s]", cmd, e.exit_code) logger.exception(e) return defaultVal retValue = defaultVal - m = re.match(r'([^=]+)=(.*)', outText) + m = re.match(r"([^=]+)=(.*)", outText) if m: retValue = m.group(2).strip() - logger.debug("Get tools config: [%s] %s = %s", - section, key, retValue) + logger.debug("Get tools config: [%s] %s = %s", section, key, retValue) else: logger.debug( "Tools config: [%s] %s is not found, return default value: %s", - section, key, retValue) + section, + key, + retValue, + ) return retValue diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py index d793bdeb..673204a0 100644 --- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py +++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py @@ -18,18 +18,19 @@ class Ipv4ModeEnum(object): # The legacy mode which only allows dhcp/static based on whether IPv4 # addresses list is empty or not - IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE' + IPV4_MODE_BACKWARDS_COMPATIBLE = "BACKWARDS_COMPATIBLE" # IPv4 must use static address. Reserved for future use - IPV4_MODE_STATIC = 'STATIC' + IPV4_MODE_STATIC = "STATIC" # IPv4 must use DHCPv4. Reserved for future use - IPV4_MODE_DHCP = 'DHCP' + IPV4_MODE_DHCP = "DHCP" # IPv4 must be disabled - IPV4_MODE_DISABLED = 'DISABLED' + IPV4_MODE_DISABLED = "DISABLED" # IPv4 settings should be left untouched. 
Reserved for future use - IPV4_MODE_AS_IS = 'AS_IS' + IPV4_MODE_AS_IS = "AS_IS" + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py index ef8f87f7..7b742d0f 100644 --- a/cloudinit/sources/helpers/vmware/imc/nic.py +++ b/cloudinit/sources/helpers/vmware/imc/nic.py @@ -20,7 +20,7 @@ class Nic(NicBase): self._configFile = configFile def _get(self, what): - return self._configFile.get(self.name + '|' + what, None) + return self._configFile.get(self.name + "|" + what, None) def _get_count_with_prefix(self, prefix): return self._configFile.get_count_with_prefix(self.name + prefix) @@ -31,29 +31,29 @@ class Nic(NicBase): @property def mac(self): - return self._get('MACADDR').lower() + return self._get("MACADDR").lower() @property def primary(self): - value = self._get('PRIMARY') + value = self._get("PRIMARY") if value: value = value.lower() - return value == 'yes' or value == 'true' + return value == "yes" or value == "true" else: return False @property def onboot(self): - value = self._get('ONBOOT') + value = self._get("ONBOOT") if value: value = value.lower() - return value == 'yes' or value == 'true' + return value == "yes" or value == "true" else: return False @property def bootProto(self): - value = self._get('BOOTPROTO') + value = self._get("BOOTPROTO") if value: return value.lower() else: @@ -61,7 +61,7 @@ class Nic(NicBase): @property def ipv4_mode(self): - value = self._get('IPv4_MODE') + value = self._get("IPv4_MODE") if value: return value.lower() else: @@ -80,7 +80,7 @@ class Nic(NicBase): @property def staticIpv6(self): - cnt = self._get_count_with_prefix('|IPv6ADDR|') + cnt = self._get_count_with_prefix("|IPv6ADDR|") if not cnt: return None @@ -100,17 +100,17 @@ class StaticIpv4Addr(StaticIpv4Base): @property def ip(self): - return self._nic._get('IPADDR') + return self._nic._get("IPADDR") @property def netmask(self): - return self._nic._get('NETMASK') + return self._nic._get("NETMASK") @property def gateways(self): - value = self._nic._get('GATEWAY') + value = self._nic._get("GATEWAY") if value: - return [x.strip() for x in value.split(',')] + return [x.strip() for x in value.split(",")] else: return None @@ -124,14 +124,15 @@ class StaticIpv6Addr(StaticIpv6Base): @property def ip(self): - return self._nic._get('IPv6ADDR|' + str(self._index)) + return self._nic._get("IPv6ADDR|" + str(self._index)) @property def netmask(self): - return self._nic._get('IPv6NETMASK|' + str(self._index)) + return self._nic._get("IPv6NETMASK|" + str(self._index)) @property def gateway(self): - return self._nic._get('IPv6GATEWAY|' + str(self._index)) + return self._nic._get("IPv6GATEWAY|" + str(self._index)) + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py index de7b866d..37d9602f 100644 --- a/cloudinit/sources/helpers/vmware/imc/nic_base.py +++ b/cloudinit/sources/helpers/vmware/imc/nic_base.py @@ -18,7 +18,7 @@ class NicBase(object): Retrieves the mac address of the nic @return (str) : the MACADDR setting """ - raise NotImplementedError('MACADDR') + raise NotImplementedError("MACADDR") @property def primary(self): @@ -29,7 +29,7 @@ class NicBase(object): be set. 
@return (bool): the PRIMARY setting """ - raise NotImplementedError('PRIMARY') + raise NotImplementedError("PRIMARY") @property def onboot(self): @@ -37,7 +37,7 @@ class NicBase(object): Retrieves whether the nic should be up at the boot time @return (bool) : the ONBOOT setting """ - raise NotImplementedError('ONBOOT') + raise NotImplementedError("ONBOOT") @property def bootProto(self): @@ -45,7 +45,7 @@ class NicBase(object): Retrieves the boot protocol of the nic @return (str): the BOOTPROTO setting, valid values: dhcp and static. """ - raise NotImplementedError('BOOTPROTO') + raise NotImplementedError("BOOTPROTO") @property def ipv4_mode(self): @@ -54,7 +54,7 @@ class NicBase(object): @return (str): the IPv4_MODE setting, valid values: backwards_compatible, static, dhcp, disabled, as_is """ - raise NotImplementedError('IPv4_MODE') + raise NotImplementedError("IPv4_MODE") @property def staticIpv4(self): @@ -62,7 +62,7 @@ class NicBase(object): Retrieves the static IPv4 configuration of the nic @return (StaticIpv4Base list): the static ipv4 setting """ - raise NotImplementedError('Static IPv4') + raise NotImplementedError("Static IPv4") @property def staticIpv6(self): @@ -70,7 +70,7 @@ class NicBase(object): Retrieves the IPv6 configuration of the nic @return (StaticIpv6Base list): the static ipv6 setting """ - raise NotImplementedError('Static Ipv6') + raise NotImplementedError("Static Ipv6") def validate(self): """ @@ -78,7 +78,7 @@ class NicBase(object): For example, the staticIpv4 property is required and should not be empty when ipv4Mode is STATIC """ - raise NotImplementedError('Check constraints on properties') + raise NotImplementedError("Check constraints on properties") class StaticIpv4Base(object): @@ -93,7 +93,7 @@ class StaticIpv4Base(object): Retrieves the Ipv4 address @return (str): the IPADDR setting """ - raise NotImplementedError('Ipv4 Address') + raise NotImplementedError("Ipv4 Address") @property def netmask(self): @@ -101,7 +101,7 @@ class StaticIpv4Base(object): Retrieves the Ipv4 NETMASK setting @return (str): the NETMASK setting """ - raise NotImplementedError('Ipv4 NETMASK') + raise NotImplementedError("Ipv4 NETMASK") @property def gateways(self): @@ -109,7 +109,7 @@ class StaticIpv4Base(object): Retrieves the gateways on this Ipv4 subnet @return (str list): the GATEWAY setting """ - raise NotImplementedError('Ipv4 GATEWAY') + raise NotImplementedError("Ipv4 GATEWAY") class StaticIpv6Base(object): @@ -123,7 +123,7 @@ class StaticIpv6Base(object): Retrieves the Ipv6 address @return (str): the IPv6ADDR setting """ - raise NotImplementedError('Ipv6 Address') + raise NotImplementedError("Ipv6 Address") @property def netmask(self): @@ -131,7 +131,7 @@ class StaticIpv6Base(object): Retrieves the Ipv6 NETMASK setting @return (str): the IPv6NETMASK setting """ - raise NotImplementedError('Ipv6 NETMASK') + raise NotImplementedError("Ipv6 NETMASK") @property def gateway(self): @@ -139,6 +139,7 @@ class StaticIpv6Base(object): Retrieves the Ipv6 GATEWAY setting @return (str): the IPv6GATEWAY setting """ - raise NotImplementedError('Ipv6 GATEWAY') + raise NotImplementedError("Ipv6 GATEWAY") + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py new file mode 100644 index 00000000..88a21034 --- /dev/null +++ b/cloudinit/sources/helpers/vultr.py @@ -0,0 +1,230 @@ +# Author: Eric Benner <ebenner@vultr.com> +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +import json +from functools import lru_cache + +from cloudinit import dmi +from cloudinit import log as log +from cloudinit import net, subp, url_helper, util +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError + +# Get LOG +LOG = log.getLogger(__name__) + + +@lru_cache() +def get_metadata(url, timeout, retries, sec_between, agent): + # Bring up interface (and try until one works) + exception = RuntimeError("Failed to DHCP") + + # Seek iface with DHCP + for iface in net.get_interfaces(): + # Skip dummy, lo interfaces + if "dummy" in iface[0]: + continue + if "lo" == iface[0]: + continue + try: + with EphemeralDHCPv4( + iface=iface[0], connectivity_url_data={"url": url} + ): + # Fetch the metadata + v1 = read_metadata(url, timeout, retries, sec_between, agent) + + return json.loads(v1) + except (NoDHCPLeaseError, subp.ProcessExecutionError) as exc: + LOG.error("DHCP Exception: %s", exc) + exception = exc + raise exception + + +# Read the system information from SMBIOS +def get_sysinfo(): + return { + "manufacturer": dmi.read_dmi_data("system-manufacturer"), + "subid": dmi.read_dmi_data("system-serial-number"), + } + + +# Assumes Vultr has already been confirmed +def is_baremetal(): + if get_sysinfo()["manufacturer"] != "Vultr": + return True + return False + + +# Confirm this is Vultr +def is_vultr(): + # VC2, VDC, and HFC use DMI + sysinfo = get_sysinfo() + + if sysinfo["manufacturer"] == "Vultr": + return True + + # Baremetal requires a kernel parameter + if "vultr" in util.get_cmdline().split(): + return True + + return False + + +# Read Metadata endpoint +def read_metadata(url, timeout, retries, sec_between, agent): + url = "%s/v1.json" % url + + # Announce OS details so we can handle non-Vultr origin + # images and provide correct vendordata generation.
+ headers = {"Metadata-Token": "cloudinit", "User-Agent": agent} + + response = url_helper.readurl( + url, + timeout=timeout, + retries=retries, + headers=headers, + sec_between=sec_between, + ) + + if not response.ok(): + raise RuntimeError( + "Failed to connect to %s: Code: %s" % url, response.code + ) + + return response.contents.decode() + + +# Wrapped for caching +@lru_cache() +def get_interface_map(): + return net.get_interfaces_by_mac() + + +# Convert macs to nics +def get_interface_name(mac): + macs_to_nic = get_interface_map() + + if mac not in macs_to_nic: + return None + + return macs_to_nic.get(mac) + + +# Generate network configs +def generate_network_config(interfaces): + network = { + "version": 1, + "config": [{"type": "nameserver", "address": ["108.61.10.10"]}], + } + + # Prepare interface 0, public + if len(interfaces) > 0: + public = generate_interface(interfaces[0], primary=True) + network["config"].append(public) + + # Prepare additional interfaces, private + for i in range(1, len(interfaces)): + private = generate_interface(interfaces[i]) + network["config"].append(private) + + return network + + +def generate_interface(interface, primary=False): + interface_name = get_interface_name(interface["mac"]) + if not interface_name: + raise RuntimeError( + "Interface: %s could not be found on the system" % interface["mac"] + ) + + netcfg = { + "name": interface_name, + "type": "physical", + "mac_address": interface["mac"], + } + + if primary: + netcfg["accept-ra"] = 1 + netcfg["subnets"] = [ + {"type": "dhcp", "control": "auto"}, + {"type": "ipv6_slaac", "control": "auto"}, + ] + + if not primary: + netcfg["subnets"] = [ + { + "type": "static", + "control": "auto", + "address": interface["ipv4"]["address"], + "netmask": interface["ipv4"]["netmask"], + } + ] + + generate_interface_routes(interface, netcfg) + generate_interface_additional_addresses(interface, netcfg) + + # Add config to template + return netcfg + + +def generate_interface_routes(interface, netcfg): + # Options that may or may not be used + if "mtu" in interface: + netcfg["mtu"] = interface["mtu"] + + if "accept-ra" in interface: + netcfg["accept-ra"] = interface["accept-ra"] + + if "routes" in interface: + netcfg["subnets"][0]["routes"] = interface["routes"] + + +def generate_interface_additional_addresses(interface, netcfg): + # Check for additional IP's + additional_count = len(interface["ipv4"]["additional"]) + if "ipv4" in interface and additional_count > 0: + for additional in interface["ipv4"]["additional"]: + add = { + "type": "static", + "control": "auto", + "address": additional["address"], + "netmask": additional["netmask"], + } + + if "routes" in additional: + add["routes"] = additional["routes"] + + netcfg["subnets"].append(add) + + # Check for additional IPv6's + additional_count = len(interface["ipv6"]["additional"]) + if "ipv6" in interface and additional_count > 0: + for additional in interface["ipv6"]["additional"]: + add = { + "type": "static6", + "control": "auto", + "address": "%s/%s" + % (additional["network"], additional["prefix"]), + } + + if "routes" in additional: + add["routes"] = additional["routes"] + + netcfg["subnets"].append(add) + + +# Make required adjustments to the network configs provided +def add_interface_names(interfaces): + for interface in interfaces: + interface_name = get_interface_name(interface["mac"]) + if not interface_name: + raise RuntimeError( + "Interface: %s could not be found on the system" + % interface["mac"] + ) + interface["name"] = interface_name + + 
return interfaces + + # vi: ts=4 expandtab
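
The imc/config_file.py hunks above keep ConfigFile's scheme of flattening an INI-style cust.cfg into flat "CATEGORY|KEY" entries (config.optionxform = str preserves key case), which is why constants such as Config.HOSTNAME read "NETWORK|HOSTNAME" and why nics is looked up as "NIC-CONFIG|NICS". A minimal sketch of that flattening; the cust.cfg fragment is invented for illustration, and only the section and key names mirror the diff:

import configparser

sample = """\
[NETWORK]
HOSTNAME = vm01
DOMAINNAME = example.local

[NIC-CONFIG]
NICS = NIC1
"""

parser = configparser.ConfigParser()
parser.optionxform = str  # preserve key case, as ConfigFile does
parser.read_string(sample)

flat = {}
for category in parser.sections():
    for key, value in parser.items(category):
        # mirrors ConfigFile._insertKey(category + "|" + key, value)
        flat[category + "|" + key] = value

assert flat["NETWORK|HOSTNAME"] == "vm01"
assert flat["NIC-CONFIG|NICS"] == "NIC1"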
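
In the imc/config_nic.py hunks, gen_ipv4_route builds one static route per gateway, with a "subnet/prefix" destination and a fixed metric of 10000; the diff also swaps mask_to_net_prefix for ipv4_mask_to_net_prefix. A rough stand-in for that logic using only the stdlib, where ipaddress replaces cloud-init's gen_subnet and ipv4_mask_to_net_prefix helpers (neither is shown in this diff):

import ipaddress

def sketch_ipv4_routes(gateways, netmask):
    routes = []
    for gateway in gateways:
        # derive the destination network from gateway + netmask, as
        # gen_subnet()/ipv4_mask_to_net_prefix() do upstream
        net = ipaddress.ip_network("%s/%s" % (gateway, netmask), strict=False)
        routes.append({
            "destination": str(net),  # e.g. "192.168.0.0/24"
            "type": "route",
            "gateway": gateway,
            "metric": 10000,
        })
    return routes

assert sketch_ipv4_routes(["192.168.0.10"], "255.255.255.0") == [{
    "destination": "192.168.0.0/24",
    "type": "route",
    "gateway": "192.168.0.10",
    "metric": 10000,
}]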
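
guestcust_util.get_tools_config shells out to "vmware-toolbox-cmd config get <section> <key>" and scrapes stdout with re.match(r"([^=]+)=(.*)", ...), falling back to the default on exit code 69 or on a non-matching line. A sketch of just the parsing step; the sample output line is an assumption based on the "key = value" shape the regex implies:

import re

def parse_tools_config_output(out_text, default_val):
    # a matching line looks like "deployPkg.enable-custom-scripts = true"
    m = re.match(r"([^=]+)=(.*)", out_text)
    if m:
        return m.group(2).strip()
    return default_val

assert parse_tools_config_output(
    "deployPkg.enable-custom-scripts = true\n", "false") == "true"
assert parse_tools_config_output("", "false") == "false"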
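
Finally, in the new helpers/vultr.py, generate_network_config maps the metadata's interface list onto a version-1 network config: a fixed Vultr nameserver entry, interface 0 as the public NIC (DHCP plus ipv6_slaac, with accept-ra), and every later interface as a private NIC with a static IPv4 subnet. A condensed sketch of that shape; the MACs, addresses, and the mac-to-name map (standing in for net.get_interfaces_by_mac()) are all made up:

def sketch_network_config(interfaces, mac_to_name):
    network = {
        "version": 1,
        "config": [{"type": "nameserver", "address": ["108.61.10.10"]}],
    }
    for i, interface in enumerate(interfaces):
        cfg = {
            "name": mac_to_name[interface["mac"]],
            "type": "physical",
            "mac_address": interface["mac"],
        }
        if i == 0:  # interface 0 is public: DHCP + SLAAC
            cfg["accept-ra"] = 1
            cfg["subnets"] = [
                {"type": "dhcp", "control": "auto"},
                {"type": "ipv6_slaac", "control": "auto"},
            ]
        else:  # later interfaces are private: static IPv4
            cfg["subnets"] = [{
                "type": "static",
                "control": "auto",
                "address": interface["ipv4"]["address"],
                "netmask": interface["ipv4"]["netmask"],
            }]
        network["config"].append(cfg)
    return network

cfg = sketch_network_config(
    [
        {"mac": "56:00:00:00:00:01"},
        {"mac": "5a:00:00:00:00:02",
         "ipv4": {"address": "10.1.96.5", "netmask": "255.255.224.0"}},
    ],
    {"56:00:00:00:00:01": "enp1s0", "5a:00:00:00:00:02": "enp8s0"},
)
assert cfg["config"][1]["subnets"][0]["type"] == "dhcp"
assert cfg["config"][2]["subnets"][0]["address"] == "10.1.96.5"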