author     James Falcon <james.falcon@canonical.com>  2021-12-15 20:16:38 -0600
committer  GitHub <noreply@github.com>                2021-12-15 19:16:38 -0700
commit     bae9b11da9ed7dd0b16fe5adeaf4774b7cc628cf (patch)
tree       1fbb3269fc87e39832e3286ef42eefd2b23fcd44 /cloudinit/sources/helpers
parent     2bcf4fa972fde686c2e3141c58e640640b44dd00 (diff)
Adopt Black and isort (SC-700) (#1157)
Applied Black and isort, fixed any linting issues, updated tox.ini and CI.
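
As context for the hunks below, a minimal sketch (not part of the commit) of the quote normalization Black applies; black.format_str and black.Mode are Black's public Python API, and the 79-column limit is an assumption inferred from the wrapping in this diff:

    # Illustrative only -- requires `pip install black`.
    import black

    src = "BOOT_EVENT_TYPE = 'boot-telemetry'\n"
    mode = black.Mode(line_length=79)  # assumed line length
    print(black.format_str(src, mode=mode), end="")
    # prints: BOOT_EVENT_TYPE = "boot-telemetry"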
Diffstat (limited to 'cloudinit/sources/helpers')
-rwxr-xr-x  cloudinit/sources/helpers/azure.py                            | 693
-rw-r--r--  cloudinit/sources/helpers/digitalocean.py                     | 195
-rw-r--r--  cloudinit/sources/helpers/hetzner.py                          |  15
-rw-r--r--  cloudinit/sources/helpers/netlink.py                          | 187
-rw-r--r--  cloudinit/sources/helpers/openstack.py                        | 438
-rw-r--r--  cloudinit/sources/helpers/upcloud.py                          |  12
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/boot_proto.py            |   5
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py                |  59
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_custom_script.py  |  45
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_file.py           |   7
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_namespace.py      |   1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py            |  84
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_passwd.py         |  38
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_source.py         |   1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_error.py       |   1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_event.py       |   1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_state.py       |   1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py        |  46
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/ipv4_mode.py             |  11
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic.py                   |  33
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic_base.py              |  29
-rw-r--r--  cloudinit/sources/helpers/vultr.py                            | 172
22 files changed, 1144 insertions(+), 930 deletions(-)
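
Likewise, a hedged sketch of the import consolidation isort performs in the first azure.py hunk; isort.code is isort 5's public API, and the black profile matches the commit's stated tooling:

    # Illustrative only -- requires `pip install isort`.
    import isort

    src = (
        "from cloudinit import util\n"
        "from cloudinit import subp\n"
        "from cloudinit import distros\n"
    )
    print(isort.code(src, profile="black", line_length=79), end="")
    # prints: from cloudinit import distros, subp, util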
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index a5ac1d57..50058fe0 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -6,27 +6,28 @@ import os
import re
import socket
import struct
-import time
import textwrap
+import time
import zlib
-from errno import ENOENT
-
-from cloudinit.settings import CFG_BUILTIN
-from cloudinit.net import dhcp
-from cloudinit import stages
-from cloudinit import temp_utils
from contextlib import contextmanager
+from datetime import datetime
+from errno import ENOENT
from xml.etree import ElementTree
from xml.sax.saxutils import escape
-from cloudinit import subp
-from cloudinit import url_helper
-from cloudinit import util
-from cloudinit import version
-from cloudinit import distros
-from cloudinit.reporting import events
+from cloudinit import (
+ distros,
+ stages,
+ subp,
+ temp_utils,
+ url_helper,
+ util,
+ version,
+)
+from cloudinit.net import dhcp
from cloudinit.net.dhcp import EphemeralDHCPv4
-from datetime import datetime
+from cloudinit.reporting import events
+from cloudinit.settings import CFG_BUILTIN
LOG = logging.getLogger(__name__)
@@ -34,10 +35,10 @@ LOG = logging.getLogger(__name__)
# value is applied if the endpoint can't be found within a lease file
DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
-BOOT_EVENT_TYPE = 'boot-telemetry'
-SYSTEMINFO_EVENT_TYPE = 'system-info'
-DIAGNOSTIC_EVENT_TYPE = 'diagnostic'
-COMPRESSED_EVENT_TYPE = 'compressed'
+BOOT_EVENT_TYPE = "boot-telemetry"
+SYSTEMINFO_EVENT_TYPE = "system-info"
+DIAGNOSTIC_EVENT_TYPE = "diagnostic"
+COMPRESSED_EVENT_TYPE = "compressed"
# Maximum number of bytes of the cloud-init.log file that can be dumped to KVP
# at once. This number is based on the analysis done on a large sample of
# cloud-init.log files where the P95 of the file sizes was 537KB and the time
@@ -45,25 +46,29 @@ COMPRESSED_EVENT_TYPE = 'compressed'
MAX_LOG_TO_KVP_LENGTH = 512000
# File to store the last byte of cloud-init.log that was pushed to KVP. This
# file will be deleted with every VM reboot.
-LOG_PUSHED_TO_KVP_INDEX_FILE = '/run/cloud-init/log_pushed_to_kvp_index'
+LOG_PUSHED_TO_KVP_INDEX_FILE = "/run/cloud-init/log_pushed_to_kvp_index"
azure_ds_reporter = events.ReportEventStack(
name="azure-ds",
description="initialize reporter for azure ds",
- reporting_enabled=True)
+ reporting_enabled=True,
+)
DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE = (
- 'The VM encountered an error during deployment. '
- 'Please visit https://aka.ms/linuxprovisioningerror '
- 'for more information on remediation.')
+ "The VM encountered an error during deployment. "
+ "Please visit https://aka.ms/linuxprovisioningerror "
+ "for more information on remediation."
+)
def azure_ds_telemetry_reporter(func):
def impl(*args, **kwargs):
with events.ReportEventStack(
- name=func.__name__,
- description=func.__name__,
- parent=azure_ds_reporter):
+ name=func.__name__,
+ description=func.__name__,
+ parent=azure_ds_reporter,
+ ):
return func(*args, **kwargs)
+
return impl
@@ -79,16 +84,16 @@ def is_byte_swapped(previous_id, current_id):
def swap_bytestring(s, width=2):
dd = [byte for byte in textwrap.wrap(s, 2)]
dd.reverse()
- return ''.join(dd)
+ return "".join(dd)
- parts = current_id.split('-')
- swapped_id = '-'.join(
+ parts = current_id.split("-")
+ swapped_id = "-".join(
[
swap_bytestring(parts[0]),
swap_bytestring(parts[1]),
swap_bytestring(parts[2]),
parts[3],
- parts[4]
+ parts[4],
]
)
@@ -98,31 +103,29 @@ def is_byte_swapped(previous_id, current_id):
@azure_ds_telemetry_reporter
def get_boot_telemetry():
"""Report timestamps related to kernel initialization and systemd
- activation of cloud-init"""
+ activation of cloud-init"""
if not distros.uses_systemd():
- raise RuntimeError(
- "distro not using systemd, skipping boot telemetry")
+ raise RuntimeError("distro not using systemd, skipping boot telemetry")
LOG.debug("Collecting boot telemetry")
try:
kernel_start = float(time.time()) - float(util.uptime())
except ValueError as e:
- raise RuntimeError(
- "Failed to determine kernel start timestamp"
- ) from e
+ raise RuntimeError("Failed to determine kernel start timestamp") from e
try:
- out, _ = subp.subp(['/bin/systemctl',
- 'show', '-p',
- 'UserspaceTimestampMonotonic'],
- capture=True)
+ out, _ = subp.subp(
+ ["/bin/systemctl", "show", "-p", "UserspaceTimestampMonotonic"],
+ capture=True,
+ )
tsm = None
- if out and '=' in out:
+ if out and "=" in out:
tsm = out.split("=")[1]
if not tsm:
- raise RuntimeError("Failed to parse "
- "UserspaceTimestampMonotonic from systemd")
+ raise RuntimeError(
+ "Failed to parse UserspaceTimestampMonotonic from systemd"
+ )
user_start = kernel_start + (float(tsm) / 1000000)
except subp.ProcessExecutionError as e:
@@ -135,16 +138,23 @@ def get_boot_telemetry():
) from e
try:
- out, _ = subp.subp(['/bin/systemctl', 'show',
- 'cloud-init-local', '-p',
- 'InactiveExitTimestampMonotonic'],
- capture=True)
+ out, _ = subp.subp(
+ [
+ "/bin/systemctl",
+ "show",
+ "cloud-init-local",
+ "-p",
+ "InactiveExitTimestampMonotonic",
+ ],
+ capture=True,
+ )
tsm = None
- if out and '=' in out:
+ if out and "=" in out:
tsm = out.split("=")[1]
if not tsm:
- raise RuntimeError("Failed to parse "
- "InactiveExitTimestampMonotonic from systemd")
+ raise RuntimeError(
+ "Failed to parse InactiveExitTimestampMonotonic from systemd"
+ )
cloudinit_activation = kernel_start + (float(tsm) / 1000000)
except subp.ProcessExecutionError as e:
@@ -158,12 +168,16 @@ def get_boot_telemetry():
) from e
evt = events.ReportingEvent(
- BOOT_EVENT_TYPE, 'boot-telemetry',
- "kernel_start=%s user_start=%s cloudinit_activation=%s" %
- (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z',
- datetime.utcfromtimestamp(user_start).isoformat() + 'Z',
- datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'),
- events.DEFAULT_EVENT_ORIGIN)
+ BOOT_EVENT_TYPE,
+ "boot-telemetry",
+ "kernel_start=%s user_start=%s cloudinit_activation=%s"
+ % (
+ datetime.utcfromtimestamp(kernel_start).isoformat() + "Z",
+ datetime.utcfromtimestamp(user_start).isoformat() + "Z",
+ datetime.utcfromtimestamp(cloudinit_activation).isoformat() + "Z",
+ ),
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt)
# return the event for unit testing purpose
@@ -175,13 +189,22 @@ def get_system_info():
"""Collect and report system information"""
info = util.system_info()
evt = events.ReportingEvent(
- SYSTEMINFO_EVENT_TYPE, 'system information',
+ SYSTEMINFO_EVENT_TYPE,
+ "system information",
"cloudinit_version=%s, kernel_version=%s, variant=%s, "
"distro_name=%s, distro_version=%s, flavor=%s, "
- "python_version=%s" %
- (version.version_string(), info['release'], info['variant'],
- info['dist'][0], info['dist'][1], info['dist'][2],
- info['python']), events.DEFAULT_EVENT_ORIGIN)
+ "python_version=%s"
+ % (
+ version.version_string(),
+ info["release"],
+ info["variant"],
+ info["dist"][0],
+ info["dist"][1],
+ info["dist"][2],
+ info["python"],
+ ),
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt)
# return the event for unit testing purpose
@@ -189,13 +212,17 @@ def get_system_info():
def report_diagnostic_event(
- msg: str, *, logger_func=None) -> events.ReportingEvent:
+ msg: str, *, logger_func=None
+) -> events.ReportingEvent:
"""Report a diagnostic event"""
if callable(logger_func):
logger_func(msg)
evt = events.ReportingEvent(
- DIAGNOSTIC_EVENT_TYPE, 'diagnostic message',
- msg, events.DEFAULT_EVENT_ORIGIN)
+ DIAGNOSTIC_EVENT_TYPE,
+ "diagnostic message",
+ msg,
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt, excluded_handler_types={"log"})
# return the event for unit testing purpose
@@ -205,21 +232,26 @@ def report_diagnostic_event(
def report_compressed_event(event_name, event_content):
"""Report a compressed event"""
compressed_data = base64.encodebytes(zlib.compress(event_content))
- event_data = {"encoding": "gz+b64",
- "data": compressed_data.decode('ascii')}
+ event_data = {
+ "encoding": "gz+b64",
+ "data": compressed_data.decode("ascii"),
+ }
evt = events.ReportingEvent(
- COMPRESSED_EVENT_TYPE, event_name,
+ COMPRESSED_EVENT_TYPE,
+ event_name,
json.dumps(event_data),
- events.DEFAULT_EVENT_ORIGIN)
- events.report_event(evt,
- excluded_handler_types={"log", "print", "webhook"})
+ events.DEFAULT_EVENT_ORIGIN,
+ )
+ events.report_event(
+ evt, excluded_handler_types={"log", "print", "webhook"}
+ )
# return the event for unit testing purpose
return evt
@azure_ds_telemetry_reporter
-def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
+def push_log_to_kvp(file_name=CFG_BUILTIN["def_log_file"]):
"""Push a portion of cloud-init.log file or the whole file to KVP
based on the file size.
The first time this function is called after VM boot, It will push the last
@@ -237,23 +269,26 @@ def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
report_diagnostic_event(
"Dumping last {0} bytes of cloud-init.log file to KVP starting"
" from index: {1}".format(f.tell() - seek_index, seek_index),
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
f.seek(seek_index, os.SEEK_SET)
report_compressed_event("cloud-init.log", f.read())
util.write_file(LOG_PUSHED_TO_KVP_INDEX_FILE, str(f.tell()))
except Exception as ex:
report_diagnostic_event(
"Exception when dumping log file: %s" % repr(ex),
- logger_func=LOG.warning)
+ logger_func=LOG.warning,
+ )
LOG.debug("Dumping dmesg log to KVP")
try:
- out, _ = subp.subp(['dmesg'], decode=False, capture=True)
+ out, _ = subp.subp(["dmesg"], decode=False, capture=True)
report_compressed_event("dmesg", out)
except Exception as ex:
report_diagnostic_event(
"Exception when dumping dmesg log: %s" % repr(ex),
- logger_func=LOG.warning)
+ logger_func=LOG.warning,
+ )
@azure_ds_telemetry_reporter
@@ -263,16 +298,20 @@ def get_last_log_byte_pushed_to_kvp_index():
return int(f.read())
except IOError as e:
if e.errno != ENOENT:
- report_diagnostic_event("Reading LOG_PUSHED_TO_KVP_INDEX_FILE"
- " failed: %s." % repr(e),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Reading LOG_PUSHED_TO_KVP_INDEX_FILE failed: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
except ValueError as e:
- report_diagnostic_event("Invalid value in LOG_PUSHED_TO_KVP_INDEX_FILE"
- ": %s." % repr(e),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Invalid value in LOG_PUSHED_TO_KVP_INDEX_FILE: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
except Exception as e:
- report_diagnostic_event("Failed to get the last log byte pushed to KVP"
- ": %s." % repr(e), logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Failed to get the last log byte pushed to KVP: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
return 0
@@ -306,28 +345,31 @@ def http_with_retries(url, **kwargs) -> str:
sleep_duration_between_retries = 5
periodic_logging_attempts = 12
- if 'timeout' not in kwargs:
- kwargs['timeout'] = default_readurl_timeout
+ if "timeout" not in kwargs:
+ kwargs["timeout"] = default_readurl_timeout
# remove kwargs that cause url_helper.readurl to retry,
# since we are already implementing our own retry logic.
- if kwargs.pop('retries', None):
+ if kwargs.pop("retries", None):
LOG.warning(
- 'Ignoring retries kwarg passed in for '
- 'communication with Azure endpoint.')
- if kwargs.pop('infinite', None):
+ "Ignoring retries kwarg passed in for "
+ "communication with Azure endpoint."
+ )
+ if kwargs.pop("infinite", None):
LOG.warning(
- 'Ignoring infinite kwarg passed in for communication '
- 'with Azure endpoint.')
+ "Ignoring infinite kwarg passed in for communication "
+ "with Azure endpoint."
+ )
for attempt in range(1, max_readurl_attempts + 1):
try:
ret = url_helper.readurl(url, **kwargs)
report_diagnostic_event(
- 'Successful HTTP request with Azure endpoint %s after '
- '%d attempts' % (url, attempt),
- logger_func=LOG.debug)
+ "Successful HTTP request with Azure endpoint %s after "
+ "%d attempts" % (url, attempt),
+ logger_func=LOG.debug,
+ )
return ret
@@ -335,20 +377,20 @@ def http_with_retries(url, **kwargs) -> str:
exc = e
if attempt % periodic_logging_attempts == 0:
report_diagnostic_event(
- 'Failed HTTP request with Azure endpoint %s during '
- 'attempt %d with exception: %s' %
- (url, attempt, e),
- logger_func=LOG.debug)
+ "Failed HTTP request with Azure endpoint %s during "
+ "attempt %d with exception: %s" % (url, attempt, e),
+ logger_func=LOG.debug,
+ )
time.sleep(sleep_duration_between_retries)
raise exc
def build_minimal_ovf(
- username: str,
- hostname: str,
- disableSshPwd: str) -> bytes:
- OVF_ENV_TEMPLATE = textwrap.dedent('''\
+ username: str, hostname: str, disableSshPwd: str
+) -> bytes:
+ OVF_ENV_TEMPLATE = textwrap.dedent(
+ """\
<ns0:Environment xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1"
xmlns:ns1="http://schemas.microsoft.com/windowsazure"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
@@ -370,19 +412,19 @@ def build_minimal_ovf(
</ns1:PlatformSettings>
</ns1:PlatformSettingsSection>
</ns0:Environment>
- ''')
+ """
+ )
ret = OVF_ENV_TEMPLATE.format(
- username=username,
- hostname=hostname,
- disableSshPwd=disableSshPwd)
- return ret.encode('utf-8')
+ username=username, hostname=hostname, disableSshPwd=disableSshPwd
+ )
+ return ret.encode("utf-8")
class AzureEndpointHttpClient:
headers = {
- 'x-ms-agent-name': 'WALinuxAgent',
- 'x-ms-version': '2012-11-30',
+ "x-ms-agent-name": "WALinuxAgent",
+ "x-ms-version": "2012-11-30",
}
def __init__(self, certificate):
@@ -403,8 +445,7 @@ class AzureEndpointHttpClient:
if extra_headers is not None:
headers = self.headers.copy()
headers.update(extra_headers)
- return http_with_retries(
- url, data=data, headers=headers)
+ return http_with_retries(url, data=data, headers=headers)
class InvalidGoalStateXMLException(Exception):
@@ -412,12 +453,12 @@ class InvalidGoalStateXMLException(Exception):
class GoalState:
-
def __init__(
- self,
- unparsed_xml: str,
- azure_endpoint_client: AzureEndpointHttpClient,
- need_certificate: bool = True) -> None:
+ self,
+ unparsed_xml: str,
+ azure_endpoint_client: AzureEndpointHttpClient,
+ need_certificate: bool = True,
+ ) -> None:
"""Parses a GoalState XML string and returns a GoalState object.
@param unparsed_xml: string representing a GoalState XML.
@@ -431,36 +472,41 @@ class GoalState:
self.root = ElementTree.fromstring(unparsed_xml)
except ElementTree.ParseError as e:
report_diagnostic_event(
- 'Failed to parse GoalState XML: %s' % e,
- logger_func=LOG.warning)
+ "Failed to parse GoalState XML: %s" % e,
+ logger_func=LOG.warning,
+ )
raise
- self.container_id = self._text_from_xpath('./Container/ContainerId')
+ self.container_id = self._text_from_xpath("./Container/ContainerId")
self.instance_id = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance/InstanceId')
- self.incarnation = self._text_from_xpath('./Incarnation')
+ "./Container/RoleInstanceList/RoleInstance/InstanceId"
+ )
+ self.incarnation = self._text_from_xpath("./Incarnation")
for attr in ("container_id", "instance_id", "incarnation"):
if getattr(self, attr) is None:
- msg = 'Missing %s in GoalState XML' % attr
+ msg = "Missing %s in GoalState XML" % attr
report_diagnostic_event(msg, logger_func=LOG.warning)
raise InvalidGoalStateXMLException(msg)
self.certificates_xml = None
url = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance'
- '/Configuration/Certificates')
+ "./Container/RoleInstanceList/RoleInstance"
+ "/Configuration/Certificates"
+ )
if url is not None and need_certificate:
with events.ReportEventStack(
- name="get-certificates-xml",
- description="get certificates xml",
- parent=azure_ds_reporter):
- self.certificates_xml = \
- self.azure_endpoint_client.get(
- url, secure=True).contents
+ name="get-certificates-xml",
+ description="get certificates xml",
+ parent=azure_ds_reporter,
+ ):
+ self.certificates_xml = self.azure_endpoint_client.get(
+ url, secure=True
+ ).contents
if self.certificates_xml is None:
raise InvalidGoalStateXMLException(
- 'Azure endpoint returned empty certificates xml.')
+ "Azure endpoint returned empty certificates xml."
+ )
def _text_from_xpath(self, xpath):
element = self.root.find(xpath)
@@ -472,8 +518,8 @@ class GoalState:
class OpenSSLManager:
certificate_names = {
- 'private_key': 'TransportPrivate.pem',
- 'certificate': 'TransportCert.pem',
+ "private_key": "TransportPrivate.pem",
+ "certificate": "TransportCert.pem",
}
def __init__(self):
@@ -494,35 +540,47 @@ class OpenSSLManager:
@azure_ds_telemetry_reporter
def generate_certificate(self):
- LOG.debug('Generating certificate for communication with fabric...')
+ LOG.debug("Generating certificate for communication with fabric...")
if self.certificate is not None:
- LOG.debug('Certificate already generated.')
+ LOG.debug("Certificate already generated.")
return
with cd(self.tmpdir):
- subp.subp([
- 'openssl', 'req', '-x509', '-nodes', '-subj',
- '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
- '-keyout', self.certificate_names['private_key'],
- '-out', self.certificate_names['certificate'],
- ])
- certificate = ''
- for line in open(self.certificate_names['certificate']):
+ subp.subp(
+ [
+ "openssl",
+ "req",
+ "-x509",
+ "-nodes",
+ "-subj",
+ "/CN=LinuxTransport",
+ "-days",
+ "32768",
+ "-newkey",
+ "rsa:2048",
+ "-keyout",
+ self.certificate_names["private_key"],
+ "-out",
+ self.certificate_names["certificate"],
+ ]
+ )
+ certificate = ""
+ for line in open(self.certificate_names["certificate"]):
if "CERTIFICATE" not in line:
certificate += line.rstrip()
self.certificate = certificate
- LOG.debug('New certificate generated.')
+ LOG.debug("New certificate generated.")
@staticmethod
@azure_ds_telemetry_reporter
def _run_x509_action(action, cert):
- cmd = ['openssl', 'x509', '-noout', action]
+ cmd = ["openssl", "x509", "-noout", action]
result, _ = subp.subp(cmd, data=cert)
return result
@azure_ds_telemetry_reporter
def _get_ssh_key_from_cert(self, certificate):
- pub_key = self._run_x509_action('-pubkey', certificate)
- keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
+ pub_key = self._run_x509_action("-pubkey", certificate)
+ keygen_cmd = ["ssh-keygen", "-i", "-m", "PKCS8", "-f", "/dev/stdin"]
ssh_key, _ = subp.subp(keygen_cmd, data=pub_key)
return ssh_key
@@ -535,48 +593,50 @@ class OpenSSLManager:
Azure control plane passes that fingerprint as so:
'073E19D14D1C799224C6A0FD8DDAB6A8BF27D473'
"""
- raw_fp = self._run_x509_action('-fingerprint', certificate)
- eq = raw_fp.find('=')
- octets = raw_fp[eq+1:-1].split(':')
- return ''.join(octets)
+ raw_fp = self._run_x509_action("-fingerprint", certificate)
+ eq = raw_fp.find("=")
+ octets = raw_fp[eq + 1 : -1].split(":")
+ return "".join(octets)
@azure_ds_telemetry_reporter
def _decrypt_certs_from_xml(self, certificates_xml):
"""Decrypt the certificates XML document using the our private key;
- return the list of certs and private keys contained in the doc.
+ return the list of certs and private keys contained in the doc.
"""
- tag = ElementTree.fromstring(certificates_xml).find('.//Data')
+ tag = ElementTree.fromstring(certificates_xml).find(".//Data")
certificates_content = tag.text
lines = [
- b'MIME-Version: 1.0',
+ b"MIME-Version: 1.0",
b'Content-Disposition: attachment; filename="Certificates.p7m"',
b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
- b'Content-Transfer-Encoding: base64',
- b'',
- certificates_content.encode('utf-8'),
+ b"Content-Transfer-Encoding: base64",
+ b"",
+ certificates_content.encode("utf-8"),
]
with cd(self.tmpdir):
out, _ = subp.subp(
- 'openssl cms -decrypt -in /dev/stdin -inkey'
- ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
- ' -password pass:'.format(**self.certificate_names),
- shell=True, data=b'\n'.join(lines))
+ "openssl cms -decrypt -in /dev/stdin -inkey"
+ " {private_key} -recip {certificate} | openssl pkcs12 -nodes"
+ " -password pass:".format(**self.certificate_names),
+ shell=True,
+ data=b"\n".join(lines),
+ )
return out
@azure_ds_telemetry_reporter
def parse_certificates(self, certificates_xml):
"""Given the Certificates XML document, return a dictionary of
- fingerprints and associated SSH keys derived from the certs."""
+ fingerprints and associated SSH keys derived from the certs."""
out = self._decrypt_certs_from_xml(certificates_xml)
current = []
keys = {}
for line in out.splitlines():
current.append(line)
- if re.match(r'[-]+END .*?KEY[-]+$', line):
+ if re.match(r"[-]+END .*?KEY[-]+$", line):
# ignore private_keys
current = []
- elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
- certificate = '\n'.join(current)
+ elif re.match(r"[-]+END .*?CERTIFICATE[-]+$", line):
+ certificate = "\n".join(current)
ssh_key = self._get_ssh_key_from_cert(certificate)
fingerprint = self._get_fingerprint_from_cert(certificate)
keys[fingerprint] = ssh_key
@@ -586,7 +646,8 @@ class OpenSSLManager:
class GoalStateHealthReporter:
- HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent('''\
+ HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent(
+ """\
<?xml version="1.0" encoding="utf-8"?>
<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
@@ -604,25 +665,30 @@ class GoalStateHealthReporter:
</RoleInstanceList>
</Container>
</Health>
- ''')
+ """
+ )
- HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent('''\
+ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent(
+ """\
<Details>
<SubStatus>{health_substatus}</SubStatus>
<Description>{health_description}</Description>
</Details>
- ''')
+ """
+ )
- PROVISIONING_SUCCESS_STATUS = 'Ready'
- PROVISIONING_NOT_READY_STATUS = 'NotReady'
- PROVISIONING_FAILURE_SUBSTATUS = 'ProvisioningFailed'
+ PROVISIONING_SUCCESS_STATUS = "Ready"
+ PROVISIONING_NOT_READY_STATUS = "NotReady"
+ PROVISIONING_FAILURE_SUBSTATUS = "ProvisioningFailed"
HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512
def __init__(
- self, goal_state: GoalState,
- azure_endpoint_client: AzureEndpointHttpClient,
- endpoint: str) -> None:
+ self,
+ goal_state: GoalState,
+ azure_endpoint_client: AzureEndpointHttpClient,
+ endpoint: str,
+ ) -> None:
"""Creates instance that will report provisioning status to an endpoint
@param goal_state: An instance of class GoalState that contains
@@ -644,17 +710,19 @@ class GoalStateHealthReporter:
incarnation=self._goal_state.incarnation,
container_id=self._goal_state.container_id,
instance_id=self._goal_state.instance_id,
- status=self.PROVISIONING_SUCCESS_STATUS)
- LOG.debug('Reporting ready to Azure fabric.')
+ status=self.PROVISIONING_SUCCESS_STATUS,
+ )
+ LOG.debug("Reporting ready to Azure fabric.")
try:
self._post_health_report(document=document)
except Exception as e:
report_diagnostic_event(
"exception while reporting ready: %s" % e,
- logger_func=LOG.error)
+ logger_func=LOG.error,
+ )
raise
- LOG.info('Reported ready to Azure fabric.')
+ LOG.info("Reported ready to Azure fabric.")
@azure_ds_telemetry_reporter
def send_failure_signal(self, description: str) -> None:
@@ -664,7 +732,8 @@ class GoalStateHealthReporter:
instance_id=self._goal_state.instance_id,
status=self.PROVISIONING_NOT_READY_STATUS,
substatus=self.PROVISIONING_FAILURE_SUBSTATUS,
- description=description)
+ description=description,
+ )
try:
self._post_health_report(document=document)
except Exception as e:
@@ -672,24 +741,33 @@ class GoalStateHealthReporter:
report_diagnostic_event(msg, logger_func=LOG.error)
raise
- LOG.warning('Reported failure to Azure fabric.')
+ LOG.warning("Reported failure to Azure fabric.")
def build_report(
- self, incarnation: str, container_id: str, instance_id: str,
- status: str, substatus=None, description=None) -> str:
- health_detail = ''
+ self,
+ incarnation: str,
+ container_id: str,
+ instance_id: str,
+ status: str,
+ substatus=None,
+ description=None,
+ ) -> str:
+ health_detail = ""
if substatus is not None:
health_detail = self.HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
health_substatus=escape(substatus),
health_description=escape(
- description[:self.HEALTH_REPORT_DESCRIPTION_TRIM_LEN]))
+ description[: self.HEALTH_REPORT_DESCRIPTION_TRIM_LEN]
+ ),
+ )
health_report = self.HEALTH_REPORT_XML_TEMPLATE.format(
incarnation=escape(str(incarnation)),
container_id=escape(container_id),
instance_id=escape(instance_id),
health_status=escape(status),
- health_detail_subsection=health_detail)
+ health_detail_subsection=health_detail,
+ )
return health_report
@@ -717,20 +795,22 @@ class GoalStateHealthReporter:
# reporting handler that writes to the special KVP files.
time.sleep(0)
- LOG.debug('Sending health report to Azure fabric.')
+ LOG.debug("Sending health report to Azure fabric.")
url = "http://{}/machine?comp=health".format(self._endpoint)
self._azure_endpoint_client.post(
url,
data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'})
- LOG.debug('Successfully sent health report to Azure fabric')
+ extra_headers={"Content-Type": "text/xml; charset=utf-8"},
+ )
+ LOG.debug("Successfully sent health report to Azure fabric")
class WALinuxAgentShim:
-
def __init__(self, fallback_lease_file=None, dhcp_options=None):
- LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',
- fallback_lease_file)
+ LOG.debug(
+ "WALinuxAgentShim instantiated, fallback_lease_file=%s",
+ fallback_lease_file,
+ )
self.dhcpoptions = dhcp_options
self._endpoint = None
self.openssl_manager = None
@@ -749,30 +829,33 @@ class WALinuxAgentShim:
@property
def endpoint(self):
if self._endpoint is None:
- self._endpoint = self.find_endpoint(self.lease_file,
- self.dhcpoptions)
+ self._endpoint = self.find_endpoint(
+ self.lease_file, self.dhcpoptions
+ )
return self._endpoint
@staticmethod
def get_ip_from_lease_value(fallback_lease_value):
- unescaped_value = fallback_lease_value.replace('\\', '')
+ unescaped_value = fallback_lease_value.replace("\\", "")
if len(unescaped_value) > 4:
- hex_string = ''
- for hex_pair in unescaped_value.split(':'):
+ hex_string = ""
+ for hex_pair in unescaped_value.split(":"):
if len(hex_pair) == 1:
- hex_pair = '0' + hex_pair
+ hex_pair = "0" + hex_pair
hex_string += hex_pair
packed_bytes = struct.pack(
- '>L', int(hex_string.replace(':', ''), 16))
+ ">L", int(hex_string.replace(":", ""), 16)
+ )
else:
- packed_bytes = unescaped_value.encode('utf-8')
+ packed_bytes = unescaped_value.encode("utf-8")
return socket.inet_ntoa(packed_bytes)
@staticmethod
@azure_ds_telemetry_reporter
def _networkd_get_value_from_leases(leases_d=None):
return dhcp.networkd_get_option_from_leases(
- 'OPTION_245', leases_d=leases_d)
+ "OPTION_245", leases_d=leases_d
+ )
@staticmethod
@azure_ds_telemetry_reporter
@@ -790,7 +873,7 @@ class WALinuxAgentShim:
if option_name in line:
# Example line from Ubuntu
# option unknown-245 a8:3f:81:10;
- leases.append(line.strip(' ').split(' ', 2)[-1].strip(';\n"'))
+ leases.append(line.strip(" ").split(" ", 2)[-1].strip(';\n"'))
# Return the "most recent" one in the list
if len(leases) < 1:
return None
@@ -805,15 +888,16 @@ class WALinuxAgentShim:
if not os.path.exists(hooks_dir):
LOG.debug("%s not found.", hooks_dir)
return None
- hook_files = [os.path.join(hooks_dir, x)
- for x in os.listdir(hooks_dir)]
+ hook_files = [
+ os.path.join(hooks_dir, x) for x in os.listdir(hooks_dir)
+ ]
for hook_file in hook_files:
try:
- name = os.path.basename(hook_file).replace('.json', '')
+ name = os.path.basename(hook_file).replace(".json", "")
dhcp_options[name] = json.loads(util.load_file((hook_file)))
except ValueError as e:
raise ValueError(
- '{_file} is not valid JSON data'.format(_file=hook_file)
+ "{_file} is not valid JSON data".format(_file=hook_file)
) from e
return dhcp_options
@@ -825,7 +909,7 @@ class WALinuxAgentShim:
# the MS endpoint server is given to us as DHPC option 245
_value = None
for interface in dhcp_options:
- _value = dhcp_options[interface].get('unknown_245', None)
+ _value = dhcp_options[interface].get("unknown_245", None)
if _value is not None:
LOG.debug("Endpoint server found in dhclient options")
break
@@ -855,63 +939,73 @@ class WALinuxAgentShim:
LOG.debug("Using Azure Endpoint from dhcp options")
if value is None:
report_diagnostic_event(
- 'No Azure endpoint from dhcp options. '
- 'Finding Azure endpoint from networkd...',
- logger_func=LOG.debug)
+ "No Azure endpoint from dhcp options. "
+ "Finding Azure endpoint from networkd...",
+ logger_func=LOG.debug,
+ )
value = WALinuxAgentShim._networkd_get_value_from_leases()
if value is None:
# Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
# a dhclient exit hook that calls cloud-init-dhclient-hook
report_diagnostic_event(
- 'No Azure endpoint from networkd. '
- 'Finding Azure endpoint from hook json...',
- logger_func=LOG.debug)
+ "No Azure endpoint from networkd. "
+ "Finding Azure endpoint from hook json...",
+ logger_func=LOG.debug,
+ )
dhcp_options = WALinuxAgentShim._load_dhclient_json()
value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
if value is None:
# Fallback and check the leases file if unsuccessful
report_diagnostic_event(
- 'No Azure endpoint from dhclient logs. '
- 'Unable to find endpoint in dhclient logs. '
- 'Falling back to check lease files',
- logger_func=LOG.debug)
+ "No Azure endpoint from dhclient logs. "
+ "Unable to find endpoint in dhclient logs. "
+ "Falling back to check lease files",
+ logger_func=LOG.debug,
+ )
if fallback_lease_file is None:
report_diagnostic_event(
- 'No fallback lease file was specified.',
- logger_func=LOG.warning)
+ "No fallback lease file was specified.",
+ logger_func=LOG.warning,
+ )
value = None
else:
report_diagnostic_event(
- 'Looking for endpoint in lease file %s'
- % fallback_lease_file, logger_func=LOG.debug)
+ "Looking for endpoint in lease file %s"
+ % fallback_lease_file,
+ logger_func=LOG.debug,
+ )
value = WALinuxAgentShim._get_value_from_leases_file(
- fallback_lease_file)
+ fallback_lease_file
+ )
if value is None:
value = DEFAULT_WIRESERVER_ENDPOINT
report_diagnostic_event(
- 'No lease found; using default endpoint: %s' % value,
- logger_func=LOG.warning)
+ "No lease found; using default endpoint: %s" % value,
+ logger_func=LOG.warning,
+ )
endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
report_diagnostic_event(
- 'Azure endpoint found at %s' % endpoint_ip_address,
- logger_func=LOG.debug)
+ "Azure endpoint found at %s" % endpoint_ip_address,
+ logger_func=LOG.debug,
+ )
return endpoint_ip_address
@azure_ds_telemetry_reporter
def eject_iso(self, iso_dev) -> None:
try:
LOG.debug("Ejecting the provisioning iso")
- subp.subp(['eject', iso_dev])
+ subp.subp(["eject", iso_dev])
except Exception as e:
report_diagnostic_event(
"Failed ejecting the provisioning iso: %s" % e,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
@azure_ds_telemetry_reporter
- def register_with_azure_and_fetch_data(self,
- pubkey_info=None,
- iso_dev=None) -> dict:
+ def register_with_azure_and_fetch_data(
+ self, pubkey_info=None, iso_dev=None
+ ) -> dict:
"""Gets the VM's GoalState from Azure, uses the GoalState information
to report ready/send the ready signal/provisioning complete signal to
Azure, and then uses pubkey_info to filter and obtain the user's
@@ -928,7 +1022,8 @@ class WALinuxAgentShim:
http_client_certificate = self.openssl_manager.certificate
if self.azure_endpoint_client is None:
self.azure_endpoint_client = AzureEndpointHttpClient(
- http_client_certificate)
+ http_client_certificate
+ )
goal_state = self._fetch_goal_state_from_azure(
need_certificate=http_client_certificate is not None
)
@@ -936,13 +1031,14 @@ class WALinuxAgentShim:
if pubkey_info is not None:
ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info)
health_reporter = GoalStateHealthReporter(
- goal_state, self.azure_endpoint_client, self.endpoint)
+ goal_state, self.azure_endpoint_client, self.endpoint
+ )
if iso_dev is not None:
self.eject_iso(iso_dev)
health_reporter.send_ready_signal()
- return {'public-keys': ssh_keys}
+ return {"public-keys": ssh_keys}
@azure_ds_telemetry_reporter
def register_with_azure_and_report_failure(self, description: str) -> None:
@@ -955,13 +1051,14 @@ class WALinuxAgentShim:
self.azure_endpoint_client = AzureEndpointHttpClient(None)
goal_state = self._fetch_goal_state_from_azure(need_certificate=False)
health_reporter = GoalStateHealthReporter(
- goal_state, self.azure_endpoint_client, self.endpoint)
+ goal_state, self.azure_endpoint_client, self.endpoint
+ )
health_reporter.send_failure_signal(description=description)
@azure_ds_telemetry_reporter
def _fetch_goal_state_from_azure(
- self,
- need_certificate: bool) -> GoalState:
+ self, need_certificate: bool
+ ) -> GoalState:
"""Fetches the GoalState XML from the Azure endpoint, parses the XML,
and returns a GoalState object.
@@ -970,8 +1067,7 @@ class WALinuxAgentShim:
"""
unparsed_goal_state_xml = self._get_raw_goal_state_xml_from_azure()
return self._parse_raw_goal_state_xml(
- unparsed_goal_state_xml,
- need_certificate
+ unparsed_goal_state_xml, need_certificate
)
@azure_ds_telemetry_reporter
@@ -982,27 +1078,29 @@ class WALinuxAgentShim:
@return: GoalState XML string
"""
- LOG.info('Registering with Azure...')
- url = 'http://{}/machine/?comp=goalstate'.format(self.endpoint)
+ LOG.info("Registering with Azure...")
+ url = "http://{}/machine/?comp=goalstate".format(self.endpoint)
try:
with events.ReportEventStack(
- name="goalstate-retrieval",
- description="retrieve goalstate",
- parent=azure_ds_reporter):
+ name="goalstate-retrieval",
+ description="retrieve goalstate",
+ parent=azure_ds_reporter,
+ ):
response = self.azure_endpoint_client.get(url)
except Exception as e:
report_diagnostic_event(
- 'failed to register with Azure and fetch GoalState XML: %s'
- % e, logger_func=LOG.warning)
+ "failed to register with Azure and fetch GoalState XML: %s"
+ % e,
+ logger_func=LOG.warning,
+ )
raise
- LOG.debug('Successfully fetched GoalState XML.')
+ LOG.debug("Successfully fetched GoalState XML.")
return response.contents
@azure_ds_telemetry_reporter
def _parse_raw_goal_state_xml(
- self,
- unparsed_goal_state_xml: str,
- need_certificate: bool) -> GoalState:
+ self, unparsed_goal_state_xml: str, need_certificate: bool
+ ) -> GoalState:
"""Parses a GoalState XML string and returns a GoalState object.
@param unparsed_goal_state_xml: GoalState XML string
@@ -1013,23 +1111,28 @@ class WALinuxAgentShim:
goal_state = GoalState(
unparsed_goal_state_xml,
self.azure_endpoint_client,
- need_certificate
+ need_certificate,
)
except Exception as e:
report_diagnostic_event(
- 'Error processing GoalState XML: %s' % e,
- logger_func=LOG.warning)
+ "Error processing GoalState XML: %s" % e,
+ logger_func=LOG.warning,
+ )
raise
- msg = ', '.join([
- 'GoalState XML container id: %s' % goal_state.container_id,
- 'GoalState XML instance id: %s' % goal_state.instance_id,
- 'GoalState XML incarnation: %s' % goal_state.incarnation])
+ msg = ", ".join(
+ [
+ "GoalState XML container id: %s" % goal_state.container_id,
+ "GoalState XML instance id: %s" % goal_state.instance_id,
+ "GoalState XML incarnation: %s" % goal_state.incarnation,
+ ]
+ )
report_diagnostic_event(msg, logger_func=LOG.debug)
return goal_state
@azure_ds_telemetry_reporter
def _get_user_pubkeys(
- self, goal_state: GoalState, pubkey_info: list) -> list:
+ self, goal_state: GoalState, pubkey_info: list
+ ) -> list:
"""Gets and filters the VM admin user's authorized pubkeys.
The admin user in this case is the username specified as "admin"
@@ -1057,15 +1160,16 @@ class WALinuxAgentShim:
"""
ssh_keys = []
if goal_state.certificates_xml is not None and pubkey_info is not None:
- LOG.debug('Certificate XML found; parsing out public keys.')
+ LOG.debug("Certificate XML found; parsing out public keys.")
keys_by_fingerprint = self.openssl_manager.parse_certificates(
- goal_state.certificates_xml)
+ goal_state.certificates_xml
+ )
ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info)
return ssh_keys
@staticmethod
def _filter_pubkeys(keys_by_fingerprint: dict, pubkey_info: list) -> list:
- """ Filter and return only the user's actual pubkeys.
+ """Filter and return only the user's actual pubkeys.
@param keys_by_fingerprint: pubkey fingerprint -> pubkey value dict
that was obtained from GoalState Certificates XML. May contain
@@ -1078,71 +1182,84 @@ class WALinuxAgentShim:
"""
keys = []
for pubkey in pubkey_info:
- if 'value' in pubkey and pubkey['value']:
- keys.append(pubkey['value'])
- elif 'fingerprint' in pubkey and pubkey['fingerprint']:
- fingerprint = pubkey['fingerprint']
+ if "value" in pubkey and pubkey["value"]:
+ keys.append(pubkey["value"])
+ elif "fingerprint" in pubkey and pubkey["fingerprint"]:
+ fingerprint = pubkey["fingerprint"]
if fingerprint in keys_by_fingerprint:
keys.append(keys_by_fingerprint[fingerprint])
else:
- LOG.warning("ovf-env.xml specified PublicKey fingerprint "
- "%s not found in goalstate XML", fingerprint)
+ LOG.warning(
+ "ovf-env.xml specified PublicKey fingerprint "
+ "%s not found in goalstate XML",
+ fingerprint,
+ )
else:
- LOG.warning("ovf-env.xml specified PublicKey with neither "
- "value nor fingerprint: %s", pubkey)
+ LOG.warning(
+ "ovf-env.xml specified PublicKey with neither "
+ "value nor fingerprint: %s",
+ pubkey,
+ )
return keys
@azure_ds_telemetry_reporter
-def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
- pubkey_info=None, iso_dev=None):
- shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
- dhcp_options=dhcp_opts)
+def get_metadata_from_fabric(
+ fallback_lease_file=None, dhcp_opts=None, pubkey_info=None, iso_dev=None
+):
+ shim = WALinuxAgentShim(
+ fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts
+ )
try:
return shim.register_with_azure_and_fetch_data(
- pubkey_info=pubkey_info, iso_dev=iso_dev)
+ pubkey_info=pubkey_info, iso_dev=iso_dev
+ )
finally:
shim.clean_up()
@azure_ds_telemetry_reporter
-def report_failure_to_fabric(fallback_lease_file=None, dhcp_opts=None,
- description=None):
- shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
- dhcp_options=dhcp_opts)
+def report_failure_to_fabric(
+ fallback_lease_file=None, dhcp_opts=None, description=None
+):
+ shim = WALinuxAgentShim(
+ fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts
+ )
if not description:
description = DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
try:
- shim.register_with_azure_and_report_failure(
- description=description)
+ shim.register_with_azure_and_report_failure(description=description)
finally:
shim.clean_up()
def dhcp_log_cb(out, err):
report_diagnostic_event(
- "dhclient output stream: %s" % out, logger_func=LOG.debug)
+ "dhclient output stream: %s" % out, logger_func=LOG.debug
+ )
report_diagnostic_event(
- "dhclient error stream: %s" % err, logger_func=LOG.debug)
+ "dhclient error stream: %s" % err, logger_func=LOG.debug
+ )
class EphemeralDHCPv4WithReporting:
def __init__(self, reporter, nic=None):
self.reporter = reporter
self.ephemeralDHCPv4 = EphemeralDHCPv4(
- iface=nic, dhcp_log_func=dhcp_log_cb)
+ iface=nic, dhcp_log_func=dhcp_log_cb
+ )
def __enter__(self):
with events.ReportEventStack(
- name="obtain-dhcp-lease",
- description="obtain dhcp lease",
- parent=self.reporter):
+ name="obtain-dhcp-lease",
+ description="obtain dhcp lease",
+ parent=self.reporter,
+ ):
return self.ephemeralDHCPv4.__enter__()
def __exit__(self, excp_type, excp_value, excp_traceback):
- self.ephemeralDHCPv4.__exit__(
- excp_type, excp_value, excp_traceback)
+ self.ephemeralDHCPv4.__exit__(excp_type, excp_value, excp_traceback)
# vi: ts=4 expandtab
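
Aside: the byte-swap comparison in is_byte_swapped(), reformatted near the top of this file, normalizes Azure's mixed-endian SMBIOS UUID. A standalone rerun of that logic, with a made-up UUID:

    import textwrap

    def swap_bytestring(s, width=2):
        dd = [byte for byte in textwrap.wrap(s, 2)]
        dd.reverse()
        return "".join(dd)

    # UUID below is invented for illustration.
    parts = "544cdfd0-cb4e-4b4a-9954-5bdf3ed5c3b8".split("-")
    swapped_id = "-".join(
        [
            swap_bytestring(parts[0]),
            swap_bytestring(parts[1]),
            swap_bytestring(parts[2]),
            parts[3],
            parts[4],
        ]
    )
    print(swapped_id)  # d0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8

The first three UUID groups are stored little-endian in SMBIOS, so only their byte pairs are reversed before comparison.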
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index f9be4ecb..72515caf 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -8,20 +8,18 @@ import random
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net as cloudnet
-from cloudinit import url_helper
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, url_helper, util
-NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
+NIC_MAP = {"public": "eth0", "private": "eth1"}
LOG = logging.getLogger(__name__)
def assign_ipv4_link_local(distro, nic=None):
- """Bring up NIC using an address using link-local (ip4LL) IPs. On
- DigitalOcean, the link-local domain is per-droplet routed, so there
- is no risk of collisions. However, to be more safe, the ip4LL
- address is random.
+ """Bring up NIC using an address using link-local (ip4LL) IPs.
+ On DigitalOcean, the link-local domain is per-droplet routed, so there
+ is no risk of collisions. However, to be more safe, the ip4LL
+ address is random.
"""
if not nic:
@@ -29,18 +27,22 @@ def assign_ipv4_link_local(distro, nic=None):
LOG.debug("selected interface '%s' for reading metadata", nic)
if not nic:
- raise RuntimeError("unable to find interfaces to access the"
- "meta-data server. This droplet is broken.")
+ raise RuntimeError(
+ "unable to find interfaces to access the"
+ "meta-data server. This droplet is broken."
+ )
- addr = "169.254.{0}.{1}/16".format(random.randint(1, 168),
- random.randint(0, 255))
+ addr = "169.254.{0}.{1}/16".format(
+ random.randint(1, 168), random.randint(0, 255)
+ )
- ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic]
- ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']
+ ip_addr_cmd = ["ip", "addr", "add", addr, "dev", nic]
+ ip_link_cmd = ["ip", "link", "set", "dev", nic, "up"]
- if not subp.which('ip'):
- raise RuntimeError("No 'ip' command available to configure ip4LL "
- "address")
+ if not subp.which("ip"):
+ raise RuntimeError(
+ "No 'ip' command available to configure ip4LL address"
+ )
try:
subp.subp(ip_addr_cmd)
@@ -48,8 +50,13 @@ def assign_ipv4_link_local(distro, nic=None):
subp.subp(ip_link_cmd)
LOG.debug("brought device '%s' up", nic)
except Exception:
- util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
- " Droplet networking will be broken", addr, nic)
+ util.logexc(
+ LOG,
+ "ip4LL address assignment of '%s' to '%s' failed."
+ " Droplet networking will be broken",
+ addr,
+ nic,
+ )
raise
return nic
@@ -63,21 +70,23 @@ def get_link_local_nic(distro):
]
if not nics:
return None
- return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex'))
+ return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, "ifindex"))
def del_ipv4_link_local(nic=None):
"""Remove the ip4LL address. While this is not necessary, the ip4LL
- address is extraneous and confusing to users.
+ address is extraneous and confusing to users.
"""
if not nic:
- LOG.debug("no link_local address interface defined, skipping link "
- "local address cleanup")
+ LOG.debug(
+ "no link_local address interface defined, skipping link "
+ "local address cleanup"
+ )
return
LOG.debug("cleaning up ipv4LL address")
- ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
+ ip_addr_cmd = ["ip", "addr", "flush", "dev", nic]
try:
subp.subp(ip_addr_cmd)
@@ -89,44 +98,47 @@ def del_ipv4_link_local(nic=None):
def convert_network_configuration(config, dns_servers):
"""Convert the DigitalOcean Network description into Cloud-init's netconfig
- format.
-
- Example JSON:
- {'public': [
- {'mac': '04:01:58:27:7f:01',
- 'ipv4': {'gateway': '45.55.32.1',
- 'netmask': '255.255.224.0',
- 'ip_address': '45.55.50.93'},
- 'anchor_ipv4': {
- 'gateway': '10.17.0.1',
- 'netmask': '255.255.0.0',
- 'ip_address': '10.17.0.9'},
- 'type': 'public',
- 'ipv6': {'gateway': '....',
- 'ip_address': '....',
- 'cidr': 64}}
- ],
- 'private': [
- {'mac': '04:01:58:27:7f:02',
- 'ipv4': {'gateway': '10.132.0.1',
- 'netmask': '255.255.0.0',
- 'ip_address': '10.132.75.35'},
- 'type': 'private'}
- ]
- }
+ format.
+
+ Example JSON:
+ {'public': [
+ {'mac': '04:01:58:27:7f:01',
+ 'ipv4': {'gateway': '45.55.32.1',
+ 'netmask': '255.255.224.0',
+ 'ip_address': '45.55.50.93'},
+ 'anchor_ipv4': {
+ 'gateway': '10.17.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.17.0.9'},
+ 'type': 'public',
+ 'ipv6': {'gateway': '....',
+ 'ip_address': '....',
+ 'cidr': 64}}
+ ],
+ 'private': [
+ {'mac': '04:01:58:27:7f:02',
+ 'ipv4': {'gateway': '10.132.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.132.75.35'},
+ 'type': 'private'}
+ ]
+ }
"""
def _get_subnet_part(pcfg):
- subpart = {'type': 'static',
- 'control': 'auto',
- 'address': pcfg.get('ip_address'),
- 'gateway': pcfg.get('gateway')}
-
- if ":" in pcfg.get('ip_address'):
- subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'),
- pcfg.get('cidr'))
+ subpart = {
+ "type": "static",
+ "control": "auto",
+ "address": pcfg.get("ip_address"),
+ "gateway": pcfg.get("gateway"),
+ }
+
+ if ":" in pcfg.get("ip_address"):
+ subpart["address"] = "{0}/{1}".format(
+ pcfg.get("ip_address"), pcfg.get("cidr")
+ )
else:
- subpart['netmask'] = pcfg.get('netmask')
+ subpart["netmask"] = pcfg.get("netmask")
return subpart
@@ -138,54 +150,66 @@ def convert_network_configuration(config, dns_servers):
nic = config[n][0]
LOG.debug("considering %s", nic)
- mac_address = nic.get('mac')
+ mac_address = nic.get("mac")
if mac_address not in macs_to_nics:
- raise RuntimeError("Did not find network interface on system "
- "with mac '%s'. Cannot apply configuration: %s"
- % (mac_address, nic))
+ raise RuntimeError(
+ "Did not find network interface on system "
+ "with mac '%s'. Cannot apply configuration: %s"
+ % (mac_address, nic)
+ )
sysfs_name = macs_to_nics.get(mac_address)
- nic_type = nic.get('type', 'unknown')
+ nic_type = nic.get("type", "unknown")
if_name = NIC_MAP.get(nic_type, sysfs_name)
if if_name != sysfs_name:
- LOG.debug("Found %s interface '%s' on '%s', assigned name of '%s'",
- nic_type, mac_address, sysfs_name, if_name)
+ LOG.debug(
+ "Found %s interface '%s' on '%s', assigned name of '%s'",
+ nic_type,
+ mac_address,
+ sysfs_name,
+ if_name,
+ )
else:
- msg = ("Found interface '%s' on '%s', which is not a public "
- "or private interface. Using default system naming.")
+ msg = (
+ "Found interface '%s' on '%s', which is not a public "
+ "or private interface. Using default system naming."
+ )
LOG.debug(msg, mac_address, sysfs_name)
- ncfg = {'type': 'physical',
- 'mac_address': mac_address,
- 'name': if_name}
+ ncfg = {
+ "type": "physical",
+ "mac_address": mac_address,
+ "name": if_name,
+ }
subnets = []
- for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'):
+ for netdef in ("ipv4", "ipv6", "anchor_ipv4", "anchor_ipv6"):
raw_subnet = nic.get(netdef, None)
if not raw_subnet:
continue
sub_part = _get_subnet_part(raw_subnet)
if nic_type != "public" or "anchor" in netdef:
- del sub_part['gateway']
+ del sub_part["gateway"]
subnets.append(sub_part)
- ncfg['subnets'] = subnets
+ ncfg["subnets"] = subnets
nic_configs.append(ncfg)
LOG.debug("nic '%s' configuration: %s", if_name, ncfg)
if dns_servers:
LOG.debug("added dns servers: %s", dns_servers)
- nic_configs.append({'type': 'nameserver', 'address': dns_servers})
+ nic_configs.append({"type": "nameserver", "address": dns_servers})
- return {'version': 1, 'config': nic_configs}
+ return {"version": 1, "config": nic_configs}
def read_metadata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read metadata at %s" % url)
return json.loads(response.contents.decode())
@@ -202,16 +226,21 @@ def read_sysinfo():
droplet_id = dmi.read_dmi_data("system-serial-number")
if droplet_id:
- LOG.debug("system identified via SMBIOS as DigitalOcean Droplet: %s",
- droplet_id)
+ LOG.debug(
+ "system identified via SMBIOS as DigitalOcean Droplet: %s",
+ droplet_id,
+ )
else:
- msg = ("system identified via SMBIOS as a DigitalOcean "
- "Droplet, but did not provide an ID. Please file a "
- "support ticket at: "
- "https://cloud.digitalocean.com/support/tickets/new")
+ msg = (
+ "system identified via SMBIOS as a DigitalOcean "
+ "Droplet, but did not provide an ID. Please file a "
+ "support ticket at: "
+ "https://cloud.digitalocean.com/support/tickets/new"
+ )
LOG.critical(msg)
raise RuntimeError(msg)
return (True, droplet_id)
+
# vi: ts=4 expandtab
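
Aside: _get_subnet_part() above can be exercised directly with the 'ipv4' block from the docstring's example JSON; it emits the static subnet entry used in cloud-init's version-1 network config:

    def _get_subnet_part(pcfg):
        # Copied from the hunk above for a self-contained run.
        subpart = {
            "type": "static",
            "control": "auto",
            "address": pcfg.get("ip_address"),
            "gateway": pcfg.get("gateway"),
        }
        if ":" in pcfg.get("ip_address"):
            subpart["address"] = "{0}/{1}".format(
                pcfg.get("ip_address"), pcfg.get("cidr")
            )
        else:
            subpart["netmask"] = pcfg.get("netmask")
        return subpart

    print(_get_subnet_part(
        {"gateway": "45.55.32.1",
         "netmask": "255.255.224.0",
         "ip_address": "45.55.50.93"}
    ))
    # {'type': 'static', 'control': 'auto', 'address': '45.55.50.93',
    #  'gateway': '45.55.32.1', 'netmask': '255.255.224.0'}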
diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py
index 33dc4c53..592ae80b 100644
--- a/cloudinit/sources/helpers/hetzner.py
+++ b/cloudinit/sources/helpers/hetzner.py
@@ -3,24 +3,25 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import url_helper
-from cloudinit import util
-
import base64
import binascii
+from cloudinit import url_helper, util
+
def read_metadata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read metadata at %s" % url)
return util.load_yaml(response.contents.decode())
def read_userdata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read userdata at %s" % url)
return response.contents
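
Aside: a minimal usage sketch of the two Hetzner helpers above. The link-local endpoint URL is an assumption for illustration, not something this diff defines, and the calls only succeed on a Hetzner instance:

    # Usage sketch only; the URL is assumed, not taken from this diff.
    from cloudinit.sources.helpers import hetzner

    md = hetzner.read_metadata("http://169.254.169.254/hetzner/v1/metadata")
    userdata = hetzner.read_userdata("http://169.254.169.254/hetzner/v1/userdata")
    print(md.get("hostname"), len(userdata))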
diff --git a/cloudinit/sources/helpers/netlink.py b/cloudinit/sources/helpers/netlink.py
index e13d6834..2953e858 100644
--- a/cloudinit/sources/helpers/netlink.py
+++ b/cloudinit/sources/helpers/netlink.py
@@ -2,14 +2,14 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import log as logging
-from cloudinit import util
-from collections import namedtuple
-
import os
import select
import socket
import struct
+from collections import namedtuple
+
+from cloudinit import log as logging
+from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -47,29 +47,30 @@ OPER_TESTING = 4
OPER_DORMANT = 5
OPER_UP = 6
-RTAAttr = namedtuple('RTAAttr', ['length', 'rta_type', 'data'])
-InterfaceOperstate = namedtuple('InterfaceOperstate', ['ifname', 'operstate'])
-NetlinkHeader = namedtuple('NetlinkHeader', ['length', 'type', 'flags', 'seq',
- 'pid'])
+RTAAttr = namedtuple("RTAAttr", ["length", "rta_type", "data"])
+InterfaceOperstate = namedtuple("InterfaceOperstate", ["ifname", "operstate"])
+NetlinkHeader = namedtuple(
+ "NetlinkHeader", ["length", "type", "flags", "seq", "pid"]
+)
class NetlinkCreateSocketError(RuntimeError):
- '''Raised if netlink socket fails during create or bind.'''
+ """Raised if netlink socket fails during create or bind."""
def create_bound_netlink_socket():
- '''Creates netlink socket and bind on netlink group to catch interface
+ """Creates netlink socket and bind on netlink group to catch interface
down/up events. The socket will bound only on RTMGRP_LINK (which only
includes RTM_NEWLINK/RTM_DELLINK/RTM_GETLINK events). The socket is set to
non-blocking mode since we're only receiving messages.
:returns: netlink socket in non-blocking mode
:raises: NetlinkCreateSocketError
- '''
+ """
try:
- netlink_socket = socket.socket(socket.AF_NETLINK,
- socket.SOCK_RAW,
- socket.NETLINK_ROUTE)
+ netlink_socket = socket.socket(
+ socket.AF_NETLINK, socket.SOCK_RAW, socket.NETLINK_ROUTE
+ )
netlink_socket.bind((os.getpid(), RTMGRP_LINK))
netlink_socket.setblocking(0)
except socket.error as e:
@@ -80,7 +81,7 @@ def create_bound_netlink_socket():
def get_netlink_msg_header(data):
- '''Gets netlink message type and length
+ """Gets netlink message type and length
:param: data read from netlink socket
:returns: netlink message type
@@ -92,18 +93,20 @@ def get_netlink_msg_header(data):
__u32 nlmsg_seq; /* Sequence number */
__u32 nlmsg_pid; /* Sender port ID */
};
- '''
- assert (data is not None), ("data is none")
- assert (len(data) >= NLMSGHDR_SIZE), (
- "data is smaller than netlink message header")
- msg_len, msg_type, flags, seq, pid = struct.unpack(NLMSGHDR_FMT,
- data[:MSG_TYPE_OFFSET])
+ """
+ assert data is not None, "data is none"
+ assert (
+ len(data) >= NLMSGHDR_SIZE
+ ), "data is smaller than netlink message header"
+ msg_len, msg_type, flags, seq, pid = struct.unpack(
+ NLMSGHDR_FMT, data[:MSG_TYPE_OFFSET]
+ )
LOG.debug("Got netlink msg of type %d", msg_type)
return NetlinkHeader(msg_len, msg_type, flags, seq, pid)
def read_netlink_socket(netlink_socket, timeout=None):
- '''Select and read from the netlink socket if ready.
+ """Select and read from the netlink socket if ready.
:param: netlink_socket: specify which socket object to read from
:param: timeout: specify a timeout value (integer) to wait while reading,
@@ -111,8 +114,8 @@ def read_netlink_socket(netlink_socket, timeout=None):
:returns: string of data read (max length = <MAX_SIZE>) from socket,
if no data read, returns None
:raises: AssertionError if netlink_socket is None
- '''
- assert (netlink_socket is not None), ("netlink socket is none")
+ """
+ assert netlink_socket is not None, "netlink socket is none"
read_set, _, _ = select.select([netlink_socket], [], [], timeout)
# Incase of timeout,read_set doesn't contain netlink socket.
# just return from this function
@@ -126,32 +129,33 @@ def read_netlink_socket(netlink_socket, timeout=None):
def unpack_rta_attr(data, offset):
- '''Unpack a single rta attribute.
+ """Unpack a single rta attribute.
:param: data: string of data read from netlink socket
:param: offset: starting offset of RTA Attribute
:return: RTAAttr object with length, type and data. On error, return None.
:raises: AssertionError if data is None or offset is not integer.
- '''
- assert (data is not None), ("data is none")
- assert (type(offset) == int), ("offset is not integer")
- assert (offset >= RTATTR_START_OFFSET), (
- "rta offset is less than expected length")
+ """
+ assert data is not None, "data is none"
+ assert type(offset) == int, "offset is not integer"
+ assert (
+ offset >= RTATTR_START_OFFSET
+ ), "rta offset is less than expected length"
length = rta_type = 0
attr_data = None
try:
length = struct.unpack_from("H", data, offset=offset)[0]
- rta_type = struct.unpack_from("H", data, offset=offset+2)[0]
+ rta_type = struct.unpack_from("H", data, offset=offset + 2)[0]
except struct.error:
return None # Should mean our offset is >= remaining data
# Unpack just the attribute's data. Offset by 4 to skip length/type header
- attr_data = data[offset+RTA_DATA_START_OFFSET:offset+length]
+ attr_data = data[offset + RTA_DATA_START_OFFSET : offset + length]
return RTAAttr(length, rta_type, attr_data)
def read_rta_oper_state(data):
- '''Reads Interface name and operational state from RTA Data.
+ """Reads Interface name and operational state from RTA Data.
:param: data: string of data read from netlink socket
:returns: InterfaceOperstate object containing if_name and oper_state.
@@ -159,10 +163,11 @@ def read_rta_oper_state(data):
IFLA_IFNAME messages.
:raises: AssertionError if data is None or length of data is
smaller than RTATTR_START_OFFSET.
- '''
- assert (data is not None), ("data is none")
- assert (len(data) > RTATTR_START_OFFSET), (
- "length of data is smaller than RTATTR_START_OFFSET")
+ """
+ assert data is not None, "data is none"
+ assert (
+ len(data) > RTATTR_START_OFFSET
+ ), "length of data is smaller than RTATTR_START_OFFSET"
ifname = operstate = None
offset = RTATTR_START_OFFSET
while offset <= len(data):
@@ -170,15 +175,16 @@ def read_rta_oper_state(data):
if not attr or attr.length == 0:
break
# Each attribute is 4-byte aligned. Determine pad length.
- padlen = (PAD_ALIGNMENT -
- (attr.length % PAD_ALIGNMENT)) % PAD_ALIGNMENT
+ padlen = (
+ PAD_ALIGNMENT - (attr.length % PAD_ALIGNMENT)
+ ) % PAD_ALIGNMENT
offset += attr.length + padlen
if attr.rta_type == IFLA_OPERSTATE:
operstate = ord(attr.data)
elif attr.rta_type == IFLA_IFNAME:
- interface_name = util.decode_binary(attr.data, 'utf-8')
- ifname = interface_name.strip('\0')
+ interface_name = util.decode_binary(attr.data, "utf-8")
+ ifname = interface_name.strip("\0")
if not ifname or operstate is None:
return None
LOG.debug("rta attrs: ifname %s operstate %d", ifname, operstate)
@@ -186,12 +192,12 @@ def read_rta_oper_state(data):
def wait_for_nic_attach_event(netlink_socket, existing_nics):
- '''Block until a single nic is attached.
+ """Block until a single nic is attached.
:param: netlink_socket: netlink_socket to receive events
:param: existing_nics: List of existing nics so that we can skip them.
:raises: AssertionError if netlink_socket is none.
- '''
+ """
LOG.debug("Preparing to wait for nic attach.")
ifname = None
@@ -204,19 +210,21 @@ def wait_for_nic_attach_event(netlink_socket, existing_nics):
# We can return even if the operational state of the new nic is DOWN
# because we set it to UP before doing dhcp.
- read_netlink_messages(netlink_socket,
- None,
- [RTM_NEWLINK],
- [OPER_UP, OPER_DOWN],
- should_continue_cb)
+ read_netlink_messages(
+ netlink_socket,
+ None,
+ [RTM_NEWLINK],
+ [OPER_UP, OPER_DOWN],
+ should_continue_cb,
+ )
return ifname
def wait_for_nic_detach_event(netlink_socket):
- '''Block until a single nic is detached and its operational state is down.
+ """Block until a single nic is detached and its operational state is down.
:param: netlink_socket: netlink_socket to receive events.
- '''
+ """
LOG.debug("Preparing to wait for nic detach.")
ifname = None
@@ -225,16 +233,14 @@ def wait_for_nic_detach_event(netlink_socket):
ifname = iname
return False
- read_netlink_messages(netlink_socket,
- None,
- [RTM_DELLINK],
- [OPER_DOWN],
- should_continue_cb)
+ read_netlink_messages(
+ netlink_socket, None, [RTM_DELLINK], [OPER_DOWN], should_continue_cb
+ )
return ifname
def wait_for_media_disconnect_connect(netlink_socket, ifname):
- '''Block until media disconnect and connect has happened on an interface.
+ """Block until media disconnect and connect has happened on an interface.
    Listens on the netlink socket to receive netlink events and, when the
    carrier changes from 0 to 1, considers the event to have happened and
    returns from this function.
@@ -242,10 +248,10 @@ def wait_for_media_disconnect_connect(netlink_socket, ifname):
:param: netlink_socket: netlink_socket to receive events
    :param: ifname: Interface name to watch for netlink events
:raises: AssertionError if netlink_socket is None or ifname is None.
- '''
- assert (netlink_socket is not None), ("netlink socket is none")
- assert (ifname is not None), ("interface name is none")
- assert (len(ifname) > 0), ("interface name cannot be empty")
+ """
+ assert netlink_socket is not None, "netlink socket is none"
+ assert ifname is not None, "interface name is none"
+ assert len(ifname) > 0, "interface name cannot be empty"
def should_continue_cb(iname, carrier, prevCarrier):
# check for carrier down, up sequence
@@ -256,19 +262,23 @@ def wait_for_media_disconnect_connect(netlink_socket, ifname):
return True
LOG.debug("Wait for media disconnect and reconnect to happen")
- read_netlink_messages(netlink_socket,
- ifname,
- [RTM_NEWLINK, RTM_DELLINK],
- [OPER_UP, OPER_DOWN],
- should_continue_cb)
-
-
-def read_netlink_messages(netlink_socket,
- ifname_filter,
- rtm_types,
- operstates,
- should_continue_callback):
- ''' Reads from the netlink socket until the condition specified by
+ read_netlink_messages(
+ netlink_socket,
+ ifname,
+ [RTM_NEWLINK, RTM_DELLINK],
+ [OPER_UP, OPER_DOWN],
+ should_continue_cb,
+ )
+
+
+def read_netlink_messages(
+ netlink_socket,
+ ifname_filter,
+ rtm_types,
+ operstates,
+ should_continue_callback,
+):
+ """Reads from the netlink socket until the condition specified by
the continuation callback is met.
:param: netlink_socket: netlink_socket to receive events.
@@ -276,7 +286,7 @@ def read_netlink_messages(netlink_socket,
:param: rtm_types: Type of netlink events to listen for.
:param: operstates: Operational states to listen.
:param: should_continue_callback: Specifies when to stop listening.
- '''
+ """
if netlink_socket is None:
raise RuntimeError("Netlink socket is none")
data = bytes()
@@ -286,9 +296,9 @@ def read_netlink_messages(netlink_socket,
recv_data = read_netlink_socket(netlink_socket, SELECT_TIMEOUT)
if recv_data is None:
continue
- LOG.debug('read %d bytes from socket', len(recv_data))
+ LOG.debug("read %d bytes from socket", len(recv_data))
data += recv_data
- LOG.debug('Length of data after concat %d', len(data))
+ LOG.debug("Length of data after concat %d", len(data))
offset = 0
datalen = len(data)
while offset < datalen:
@@ -300,30 +310,37 @@ def read_netlink_messages(netlink_socket,
if len(nl_msg) < nlheader.length:
LOG.debug("Partial data. Smaller than netlink message")
break
- padlen = (nlheader.length+PAD_ALIGNMENT-1) & ~(PAD_ALIGNMENT-1)
+ padlen = (nlheader.length + PAD_ALIGNMENT - 1) & ~(
+ PAD_ALIGNMENT - 1
+ )
offset = offset + padlen
- LOG.debug('offset to next netlink message: %d', offset)
+ LOG.debug("offset to next netlink message: %d", offset)
# Continue if we are not interested in this message.
if nlheader.type not in rtm_types:
continue
interface_state = read_rta_oper_state(nl_msg)
if interface_state is None:
- LOG.debug('Failed to read rta attributes: %s', interface_state)
+ LOG.debug("Failed to read rta attributes: %s", interface_state)
continue
- if (ifname_filter is not None and
- interface_state.ifname != ifname_filter):
+ if (
+ ifname_filter is not None
+ and interface_state.ifname != ifname_filter
+ ):
LOG.debug(
"Ignored netlink event on interface %s. Waiting for %s.",
- interface_state.ifname, ifname_filter)
+ interface_state.ifname,
+ ifname_filter,
+ )
continue
if interface_state.operstate not in operstates:
continue
prevCarrier = carrier
carrier = interface_state.operstate
- if not should_continue_callback(interface_state.ifname,
- carrier,
- prevCarrier):
+ if not should_continue_callback(
+ interface_state.ifname, carrier, prevCarrier
+ ):
return
data = data[offset:]
+
# vi: ts=4 expandtab
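
Since this file's hunks mostly reflow struct-unpacking calls, a minimal standalone sketch of the header parsing may help readers follow them. It assumes NLMSGHDR_FMT, NLMSGHDR_SIZE and PAD_ALIGNMENT match the module's definitions ("IHHII", 16 and 4) and fabricates a message purely for illustration:

import struct

# Assumed to mirror the module's constants: 16-byte nlmsghdr, 4-byte alignment.
NLMSGHDR_FMT = "IHHII"  # nlmsg_len, nlmsg_type, nlmsg_flags, nlmsg_seq, nlmsg_pid
NLMSGHDR_SIZE = struct.calcsize(NLMSGHDR_FMT)  # 16
PAD_ALIGNMENT = 4
RTM_NEWLINK = 16

# Fabricate a 20-byte message: header claiming total length 20, plus 4 payload bytes.
msg = struct.pack(NLMSGHDR_FMT, 20, RTM_NEWLINK, 0, 1, 0) + b"\x00" * 4

msg_len, msg_type, flags, seq, pid = struct.unpack(
    NLMSGHDR_FMT, msg[:NLMSGHDR_SIZE]
)
# Round up to the next 4-byte boundary, as read_netlink_messages() does to
# step from one message to the next.
padlen = (msg_len + PAD_ALIGNMENT - 1) & ~(PAD_ALIGNMENT - 1)
print(msg_len, msg_type, padlen)  # 20 16 20
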
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 4f566e64..a42543e4 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -14,11 +14,7 @@ import os
from cloudinit import ec2_utils
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import net, sources, subp, url_helper, util
from cloudinit.sources import BrokenMetadata
# See https://docs.openstack.org/user-guide/cli-config-drive.html
@@ -27,30 +23,30 @@ LOG = logging.getLogger(__name__)
FILES_V1 = {
# Path <-> (metadata key name, translator function, default value)
- 'etc/network/interfaces': ('network_config', lambda x: x, ''),
- 'meta.js': ('meta_js', util.load_json, {}),
- "root/.ssh/authorized_keys": ('authorized_keys', lambda x: x, ''),
+ "etc/network/interfaces": ("network_config", lambda x: x, ""),
+ "meta.js": ("meta_js", util.load_json, {}),
+ "root/.ssh/authorized_keys": ("authorized_keys", lambda x: x, ""),
}
KEY_COPIES = (
# Cloud-init metadata names <-> (metadata key, is required)
- ('local-hostname', 'hostname', False),
- ('instance-id', 'uuid', True),
+ ("local-hostname", "hostname", False),
+ ("instance-id", "uuid", True),
)
# Versions and names taken from nova source nova/api/metadata/base.py
-OS_LATEST = 'latest'
-OS_FOLSOM = '2012-08-10'
-OS_GRIZZLY = '2013-04-04'
-OS_HAVANA = '2013-10-17'
-OS_LIBERTY = '2015-10-15'
+OS_LATEST = "latest"
+OS_FOLSOM = "2012-08-10"
+OS_GRIZZLY = "2013-04-04"
+OS_HAVANA = "2013-10-17"
+OS_LIBERTY = "2015-10-15"
# NEWTON_ONE adds 'devices' to md (sriov-pf-passthrough-neutron-port-vlan)
-OS_NEWTON_ONE = '2016-06-30'
+OS_NEWTON_ONE = "2016-06-30"
# NEWTON_TWO adds vendor_data2.json (vendordata-reboot)
-OS_NEWTON_TWO = '2016-10-06'
+OS_NEWTON_TWO = "2016-10-06"
# OS_OCATA adds 'vif' field to devices (sriov-pf-passthrough-neutron-port-vlan)
-OS_OCATA = '2017-02-22'
+OS_OCATA = "2017-02-22"
# OS_ROCKY adds a vf_trusted field to devices (sriov-trusted-vfs)
-OS_ROCKY = '2018-08-27'
+OS_ROCKY = "2018-08-27"
# keep this in chronological order. new supported versions go at the end.
@@ -67,18 +63,18 @@ OS_VERSIONS = (
KNOWN_PHYSICAL_TYPES = (
None,
- 'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
- 'bridge',
- 'cascading', # not present in OpenStack upstream, used on OpenTelekomCloud
- 'dvs',
- 'ethernet',
- 'hw_veb',
- 'hyperv',
- 'ovs',
- 'phy',
- 'tap',
- 'vhostuser',
- 'vif',
+ "bgpovs", # not present in OpenStack upstream but used on OVH cloud.
+ "bridge",
+ "cascading", # not present in OpenStack upstream, used on OpenTelekomCloud
+ "dvs",
+ "ethernet",
+ "hw_veb",
+ "hyperv",
+ "ovs",
+ "phy",
+ "tap",
+ "vhostuser",
+ "vif",
)
@@ -90,7 +86,7 @@ class SourceMixin(object):
def _ec2_name_to_device(self, name):
if not self.ec2_metadata:
return None
- bdm = self.ec2_metadata.get('block-device-mapping', {})
+ bdm = self.ec2_metadata.get("block-device-mapping", {})
for (ent_name, device) in bdm.items():
if name == ent_name:
return device
@@ -105,9 +101,9 @@ class SourceMixin(object):
def _os_name_to_device(self, name):
device = None
try:
- criteria = 'LABEL=%s' % (name)
- if name == 'swap':
- criteria = 'TYPE=%s' % (name)
+ criteria = "LABEL=%s" % (name)
+ if name == "swap":
+ criteria = "TYPE=%s" % (name)
dev_entries = util.find_devs_with(criteria)
if dev_entries:
device = dev_entries[0]
@@ -135,10 +131,10 @@ class SourceMixin(object):
return None
# Try the ec2 mapping first
names = [name]
- if name == 'root':
- names.insert(0, 'ami')
- if name == 'ami':
- names.append('root')
+ if name == "root":
+ names.insert(0, "ami")
+ if name == "ami":
+ names.append("root")
device = None
LOG.debug("Using ec2 style lookup to find device %s", names)
for n in names:
@@ -163,7 +159,6 @@ class SourceMixin(object):
class BaseReader(metaclass=abc.ABCMeta):
-
def __init__(self, base_path):
self.base_path = base_path
@@ -187,8 +182,11 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
versions_available = self._fetch_available_versions()
except Exception as e:
- LOG.debug("Unable to read openstack versions from %s due to: %s",
- self.base_path, e)
+ LOG.debug(
+ "Unable to read openstack versions from %s due to: %s",
+ self.base_path,
+ e,
+ )
versions_available = []
# openstack.OS_VERSIONS is stored in chronological order, so
@@ -202,12 +200,15 @@ class BaseReader(metaclass=abc.ABCMeta):
selected_version = potential_version
break
- LOG.debug("Selected version '%s' from %s", selected_version,
- versions_available)
+ LOG.debug(
+ "Selected version '%s' from %s",
+ selected_version,
+ versions_available,
+ )
return selected_version
def _read_content_path(self, item, decode=False):
- path = item.get('content_path', '').lstrip("/")
+ path = item.get("content_path", "").lstrip("/")
path_pieces = path.split("/")
valid_pieces = [p for p in path_pieces if len(p)]
if not valid_pieces:
@@ -225,43 +226,44 @@ class BaseReader(metaclass=abc.ABCMeta):
"""
load_json_anytype = functools.partial(
- util.load_json, root_types=(dict, list, str))
+ util.load_json, root_types=(dict, list, str)
+ )
def datafiles(version):
files = {}
- files['metadata'] = (
+ files["metadata"] = (
# File path to read
- self._path_join("openstack", version, 'meta_data.json'),
+ self._path_join("openstack", version, "meta_data.json"),
# Is it required?
True,
# Translator function (applied after loading)
util.load_json,
)
- files['userdata'] = (
- self._path_join("openstack", version, 'user_data'),
+ files["userdata"] = (
+ self._path_join("openstack", version, "user_data"),
False,
lambda x: x,
)
- files['vendordata'] = (
- self._path_join("openstack", version, 'vendor_data.json'),
+ files["vendordata"] = (
+ self._path_join("openstack", version, "vendor_data.json"),
False,
load_json_anytype,
)
- files['vendordata2'] = (
- self._path_join("openstack", version, 'vendor_data2.json'),
+ files["vendordata2"] = (
+ self._path_join("openstack", version, "vendor_data2.json"),
False,
load_json_anytype,
)
- files['networkdata'] = (
- self._path_join("openstack", version, 'network_data.json'),
+ files["networkdata"] = (
+ self._path_join("openstack", version, "network_data.json"),
False,
load_json_anytype,
)
return files
results = {
- 'userdata': '',
- 'version': 2,
+ "userdata": "",
+ "version": 2,
}
data = datafiles(self._find_working_version())
for (name, (path, required, translator)) in data.items():
@@ -272,11 +274,13 @@ class BaseReader(metaclass=abc.ABCMeta):
data = self._path_read(path)
except IOError as e:
if not required:
- LOG.debug("Failed reading optional path %s due"
- " to: %s", path, e)
+ LOG.debug(
+ "Failed reading optional path %s due to: %s", path, e
+ )
else:
- LOG.debug("Failed reading mandatory path %s due"
- " to: %s", path, e)
+ LOG.debug(
+ "Failed reading mandatory path %s due to: %s", path, e
+ )
else:
found = True
if required and not found:
@@ -291,11 +295,11 @@ class BaseReader(metaclass=abc.ABCMeta):
if found:
results[name] = data
- metadata = results['metadata']
- if 'random_seed' in metadata:
- random_seed = metadata['random_seed']
+ metadata = results["metadata"]
+ if "random_seed" in metadata:
+ random_seed = metadata["random_seed"]
try:
- metadata['random_seed'] = base64.b64decode(random_seed)
+ metadata["random_seed"] = base64.b64decode(random_seed)
except (ValueError, TypeError) as e:
raise BrokenMetadata(
"Badly formatted metadata random_seed entry: %s" % e
@@ -303,18 +307,18 @@ class BaseReader(metaclass=abc.ABCMeta):
# load any files that were provided
files = {}
- metadata_files = metadata.get('files', [])
+ metadata_files = metadata.get("files", [])
for item in metadata_files:
- if 'path' not in item:
+ if "path" not in item:
continue
- path = item['path']
+ path = item["path"]
try:
files[path] = self._read_content_path(item)
except Exception as e:
raise BrokenMetadata(
"Failed to read provided file %s: %s" % (path, e)
) from e
- results['files'] = files
+ results["files"] = files
# The 'network_config' item in metadata is a content pointer
# to the network config that should be applied. It is just a
@@ -323,7 +327,7 @@ class BaseReader(metaclass=abc.ABCMeta):
if net_item:
try:
content = self._read_content_path(net_item, decode=True)
- results['network_config'] = content
+ results["network_config"] = content
except IOError as e:
raise BrokenMetadata(
"Failed to read network configuration: %s" % (e)
@@ -334,12 +338,12 @@ class BaseReader(metaclass=abc.ABCMeta):
# if they specify 'dsmode' they're indicating the mode that they intend
# for this datasource to operate in.
try:
- results['dsmode'] = metadata['meta']['dsmode']
+ results["dsmode"] = metadata["meta"]["dsmode"]
except KeyError:
pass
# Read any ec2-metadata (if applicable)
- results['ec2-metadata'] = self._read_ec2_metadata()
+ results["ec2-metadata"] = self._read_ec2_metadata()
# Perform some misc. metadata key renames...
for (target_key, source_key, is_required) in KEY_COPIES:
@@ -364,15 +368,19 @@ class ConfigDriveReader(BaseReader):
def _fetch_available_versions(self):
if self._versions is None:
- path = self._path_join(self.base_path, 'openstack')
- found = [d for d in os.listdir(path)
- if os.path.isdir(os.path.join(path))]
+ path = self._path_join(self.base_path, "openstack")
+ found = [
+ d
+ for d in os.listdir(path)
+ if os.path.isdir(os.path.join(path))
+ ]
self._versions = sorted(found)
return self._versions
def _read_ec2_metadata(self):
- path = self._path_join(self.base_path,
- 'ec2', 'latest', 'meta-data.json')
+ path = self._path_join(
+ self.base_path, "ec2", "latest", "meta-data.json"
+ )
if not os.path.exists(path):
return {}
else:
@@ -419,14 +427,14 @@ class ConfigDriveReader(BaseReader):
else:
md[key] = copy.deepcopy(default)
- keydata = md['authorized_keys']
- meta_js = md['meta_js']
+ keydata = md["authorized_keys"]
+ meta_js = md["meta_js"]
# keydata in meta_js is preferred over "injected"
- keydata = meta_js.get('public-keys', keydata)
+ keydata = meta_js.get("public-keys", keydata)
if keydata:
lines = keydata.splitlines()
- md['public-keys'] = [
+ md["public-keys"] = [
line
for line in lines
if len(line) and not line.startswith("#")
@@ -434,25 +442,25 @@ class ConfigDriveReader(BaseReader):
# config-drive-v1 has no way for openstack to provide the instance-id
# so we copy that into metadata from the user input
- if 'instance-id' in meta_js:
- md['instance-id'] = meta_js['instance-id']
+ if "instance-id" in meta_js:
+ md["instance-id"] = meta_js["instance-id"]
results = {
- 'version': 1,
- 'metadata': md,
+ "version": 1,
+ "metadata": md,
}
# allow the user to specify 'dsmode' in a meta tag
- if 'dsmode' in meta_js:
- results['dsmode'] = meta_js['dsmode']
+ if "dsmode" in meta_js:
+ results["dsmode"] = meta_js["dsmode"]
# config-drive-v1 has no way of specifying user-data, so the user has
# to cheat and stuff it in a meta tag also.
- results['userdata'] = meta_js.get('user-data', '')
+ results["userdata"] = meta_js.get("user-data", "")
# this implementation does not support files other than
# network/interfaces and authorized_keys...
- results['files'] = {}
+ results["files"] = {}
return results
@@ -481,7 +489,6 @@ class MetadataReader(BaseReader):
return self._versions
def _path_read(self, path, decode=False):
-
def should_retry_cb(_request_args, cause):
try:
code = int(cause.code)
@@ -492,11 +499,13 @@ class MetadataReader(BaseReader):
pass
return True
- response = url_helper.readurl(path,
- retries=self.retries,
- ssl_details=self.ssl_details,
- timeout=self.timeout,
- exception_cb=should_retry_cb)
+ response = url_helper.readurl(
+ path,
+ retries=self.retries,
+ ssl_details=self.ssl_details,
+ timeout=self.timeout,
+ exception_cb=should_retry_cb,
+ )
if decode:
return response.contents.decode()
else:
@@ -506,9 +515,11 @@ class MetadataReader(BaseReader):
return url_helper.combine_url(base, *add_ons)
def _read_ec2_metadata(self):
- return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details,
- timeout=self.timeout,
- retries=self.retries)
+ return ec2_utils.get_instance_metadata(
+ ssl_details=self.ssl_details,
+ timeout=self.timeout,
+ retries=self.retries,
+ )
# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
@@ -544,32 +555,32 @@ def convert_net_json(network_json=None, known_macs=None):
# dict of network_config key for filtering network_json
valid_keys = {
- 'physical': [
- 'name',
- 'type',
- 'mac_address',
- 'subnets',
- 'params',
- 'mtu',
+ "physical": [
+ "name",
+ "type",
+ "mac_address",
+ "subnets",
+ "params",
+ "mtu",
],
- 'subnet': [
- 'type',
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'scope',
- 'dns_nameservers',
- 'dns_search',
- 'routes',
+ "subnet": [
+ "type",
+ "address",
+ "netmask",
+ "broadcast",
+ "metric",
+ "gateway",
+ "pointopoint",
+ "scope",
+ "dns_nameservers",
+ "dns_search",
+ "routes",
],
}
- links = network_json.get('links', [])
- networks = network_json.get('networks', [])
- services = network_json.get('services', [])
+ links = network_json.get("links", [])
+ networks = network_json.get("networks", [])
+ services = network_json.get("services", [])
link_updates = []
link_id_info = {}
@@ -578,65 +589,77 @@ def convert_net_json(network_json=None, known_macs=None):
config = []
for link in links:
subnets = []
- cfg = dict((k, v) for k, v in link.items()
- if k in valid_keys['physical'])
+ cfg = dict(
+ (k, v) for k, v in link.items() if k in valid_keys["physical"]
+ )
# 'name' is not in openstack spec yet, but we will support it if it is
# present. The 'id' in the spec is currently implemented as the host
# nic's name, meaning something like 'tap-adfasdffd'. We do not want
# to name guest devices with such ugly names.
- if 'name' in link:
- cfg['name'] = link['name']
+ if "name" in link:
+ cfg["name"] = link["name"]
link_mac_addr = None
- if link.get('ethernet_mac_address'):
- link_mac_addr = link.get('ethernet_mac_address').lower()
- link_id_info[link['id']] = link_mac_addr
-
- curinfo = {'name': cfg.get('name'), 'mac': link_mac_addr,
- 'id': link['id'], 'type': link['type']}
-
- for network in [n for n in networks
- if n['link'] == link['id']]:
- subnet = dict((k, v) for k, v in network.items()
- if k in valid_keys['subnet'])
-
- if network['type'] == 'ipv4_dhcp':
- subnet.update({'type': 'dhcp4'})
- elif network['type'] == 'ipv6_dhcp':
- subnet.update({'type': 'dhcp6'})
- elif network['type'] in ['ipv6_slaac', 'ipv6_dhcpv6-stateless',
- 'ipv6_dhcpv6-stateful']:
- subnet.update({'type': network['type']})
- elif network['type'] in ['ipv4', 'static']:
- subnet.update({
- 'type': 'static',
- 'address': network.get('ip_address'),
- })
- elif network['type'] in ['ipv6', 'static6']:
- cfg.update({'accept-ra': False})
- subnet.update({
- 'type': 'static6',
- 'address': network.get('ip_address'),
- })
+ if link.get("ethernet_mac_address"):
+ link_mac_addr = link.get("ethernet_mac_address").lower()
+ link_id_info[link["id"]] = link_mac_addr
+
+ curinfo = {
+ "name": cfg.get("name"),
+ "mac": link_mac_addr,
+ "id": link["id"],
+ "type": link["type"],
+ }
+
+ for network in [n for n in networks if n["link"] == link["id"]]:
+ subnet = dict(
+ (k, v) for k, v in network.items() if k in valid_keys["subnet"]
+ )
+
+ if network["type"] == "ipv4_dhcp":
+ subnet.update({"type": "dhcp4"})
+ elif network["type"] == "ipv6_dhcp":
+ subnet.update({"type": "dhcp6"})
+ elif network["type"] in [
+ "ipv6_slaac",
+ "ipv6_dhcpv6-stateless",
+ "ipv6_dhcpv6-stateful",
+ ]:
+ subnet.update({"type": network["type"]})
+ elif network["type"] in ["ipv4", "static"]:
+ subnet.update(
+ {
+ "type": "static",
+ "address": network.get("ip_address"),
+ }
+ )
+ elif network["type"] in ["ipv6", "static6"]:
+ cfg.update({"accept-ra": False})
+ subnet.update(
+ {
+ "type": "static6",
+ "address": network.get("ip_address"),
+ }
+ )
# Enable accept_ra for stateful and legacy ipv6_dhcp types
- if network['type'] in ['ipv6_dhcpv6-stateful', 'ipv6_dhcp']:
- cfg.update({'accept-ra': True})
+ if network["type"] in ["ipv6_dhcpv6-stateful", "ipv6_dhcp"]:
+ cfg.update({"accept-ra": True})
- if network['type'] == 'ipv4':
- subnet['ipv4'] = True
- if network['type'] == 'ipv6':
- subnet['ipv6'] = True
+ if network["type"] == "ipv4":
+ subnet["ipv4"] = True
+ if network["type"] == "ipv6":
+ subnet["ipv6"] = True
subnets.append(subnet)
- cfg.update({'subnets': subnets})
- if link['type'] in ['bond']:
+ cfg.update({"subnets": subnets})
+ if link["type"] in ["bond"]:
params = {}
if link_mac_addr:
- params['mac_address'] = link_mac_addr
+ params["mac_address"] = link_mac_addr
for k, v in link.items():
- if k == 'bond_links':
+ if k == "bond_links":
continue
- elif k.startswith('bond'):
+ elif k.startswith("bond"):
params.update({k: v})
# openstack does not provide a name for the bond.
@@ -649,35 +672,45 @@ def convert_net_json(network_json=None, known_macs=None):
# to the network config by their nic name.
# store that in bond_links_needed, and update these later.
link_updates.append(
- (cfg, 'bond_interfaces', '%s',
- copy.deepcopy(link['bond_links']))
+ (
+ cfg,
+ "bond_interfaces",
+ "%s",
+ copy.deepcopy(link["bond_links"]),
+ )
+ )
+ cfg.update({"params": params, "name": link_name})
+
+ curinfo["name"] = link_name
+ elif link["type"] in ["vlan"]:
+ name = "%s.%s" % (link["vlan_link"], link["vlan_id"])
+ cfg.update(
+ {
+ "name": name,
+ "vlan_id": link["vlan_id"],
+ "mac_address": link["vlan_mac_address"],
+ }
+ )
+ link_updates.append((cfg, "vlan_link", "%s", link["vlan_link"]))
+ link_updates.append(
+ (cfg, "name", "%%s.%s" % link["vlan_id"], link["vlan_link"])
)
- cfg.update({'params': params, 'name': link_name})
-
- curinfo['name'] = link_name
- elif link['type'] in ['vlan']:
- name = "%s.%s" % (link['vlan_link'], link['vlan_id'])
- cfg.update({
- 'name': name,
- 'vlan_id': link['vlan_id'],
- 'mac_address': link['vlan_mac_address'],
- })
- link_updates.append((cfg, 'vlan_link', '%s', link['vlan_link']))
- link_updates.append((cfg, 'name', "%%s.%s" % link['vlan_id'],
- link['vlan_link']))
- curinfo.update({'mac': link['vlan_mac_address'],
- 'name': name})
+ curinfo.update({"mac": link["vlan_mac_address"], "name": name})
else:
- if link['type'] not in KNOWN_PHYSICAL_TYPES:
- LOG.warning('Unknown network_data link type (%s); treating as'
- ' physical', link['type'])
- cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
+ if link["type"] not in KNOWN_PHYSICAL_TYPES:
+ LOG.warning(
+ "Unknown network_data link type (%s); treating as"
+ " physical",
+ link["type"],
+ )
+ cfg.update({"type": "physical", "mac_address": link_mac_addr})
config.append(cfg)
- link_id_info[curinfo['id']] = curinfo
+ link_id_info[curinfo["id"]] = curinfo
- need_names = [d for d in config
- if d.get('type') == 'physical' and 'name' not in d]
+ need_names = [
+ d for d in config if d.get("type") == "physical" and "name" not in d
+ ]
if need_names or link_updates:
if known_macs is None:
@@ -685,26 +718,26 @@ def convert_net_json(network_json=None, known_macs=None):
# go through and fill out the link_id_info with names
for _link_id, info in link_id_info.items():
- if info.get('name'):
+ if info.get("name"):
continue
- if info.get('mac') in known_macs:
- info['name'] = known_macs[info['mac']]
+ if info.get("mac") in known_macs:
+ info["name"] = known_macs[info["mac"]]
for d in need_names:
- mac = d.get('mac_address')
+ mac = d.get("mac_address")
if not mac:
raise ValueError("No mac_address or name entry for %s" % d)
if mac not in known_macs:
raise ValueError("Unable to find a system nic for %s" % d)
- d['name'] = known_macs[mac]
+ d["name"] = known_macs[mac]
for cfg, key, fmt, targets in link_updates:
if isinstance(targets, (list, tuple)):
cfg[key] = [
- fmt % link_id_info[target]['name'] for target in targets
+ fmt % link_id_info[target]["name"] for target in targets
]
else:
- cfg[key] = fmt % link_id_info[targets]['name']
+ cfg[key] = fmt % link_id_info[targets]["name"]
# Infiniband interfaces may be referenced in network_data.json by a 6 byte
# Ethernet MAC-style address, and we use that address to look up the
@@ -713,15 +746,16 @@ def convert_net_json(network_json=None, known_macs=None):
ib_known_hwaddrs = net.get_ib_hwaddrs_by_interface()
if ib_known_hwaddrs:
for cfg in config:
- if cfg['name'] in ib_known_hwaddrs:
- cfg['mac_address'] = ib_known_hwaddrs[cfg['name']]
- cfg['type'] = 'infiniband'
+ if cfg["name"] in ib_known_hwaddrs:
+ cfg["mac_address"] = ib_known_hwaddrs[cfg["name"]]
+ cfg["type"] = "infiniband"
for service in services:
cfg = service
- cfg.update({'type': 'nameserver'})
+ cfg.update({"type": "nameserver"})
config.append(cfg)
- return {'version': 1, 'config': config}
+ return {"version": 1, "config": config}
+
# vi: ts=4 expandtab
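
For readers skimming this file's reflowed dict literals, a hand-rolled sketch of the physical-link case of convert_net_json() shows the shape of the translation. The input follows the OpenStack network_data.json spec, the MAC-to-name table is invented, and the code mirrors (but does not call) the real function:

network_json = {
    "links": [
        {
            "id": "tap-adfasdffd",
            "type": "phy",
            "ethernet_mac_address": "FA:16:3E:00:00:01",
            "mtu": 1500,
        }
    ],
    "networks": [{"link": "tap-adfasdffd", "type": "ipv4_dhcp"}],
    "services": [{"type": "dns", "address": "8.8.8.8"}],
}
known_macs = {"fa:16:3e:00:00:01": "eth0"}

link = network_json["links"][0]
mac = link["ethernet_mac_address"].lower()
cfg = {
    "type": "physical",
    "mac_address": mac,
    "name": known_macs[mac],  # 'phy' links are named via MAC lookup, not 'id'
    "mtu": link["mtu"],
    "subnets": [{"type": "dhcp4"}],  # 'ipv4_dhcp' networks map to 'dhcp4'
}
# Each service entry is reused in place with its type forced to 'nameserver'.
nameserver = dict(network_json["services"][0], type="nameserver")
print({"version": 1, "config": [cfg, nameserver]})
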
diff --git a/cloudinit/sources/helpers/upcloud.py b/cloudinit/sources/helpers/upcloud.py
index 199baa58..e7b95a5e 100644
--- a/cloudinit/sources/helpers/upcloud.py
+++ b/cloudinit/sources/helpers/upcloud.py
@@ -169,7 +169,7 @@ def convert_to_network_config_v1(config):
interface = {
"type": "physical",
"name": sysfs_name,
- "mac_address": mac_address
+ "mac_address": mac_address,
}
subnets = []
@@ -182,10 +182,9 @@ def convert_to_network_config_v1(config):
if config.get("dns"):
LOG.debug("Setting DNS nameservers to %s", config.get("dns"))
- nic_configs.append({
- "type": "nameserver",
- "address": config.get("dns")
- })
+ nic_configs.append(
+ {"type": "nameserver", "address": config.get("dns")}
+ )
return {"version": 1, "config": nic_configs}
@@ -216,8 +215,7 @@ def read_sysinfo():
server_uuid = dmi.read_dmi_data("system-uuid")
if server_uuid:
LOG.debug(
- "system identified via SMBIOS as UpCloud server: %s",
- server_uuid
+ "system identified via SMBIOS as UpCloud server: %s", server_uuid
)
else:
msg = (
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
index 9a07eafa..a5c67bb7 100644
--- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py
+++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
@@ -9,7 +9,8 @@
class BootProtoEnum(object):
"""Specifies the NIC Boot Settings."""
- DHCP = 'dhcp'
- STATIC = 'static'
+ DHCP = "dhcp"
+ STATIC = "static"
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index bdfab5a0..39dacee0 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -15,20 +15,20 @@ class Config(object):
Specification file.
"""
- CUSTOM_SCRIPT = 'CUSTOM-SCRIPT|SCRIPT-NAME'
- DNS = 'DNS|NAMESERVER|'
- DOMAINNAME = 'NETWORK|DOMAINNAME'
- HOSTNAME = 'NETWORK|HOSTNAME'
- MARKERID = 'MISC|MARKER-ID'
- PASS = 'PASSWORD|-PASS'
- RESETPASS = 'PASSWORD|RESET'
- SUFFIX = 'DNS|SUFFIX|'
- TIMEZONE = 'DATETIME|TIMEZONE'
- UTC = 'DATETIME|UTC'
- POST_GC_STATUS = 'MISC|POST-GC-STATUS'
- DEFAULT_RUN_POST_SCRIPT = 'MISC|DEFAULT-RUN-POST-CUST-SCRIPT'
- CLOUDINIT_META_DATA = 'CLOUDINIT|METADATA'
- CLOUDINIT_USER_DATA = 'CLOUDINIT|USERDATA'
+ CUSTOM_SCRIPT = "CUSTOM-SCRIPT|SCRIPT-NAME"
+ DNS = "DNS|NAMESERVER|"
+ DOMAINNAME = "NETWORK|DOMAINNAME"
+ HOSTNAME = "NETWORK|HOSTNAME"
+ MARKERID = "MISC|MARKER-ID"
+ PASS = "PASSWORD|-PASS"
+ RESETPASS = "PASSWORD|RESET"
+ SUFFIX = "DNS|SUFFIX|"
+ TIMEZONE = "DATETIME|TIMEZONE"
+ UTC = "DATETIME|UTC"
+ POST_GC_STATUS = "MISC|POST-GC-STATUS"
+ DEFAULT_RUN_POST_SCRIPT = "MISC|DEFAULT-RUN-POST-CUST-SCRIPT"
+ CLOUDINIT_META_DATA = "CLOUDINIT|METADATA"
+ CLOUDINIT_USER_DATA = "CLOUDINIT|USERDATA"
def __init__(self, configFile):
self._configFile = configFile
@@ -84,8 +84,8 @@ class Config(object):
def nics(self):
"""Return the list of associated NICs."""
res = []
- nics = self._configFile['NIC-CONFIG|NICS']
- for nic in nics.split(','):
+ nics = self._configFile["NIC-CONFIG|NICS"]
+ for nic in nics.split(","):
res.append(Nic(nic, self._configFile))
return res
@@ -93,11 +93,11 @@ class Config(object):
@property
def reset_password(self):
"""Retreives if the root password needs to be reset."""
- resetPass = self._configFile.get(Config.RESETPASS, 'no')
+ resetPass = self._configFile.get(Config.RESETPASS, "no")
resetPass = resetPass.lower()
- if resetPass not in ('yes', 'no'):
- raise ValueError('ResetPassword value should be yes/no')
- return resetPass == 'yes'
+ if resetPass not in ("yes", "no"):
+ raise ValueError("ResetPassword value should be yes/no")
+ return resetPass == "yes"
@property
def marker_id(self):
@@ -112,11 +112,11 @@ class Config(object):
@property
def post_gc_status(self):
"""Return whether to post guestinfo.gc.status VMX property."""
- postGcStatus = self._configFile.get(Config.POST_GC_STATUS, 'no')
+ postGcStatus = self._configFile.get(Config.POST_GC_STATUS, "no")
postGcStatus = postGcStatus.lower()
- if postGcStatus not in ('yes', 'no'):
- raise ValueError('PostGcStatus value should be yes/no')
- return postGcStatus == 'yes'
+ if postGcStatus not in ("yes", "no"):
+ raise ValueError("PostGcStatus value should be yes/no")
+ return postGcStatus == "yes"
@property
def default_run_post_script(self):
@@ -125,12 +125,12 @@ class Config(object):
is absent in VM Tools configuration
"""
defaultRunPostScript = self._configFile.get(
- Config.DEFAULT_RUN_POST_SCRIPT,
- 'no')
+ Config.DEFAULT_RUN_POST_SCRIPT, "no"
+ )
defaultRunPostScript = defaultRunPostScript.lower()
- if defaultRunPostScript not in ('yes', 'no'):
- raise ValueError('defaultRunPostScript value should be yes/no')
- return defaultRunPostScript == 'yes'
+ if defaultRunPostScript not in ("yes", "no"):
+ raise ValueError("defaultRunPostScript value should be yes/no")
+ return defaultRunPostScript == "yes"
@property
def meta_data_name(self):
@@ -142,4 +142,5 @@ class Config(object):
"""Return the name of cloud-init user data."""
return self._configFile.get(Config.CLOUDINIT_USER_DATA, None)
+
# vi: ts=4 expandtab
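
The reset_password, post_gc_status and default_run_post_script properties above share one yes/no validation pattern; a small sketch, with a plain dict standing in for the parsed ConfigFile and an invented key value:

config = {"PASSWORD|RESET": "Yes"}

reset_pass = config.get("PASSWORD|RESET", "no").lower()
if reset_pass not in ("yes", "no"):
    raise ValueError("ResetPassword value should be yes/no")
print(reset_pass == "yes")  # True
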
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
index 2ab22de9..8240ea8f 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -9,8 +9,7 @@ import logging
import os
import stat
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
@@ -24,8 +23,7 @@ class CustomScriptConstant(object):
# The user defined custom script
CUSTOM_SCRIPT_NAME = "customize.sh"
- CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR,
- CUSTOM_SCRIPT_NAME)
+ CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR, CUSTOM_SCRIPT_NAME)
POST_CUSTOM_PENDING_MARKER = "/.guest-customization-post-reboot-pending"
# The cc_scripts_per_instance script to launch custom script
POST_CUSTOM_SCRIPT_NAME = "post-customize-guest.sh"
@@ -39,22 +37,25 @@ class RunCustomScript(object):
def prepare_script(self):
if not os.path.exists(self.scriptpath):
- raise CustomScriptNotFound("Script %s not found!! "
- "Cannot execute custom script!"
- % self.scriptpath)
+ raise CustomScriptNotFound(
+ "Script %s not found!! Cannot execute custom script!"
+ % self.scriptpath
+ )
util.ensure_dir(CustomScriptConstant.CUSTOM_TMP_DIR)
- LOG.debug("Copying custom script to %s",
- CustomScriptConstant.CUSTOM_SCRIPT)
+ LOG.debug(
+ "Copying custom script to %s", CustomScriptConstant.CUSTOM_SCRIPT
+ )
util.copy(self.scriptpath, CustomScriptConstant.CUSTOM_SCRIPT)
# Strip any CR characters from the decoded script
- content = util.load_file(
- CustomScriptConstant.CUSTOM_SCRIPT).replace("\r", "")
- util.write_file(CustomScriptConstant.CUSTOM_SCRIPT,
- content,
- mode=0o544)
+ content = util.load_file(CustomScriptConstant.CUSTOM_SCRIPT).replace(
+ "\r", ""
+ )
+ util.write_file(
+ CustomScriptConstant.CUSTOM_SCRIPT, content, mode=0o544
+ )
class PreCustomScript(RunCustomScript):
@@ -70,8 +71,8 @@ class PostCustomScript(RunCustomScript):
super(PostCustomScript, self).__init__(scriptname, directory)
self.ccScriptsDir = ccScriptsDir
self.ccScriptPath = os.path.join(
- ccScriptsDir,
- CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME)
+ ccScriptsDir, CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME
+ )
def execute(self):
"""
@@ -81,15 +82,17 @@ class PostCustomScript(RunCustomScript):
"""
self.prepare_script()
- LOG.debug("Copying post customize run script to %s",
- self.ccScriptPath)
+ LOG.debug("Copying post customize run script to %s", self.ccScriptPath)
util.copy(
- os.path.join(self.directory,
- CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME),
- self.ccScriptPath)
+ os.path.join(
+ self.directory, CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME
+ ),
+ self.ccScriptPath,
+ )
st = os.stat(self.ccScriptPath)
os.chmod(self.ccScriptPath, st.st_mode | stat.S_IEXEC)
LOG.info("Creating post customization pending marker")
util.ensure_file(CustomScriptConstant.POST_CUSTOM_PENDING_MARKER)
+
# vi: ts=4 expandtab
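
A short sketch of the script preparation performed above — copy, strip CR characters, mark executable — using a temporary directory in place of CustomScriptConstant's fixed paths:

import os
import stat
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    script = os.path.join(tmp, "customize.sh")
    with open(script, "w", newline="") as f:
        f.write("#!/bin/sh\r\necho customized\r\n")

    # Strip any CR characters from the script, as prepare_script() does.
    with open(script, newline="") as f:
        content = f.read().replace("\r", "")
    with open(script, "w") as f:
        f.write(content)

    # Equivalent of os.chmod(path, st.st_mode | stat.S_IEXEC) in execute().
    st = os.stat(script)
    os.chmod(script, st.st_mode | stat.S_IEXEC)
    print(oct(os.stat(script).st_mode & 0o777))
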
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index fc034c95..845294ec 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -35,7 +35,7 @@ class ConfigFile(ConfigSource, dict):
key = key.strip()
val = val.strip()
- if key.startswith('-') or '|-' in key:
+ if key.startswith("-") or "|-" in key:
canLog = False
else:
canLog = True
@@ -59,7 +59,7 @@ class ConfigFile(ConfigSource, dict):
Keyword arguments:
filename - The full path to the config file.
"""
- logger.info('Parsing the config file %s.', filename)
+ logger.info("Parsing the config file %s.", filename)
config = configparser.ConfigParser()
config.optionxform = str
@@ -71,7 +71,7 @@ class ConfigFile(ConfigSource, dict):
logger.debug("FOUND CATEGORY = '%s'", category)
for (key, value) in config.items(category):
- self._insertKey(category + '|' + key, value)
+ self._insertKey(category + "|" + key, value)
def should_keep_current_value(self, key):
"""
@@ -115,4 +115,5 @@ class ConfigFile(ConfigSource, dict):
"""
return len([key for key in self if key.startswith(prefix)])
+
# vi: ts=4 expandtab
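
The "CATEGORY|KEY" flattening above is the heart of this parser; a self-contained sketch with invented sample content:

import configparser

sample = """
[NETWORK]
HOSTNAME = vm01
DOMAINNAME = example.invalid

[NIC-CONFIG]
NICS = NIC1
"""

config = configparser.ConfigParser()
config.optionxform = str  # keep keys case-sensitive, as the parser above does
config.read_string(sample)

flat = {}
for category in config.sections():
    for key, value in config.items(category):
        flat[category + "|" + key] = value

print(flat["NETWORK|HOSTNAME"], flat["NIC-CONFIG|NICS"])  # vm01 NIC1
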
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
index 5899d8f7..3b3b2d5a 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -11,4 +11,5 @@ from .config_source import ConfigSource
class ConfigNamespace(ConfigSource):
"""Specifies the Config Namespace."""
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index f5a0ebe4..df621f20 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -9,9 +9,8 @@ import logging
import os
import re
+from cloudinit import subp, util
from cloudinit.net.network_state import mask_to_net_prefix
-from cloudinit import subp
-from cloudinit import util
logger = logging.getLogger(__name__)
@@ -63,8 +62,10 @@ class NicConfigurator(object):
if not primary_nics:
return None
elif len(primary_nics) > 1:
- raise Exception('There can only be one primary nic',
- [nic.mac for nic in primary_nics])
+ raise Exception(
+ "There can only be one primary nic",
+ [nic.mac for nic in primary_nics],
+ )
else:
return primary_nics[0]
@@ -73,17 +74,17 @@ class NicConfigurator(object):
        Create the mac2Name dictionary.
        The mac address(es) are in lower case.
"""
- cmd = ['ip', 'addr', 'show']
+ cmd = ["ip", "addr", "show"]
output, _err = subp.subp(cmd)
- sections = re.split(r'\n\d+: ', '\n' + output)[1:]
+ sections = re.split(r"\n\d+: ", "\n" + output)[1:]
- macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
+ macPat = r"link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))"
for section in sections:
match = re.search(macPat, section)
if not match: # Only keep info about nics
continue
mac = match.group(1).lower()
- name = section.split(':', 1)[0]
+ name = section.split(":", 1)[0]
self.mac2Name[mac] = name
def gen_one_nic(self, nic):
@@ -95,11 +96,11 @@ class NicConfigurator(object):
mac = nic.mac.lower()
name = self.mac2Name.get(mac)
if not name:
- raise ValueError('No known device has MACADDR: %s' % nic.mac)
+ raise ValueError("No known device has MACADDR: %s" % nic.mac)
nics_cfg_list = []
- cfg = {'type': 'physical', 'name': name, 'mac_address': mac}
+ cfg = {"type": "physical", "name": name, "mac_address": mac}
subnet_list = []
route_list = []
@@ -114,7 +115,7 @@ class NicConfigurator(object):
subnet_list.extend(subnets)
route_list.extend(routes)
- cfg.update({'subnets': subnet_list})
+ cfg.update({"subnets": subnet_list})
nics_cfg_list.append(cfg)
if route_list:
@@ -135,17 +136,17 @@ class NicConfigurator(object):
route_list = []
if nic.onboot:
- subnet.update({'control': 'auto'})
+ subnet.update({"control": "auto"})
bootproto = nic.bootProto.lower()
- if nic.ipv4_mode.lower() == 'disabled':
- bootproto = 'manual'
+ if nic.ipv4_mode.lower() == "disabled":
+ bootproto = "manual"
- if bootproto != 'static':
- subnet.update({'type': 'dhcp'})
+ if bootproto != "static":
+ subnet.update({"type": "dhcp"})
return ([subnet], route_list)
else:
- subnet.update({'type': 'static'})
+ subnet.update({"type": "static"})
# Static Ipv4
addrs = nic.staticIpv4
@@ -154,20 +155,21 @@ class NicConfigurator(object):
v4 = addrs[0]
if v4.ip:
- subnet.update({'address': v4.ip})
+ subnet.update({"address": v4.ip})
if v4.netmask:
- subnet.update({'netmask': v4.netmask})
+ subnet.update({"netmask": v4.netmask})
# Add the primary gateway
if nic.primary and v4.gateways:
self.ipv4PrimaryGateway = v4.gateways[0]
- subnet.update({'gateway': self.ipv4PrimaryGateway})
+ subnet.update({"gateway": self.ipv4PrimaryGateway})
return ([subnet], route_list)
# Add routes if there is no primary nic
if not self._primaryNic and v4.gateways:
subnet.update(
- {'routes': self.gen_ipv4_route(nic, v4.gateways, v4.netmask)})
+ {"routes": self.gen_ipv4_route(nic, v4.gateways, v4.netmask)}
+ )
return ([subnet], route_list)
@@ -184,10 +186,14 @@ class NicConfigurator(object):
for gateway in gateways:
destination = "%s/%d" % (gen_subnet(gateway, netmask), cidr)
- route_list.append({'destination': destination,
- 'type': 'route',
- 'gateway': gateway,
- 'metric': 10000})
+ route_list.append(
+ {
+ "destination": destination,
+ "type": "route",
+ "gateway": gateway,
+ "metric": 10000,
+ }
+ )
return route_list
@@ -208,9 +214,11 @@ class NicConfigurator(object):
addrs = nic.staticIpv6
for addr in addrs:
- subnet = {'type': 'static6',
- 'address': addr.ip,
- 'netmask': addr.netmask}
+ subnet = {
+ "type": "static6",
+ "address": addr.ip,
+ "netmask": addr.netmask,
+ }
subnet_list.append(subnet)
# TODO: Add the primary gateway
@@ -226,9 +234,9 @@ class NicConfigurator(object):
route_list = []
for addr in addrs:
- route_list.append({'type': 'route',
- 'gateway': addr.gateway,
- 'metric': 10000})
+ route_list.append(
+ {"type": "route", "gateway": addr.gateway, "metric": 10000}
+ )
return route_list
@@ -246,7 +254,7 @@ class NicConfigurator(object):
return nics_cfg_list
def clear_dhcp(self):
- logger.info('Clearing DHCP leases')
+ logger.info("Clearing DHCP leases")
# Ignore the return code 1.
subp.subp(["pkill", "dhclient"], rcs=[0, 1])
@@ -262,11 +270,12 @@ class NicConfigurator(object):
logger.info("Debian OS not detected. Skipping the configure step")
return
- containingDir = '/etc/network'
+ containingDir = "/etc/network"
- interfaceFile = os.path.join(containingDir, 'interfaces')
- originalFile = os.path.join(containingDir,
- 'interfaces.before_vmware_customization')
+ interfaceFile = os.path.join(containingDir, "interfaces")
+ originalFile = os.path.join(
+ containingDir, "interfaces.before_vmware_customization"
+ )
if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
os.rename(interfaceFile, originalFile)
@@ -278,8 +287,9 @@ class NicConfigurator(object):
"source-directory /etc/network/interfaces.d",
]
- util.write_file(interfaceFile, content='\n'.join(lines))
+ util.write_file(interfaceFile, content="\n".join(lines))
self.clear_dhcp()
+
# vi: ts=4 expandtab
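
A sketch of the mac2Name mapping built above from `ip addr show` output, run here against canned text instead of shelling out through subp:

import re

output = """1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500
    link/ether 52:54:00:12:34:56 brd ff:ff:ff:ff:ff:ff
"""

mac_pat = r"link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))"
mac2name = {}
for section in re.split(r"\n\d+: ", "\n" + output)[1:]:
    match = re.search(mac_pat, section)
    if not match:  # skip sections without an ether link (e.g. loopback)
        continue
    mac2name[match.group(1).lower()] = section.split(":", 1)[0]

print(mac2name)  # {'52:54:00:12:34:56': 'eth0'}
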
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
index d16a7690..4d3967a1 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -9,8 +9,7 @@
import logging
import os
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
@@ -20,6 +19,7 @@ class PasswordConfigurator(object):
Class for changing configurations related to passwords in a VM. Includes
setting and expiring passwords.
"""
+
def configure(self, passwd, resetPasswd, distro):
"""
Main method to perform all functionalities based on configuration file
@@ -28,25 +28,25 @@ class PasswordConfigurator(object):
@param resetPasswd: boolean to determine if password needs to be reset.
@return cfg: dict to be used by cloud-init set_passwd code.
"""
- LOG.info('Starting password configuration')
+ LOG.info("Starting password configuration")
if passwd:
passwd = util.b64d(passwd)
allRootUsers = []
- for line in open('/etc/passwd', 'r'):
- if line.split(':')[2] == '0':
- allRootUsers.append(line.split(':')[0])
+ for line in open("/etc/passwd", "r"):
+ if line.split(":")[2] == "0":
+ allRootUsers.append(line.split(":")[0])
    # read the shadow file and check, for each user, whether it is uid 0 or root.
uidUsersList = []
- for line in open('/etc/shadow', 'r'):
- user = line.split(':')[0]
+ for line in open("/etc/shadow", "r"):
+ user = line.split(":")[0]
if user in allRootUsers:
uidUsersList.append(user)
if passwd:
- LOG.info('Setting admin password')
- distro.set_passwd('root', passwd)
+ LOG.info("Setting admin password")
+ distro.set_passwd("root", passwd)
if resetPasswd:
self.reset_password(uidUsersList)
- LOG.info('Configure Password completed!')
+ LOG.info("Configure Password completed!")
def reset_password(self, uidUserList):
"""
@@ -54,15 +54,19 @@ class PasswordConfigurator(object):
not succeeded using passwd command. Log failure message otherwise.
@param: list of users for which to expire password.
"""
- LOG.info('Expiring password.')
+ LOG.info("Expiring password.")
for user in uidUserList:
try:
- subp.subp(['passwd', '--expire', user])
+ subp.subp(["passwd", "--expire", user])
except subp.ProcessExecutionError as e:
- if os.path.exists('/usr/bin/chage'):
- subp.subp(['chage', '-d', '0', user])
+ if os.path.exists("/usr/bin/chage"):
+ subp.subp(["chage", "-d", "0", user])
else:
- LOG.warning('Failed to expire password for %s with error: '
- '%s', user, e)
+ LOG.warning(
+ "Failed to expire password for %s with error: %s",
+ user,
+ e,
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
index 7ec06a9c..e99f9b43 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -9,4 +9,5 @@
class ConfigSource(object):
"""Specifies a source for the Config Content."""
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
index 96d839b8..eda84cfb 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
@@ -13,4 +13,5 @@ class GuestCustErrorEnum(object):
GUESTCUST_ERROR_SCRIPT_DISABLED = 6
GUESTCUST_ERROR_WRONG_META_FORMAT = 9
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
index e84c1cb0..33169a7e 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
@@ -14,4 +14,5 @@ class GuestCustEventEnum(object):
GUESTCUST_EVENT_ENABLE_NICS = 103
GUESTCUST_EVENT_QUERY_NICS = 104
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
index a8211dea..c74fbc8b 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
@@ -12,4 +12,5 @@ class GuestCustStateEnum(object):
GUESTCUST_STATE_RUNNING = 4
GUESTCUST_STATE_DONE = 5
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index d919f693..08763e62 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -73,7 +73,7 @@ def get_nics_to_enable(nicsfilepath):
if not os.path.exists(nicsfilepath):
return None
- with open(nicsfilepath, 'r') as fp:
+ with open(nicsfilepath, "r") as fp:
nics = fp.read(NICS_SIZE)
return nics
@@ -95,7 +95,8 @@ def enable_nics(nics):
(out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,
- nics)
+ nics,
+ )
if not out:
time.sleep(enableNicsWaitCount * enableNicsWaitSeconds)
continue
@@ -108,32 +109,36 @@ def enable_nics(nics):
(out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS,
- nics)
+ nics,
+ )
if out and out == NICS_STATUS_CONNECTED:
logger.info("NICS are connected on %d second", count)
return
time.sleep(enableNicsWaitSeconds)
- logger.warning("Can't connect network interfaces after %d attempts",
- enableNicsWaitRetries)
+ logger.warning(
+ "Can't connect network interfaces after %d attempts",
+ enableNicsWaitRetries,
+ )
def get_tools_config(section, key, defaultVal):
- """ Return the value of [section] key from VMTools configuration.
+ """Return the value of [section] key from VMTools configuration.
- @param section: String of section to read from VMTools config
- @returns: String value from key in [section] or defaultVal if
- [section] is not present or vmware-toolbox-cmd is
- not installed.
+ @param section: String of section to read from VMTools config
+ @returns: String value from key in [section] or defaultVal if
+ [section] is not present or vmware-toolbox-cmd is
+ not installed.
"""
- if not subp.which('vmware-toolbox-cmd'):
+ if not subp.which("vmware-toolbox-cmd"):
logger.debug(
- 'vmware-toolbox-cmd not installed, returning default value')
+ "vmware-toolbox-cmd not installed, returning default value"
+ )
return defaultVal
- cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key]
+ cmd = ["vmware-toolbox-cmd", "config", "get", section, key]
try:
(outText, _) = subp.subp(cmd)
@@ -141,22 +146,27 @@ def get_tools_config(section, key, defaultVal):
if e.exit_code == 69:
logger.debug(
"vmware-toolbox-cmd returned 69 (unavailable) for cmd: %s."
- " Return default value: %s", " ".join(cmd), defaultVal)
+ " Return default value: %s",
+ " ".join(cmd),
+ defaultVal,
+ )
else:
logger.error("Failed running %s[%s]", cmd, e.exit_code)
logger.exception(e)
return defaultVal
retValue = defaultVal
- m = re.match(r'([^=]+)=(.*)', outText)
+ m = re.match(r"([^=]+)=(.*)", outText)
if m:
retValue = m.group(2).strip()
- logger.debug("Get tools config: [%s] %s = %s",
- section, key, retValue)
+ logger.debug("Get tools config: [%s] %s = %s", section, key, retValue)
else:
logger.debug(
"Tools config: [%s] %s is not found, return default value: %s",
- section, key, retValue)
+ section,
+ key,
+ retValue,
+ )
return retValue
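
get_tools_config() depends on the key = value shape of vmware-toolbox-cmd output; a sketch of the regex extraction, with the sample line assumed for illustration:

import re

out_text = "enable-custom-scripts = true\n"

default_val = "false"
ret_value = default_val
m = re.match(r"([^=]+)=(.*)", out_text)
if m:
    # group(2) is everything after the first '=', stripped of whitespace
    ret_value = m.group(2).strip()

print(ret_value)  # true
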
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
index d793bdeb..673204a0 100644
--- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
+++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
@@ -18,18 +18,19 @@ class Ipv4ModeEnum(object):
    # The legacy mode which only allows dhcp/static based on whether the
    # IPv4 address list is empty or not
- IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'
+ IPV4_MODE_BACKWARDS_COMPATIBLE = "BACKWARDS_COMPATIBLE"
# IPv4 must use static address. Reserved for future use
- IPV4_MODE_STATIC = 'STATIC'
+ IPV4_MODE_STATIC = "STATIC"
# IPv4 must use DHCPv4. Reserved for future use
- IPV4_MODE_DHCP = 'DHCP'
+ IPV4_MODE_DHCP = "DHCP"
# IPv4 must be disabled
- IPV4_MODE_DISABLED = 'DISABLED'
+ IPV4_MODE_DISABLED = "DISABLED"
# IPv4 settings should be left untouched. Reserved for future use
- IPV4_MODE_AS_IS = 'AS_IS'
+ IPV4_MODE_AS_IS = "AS_IS"
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
index ef8f87f7..7b742d0f 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -20,7 +20,7 @@ class Nic(NicBase):
self._configFile = configFile
def _get(self, what):
- return self._configFile.get(self.name + '|' + what, None)
+ return self._configFile.get(self.name + "|" + what, None)
def _get_count_with_prefix(self, prefix):
return self._configFile.get_count_with_prefix(self.name + prefix)
@@ -31,29 +31,29 @@ class Nic(NicBase):
@property
def mac(self):
- return self._get('MACADDR').lower()
+ return self._get("MACADDR").lower()
@property
def primary(self):
- value = self._get('PRIMARY')
+ value = self._get("PRIMARY")
if value:
value = value.lower()
- return value == 'yes' or value == 'true'
+ return value == "yes" or value == "true"
else:
return False
@property
def onboot(self):
- value = self._get('ONBOOT')
+ value = self._get("ONBOOT")
if value:
value = value.lower()
- return value == 'yes' or value == 'true'
+ return value == "yes" or value == "true"
else:
return False
@property
def bootProto(self):
- value = self._get('BOOTPROTO')
+ value = self._get("BOOTPROTO")
if value:
return value.lower()
else:
@@ -61,7 +61,7 @@ class Nic(NicBase):
@property
def ipv4_mode(self):
- value = self._get('IPv4_MODE')
+ value = self._get("IPv4_MODE")
if value:
return value.lower()
else:
@@ -80,7 +80,7 @@ class Nic(NicBase):
@property
def staticIpv6(self):
- cnt = self._get_count_with_prefix('|IPv6ADDR|')
+ cnt = self._get_count_with_prefix("|IPv6ADDR|")
if not cnt:
return None
@@ -100,17 +100,17 @@ class StaticIpv4Addr(StaticIpv4Base):
@property
def ip(self):
- return self._nic._get('IPADDR')
+ return self._nic._get("IPADDR")
@property
def netmask(self):
- return self._nic._get('NETMASK')
+ return self._nic._get("NETMASK")
@property
def gateways(self):
- value = self._nic._get('GATEWAY')
+ value = self._nic._get("GATEWAY")
if value:
- return [x.strip() for x in value.split(',')]
+ return [x.strip() for x in value.split(",")]
else:
return None
@@ -124,14 +124,15 @@ class StaticIpv6Addr(StaticIpv6Base):
@property
def ip(self):
- return self._nic._get('IPv6ADDR|' + str(self._index))
+ return self._nic._get("IPv6ADDR|" + str(self._index))
@property
def netmask(self):
- return self._nic._get('IPv6NETMASK|' + str(self._index))
+ return self._nic._get("IPv6NETMASK|" + str(self._index))
@property
def gateway(self):
- return self._nic._get('IPv6GATEWAY|' + str(self._index))
+ return self._nic._get("IPv6GATEWAY|" + str(self._index))
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py
index de7b866d..37d9602f 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic_base.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic_base.py
@@ -18,7 +18,7 @@ class NicBase(object):
Retrieves the mac address of the nic
@return (str) : the MACADDR setting
"""
- raise NotImplementedError('MACADDR')
+ raise NotImplementedError("MACADDR")
@property
def primary(self):
@@ -29,7 +29,7 @@ class NicBase(object):
be set.
@return (bool): the PRIMARY setting
"""
- raise NotImplementedError('PRIMARY')
+ raise NotImplementedError("PRIMARY")
@property
def onboot(self):
@@ -37,7 +37,7 @@ class NicBase(object):
Retrieves whether the nic should be up at the boot time
@return (bool) : the ONBOOT setting
"""
- raise NotImplementedError('ONBOOT')
+ raise NotImplementedError("ONBOOT")
@property
def bootProto(self):
@@ -45,7 +45,7 @@ class NicBase(object):
Retrieves the boot protocol of the nic
@return (str): the BOOTPROTO setting, valid values: dhcp and static.
"""
- raise NotImplementedError('BOOTPROTO')
+ raise NotImplementedError("BOOTPROTO")
@property
def ipv4_mode(self):
@@ -54,7 +54,7 @@ class NicBase(object):
@return (str): the IPv4_MODE setting, valid values:
backwards_compatible, static, dhcp, disabled, as_is
"""
- raise NotImplementedError('IPv4_MODE')
+ raise NotImplementedError("IPv4_MODE")
@property
def staticIpv4(self):
@@ -62,7 +62,7 @@ class NicBase(object):
Retrieves the static IPv4 configuration of the nic
@return (StaticIpv4Base list): the static ipv4 setting
"""
- raise NotImplementedError('Static IPv4')
+ raise NotImplementedError("Static IPv4")
@property
def staticIpv6(self):
@@ -70,7 +70,7 @@ class NicBase(object):
Retrieves the IPv6 configuration of the nic
@return (StaticIpv6Base list): the static ipv6 setting
"""
- raise NotImplementedError('Static Ipv6')
+ raise NotImplementedError("Static Ipv6")
def validate(self):
"""
@@ -78,7 +78,7 @@ class NicBase(object):
For example, the staticIpv4 property is required and should not be
empty when ipv4Mode is STATIC
"""
- raise NotImplementedError('Check constraints on properties')
+ raise NotImplementedError("Check constraints on properties")
class StaticIpv4Base(object):
@@ -93,7 +93,7 @@ class StaticIpv4Base(object):
Retrieves the Ipv4 address
@return (str): the IPADDR setting
"""
- raise NotImplementedError('Ipv4 Address')
+ raise NotImplementedError("Ipv4 Address")
@property
def netmask(self):
@@ -101,7 +101,7 @@ class StaticIpv4Base(object):
Retrieves the Ipv4 NETMASK setting
@return (str): the NETMASK setting
"""
- raise NotImplementedError('Ipv4 NETMASK')
+ raise NotImplementedError("Ipv4 NETMASK")
@property
def gateways(self):
@@ -109,7 +109,7 @@ class StaticIpv4Base(object):
Retrieves the gateways on this Ipv4 subnet
@return (str list): the GATEWAY setting
"""
- raise NotImplementedError('Ipv4 GATEWAY')
+ raise NotImplementedError("Ipv4 GATEWAY")
class StaticIpv6Base(object):
@@ -123,7 +123,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 address
@return (str): the IPv6ADDR setting
"""
- raise NotImplementedError('Ipv6 Address')
+ raise NotImplementedError("Ipv6 Address")
@property
def netmask(self):
@@ -131,7 +131,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 NETMASK setting
@return (str): the IPv6NETMASK setting
"""
- raise NotImplementedError('Ipv6 NETMASK')
+ raise NotImplementedError("Ipv6 NETMASK")
@property
def gateway(self):
@@ -139,6 +139,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 GATEWAY setting
@return (str): the IPv6GATEWAY setting
"""
- raise NotImplementedError('Ipv6 GATEWAY')
+ raise NotImplementedError("Ipv6 GATEWAY")
+
# vi: ts=4 expandtab
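
(For orientation: the classes in nic_base.py only define the property contract; concrete classes such as Nic in nic.py satisfy it by reading parsed customization settings. A minimal sketch, assuming a plain dict stands in for that parsed config — the class name and dict contents here are hypothetical, not part of the commit:)

from cloudinit.sources.helpers.vmware.imc.nic_base import NicBase


class DictNic(NicBase):
    """Illustrative NicBase implementation backed by a settings dict."""

    def __init__(self, settings):
        self._settings = settings

    @property
    def mac(self):
        # Satisfies the MACADDR contract from NicBase above
        return self._settings["MACADDR"]

    @property
    def bootProto(self):
        return self._settings.get("BOOTPROTO", "dhcp")

    # primary, onboot, ipv4_mode, staticIpv4, staticIpv6 and validate()
    # would be overridden the same way in a complete implementation.


nic = DictNic({"MACADDR": "00:50:56:aa:bb:cc"})
print(nic.mac)  # 00:50:56:aa:bb:cc
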
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
index ad347bea..eb504eba 100644
--- a/cloudinit/sources/helpers/vultr.py
+++ b/cloudinit/sources/helpers/vultr.py
@@ -3,16 +3,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
import json
+from functools import lru_cache
-from cloudinit import log as log
-from cloudinit import url_helper
from cloudinit import dmi
-from cloudinit import util
-from cloudinit import net
-from cloudinit import netinfo
-from cloudinit import subp
+from cloudinit import log as log
+from cloudinit import net, netinfo, subp, url_helper, util
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from functools import lru_cache
# Get LOG
LOG = log.getLogger(__name__)
@@ -41,21 +37,21 @@ def set_route():
routes = netinfo.route_info()
    # If no tools exist, an empty dict is returned
- if 'ipv4' not in routes:
+ if "ipv4" not in routes:
return
# We only care about IPv4
- routes = routes['ipv4']
+ routes = routes["ipv4"]
# Searchable list
dests = []
# Parse each route into a more searchable format
for route in routes:
- dests.append(route['destination'])
+ dests.append(route["destination"])
- gw_present = '100.64.0.0' in dests or '100.64.0.0/10' in dests
- dest_present = '169.254.169.254' in dests
+ gw_present = "100.64.0.0" in dests or "100.64.0.0/10" in dests
+ dest_present = "169.254.169.254" in dests
    # Skip if this is an IPv6-only instance (no 100.64.0.0/10 link route)
    # or the metadata route is already present
@@ -63,36 +59,32 @@ def set_route():
return
# Set metadata route
- if subp.which('ip'):
- subp.subp([
- 'ip',
- 'route',
- 'add',
- '169.254.169.254/32',
- 'dev',
- net.find_fallback_nic()
- ])
- elif subp.which('route'):
- subp.subp([
- 'route',
- 'add',
- '-net',
- '169.254.169.254/32',
- '100.64.0.1'
- ])
+ if subp.which("ip"):
+ subp.subp(
+ [
+ "ip",
+ "route",
+ "add",
+ "169.254.169.254/32",
+ "dev",
+ net.find_fallback_nic(),
+ ]
+ )
+ elif subp.which("route"):
+ subp.subp(["route", "add", "-net", "169.254.169.254/32", "100.64.0.1"])
# Read the system information from SMBIOS
def get_sysinfo():
return {
- 'manufacturer': dmi.read_dmi_data("system-manufacturer"),
- 'subid': dmi.read_dmi_data("system-serial-number")
+ "manufacturer": dmi.read_dmi_data("system-manufacturer"),
+ "subid": dmi.read_dmi_data("system-serial-number"),
}
# Assumes the caller has already checked that this is Vultr
def is_baremetal():
- if get_sysinfo()['manufacturer'] != "Vultr":
+ if get_sysinfo()["manufacturer"] != "Vultr":
return True
return False
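
(For reference, set_route() keys its decision off the "destination" column of netinfo.route_info(). A rough re-statement of that gate, with a hypothetical routing table trimmed to the one key the helper actually reads:)

# Hypothetical parsed routing table; set_route() only reads "destination".
routes = {"ipv4": [{"destination": "0.0.0.0"}, {"destination": "100.64.0.0"}]}

dests = [route["destination"] for route in routes["ipv4"]]
gw_present = "100.64.0.0" in dests or "100.64.0.0/10" in dests
dest_present = "169.254.169.254" in dests

# The route is only added when the 100.64.0.0/10 carrier network is routed
# and 169.254.169.254 is not yet directly reachable.
print(gw_present and not dest_present)  # True
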
@@ -102,7 +94,7 @@ def is_vultr():
# VC2, VDC, and HFC use DMI
sysinfo = get_sysinfo()
- if sysinfo['manufacturer'] == "Vultr":
+ if sysinfo["manufacturer"] == "Vultr":
return True
# Baremetal requires a kernel parameter
@@ -118,20 +110,20 @@ def read_metadata(url, timeout, retries, sec_between, agent):
    # Announce OS details so we can handle non-Vultr-origin
# images and provide correct vendordata generation.
- headers = {
- 'Metadata-Token': 'cloudinit',
- 'User-Agent': agent
- }
+ headers = {"Metadata-Token": "cloudinit", "User-Agent": agent}
- response = url_helper.readurl(url,
- timeout=timeout,
- retries=retries,
- headers=headers,
- sec_between=sec_between)
+ response = url_helper.readurl(
+ url,
+ timeout=timeout,
+ retries=retries,
+ headers=headers,
+ sec_between=sec_between,
+ )
if not response.ok():
- raise RuntimeError("Failed to connect to %s: Code: %s" %
- url, response.code)
+ raise RuntimeError(
+ "Failed to connect to %s: Code: %s" % url, response.code
+ )
return response.contents.decode()
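
(A hedged usage sketch of read_metadata(); the endpoint is the v1.json document the Vultr datasource conventionally fetches, while the timing values and agent string are illustrative only:)

import json

raw = read_metadata(
    url="http://169.254.169.254/v1.json",  # conventional Vultr endpoint
    timeout=2,           # hypothetical timing values
    retries=30,
    sec_between=2,
    agent="Cloud-Init/21.4",  # hypothetical User-Agent
)
md = json.loads(raw)
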
@@ -156,95 +148,82 @@ def get_interface_name(mac):
def generate_network_config(interfaces):
network = {
"version": 1,
- "config": [
- {
- "type": "nameserver",
- "address": [
- "108.61.10.10"
- ]
- }
- ]
+ "config": [{"type": "nameserver", "address": ["108.61.10.10"]}],
}
# Prepare interface 0, public
if len(interfaces) > 0:
public = generate_public_network_interface(interfaces[0])
- network['config'].append(public)
+ network["config"].append(public)
# Prepare additional interfaces, private
for i in range(1, len(interfaces)):
private = generate_private_network_interface(interfaces[i])
- network['config'].append(private)
+ network["config"].append(private)
return network
# Take interface metadata and generate the public network config part
def generate_public_network_interface(interface):
- interface_name = get_interface_name(interface['mac'])
+ interface_name = get_interface_name(interface["mac"])
if not interface_name:
raise RuntimeError(
- "Interface: %s could not be found on the system" %
- interface['mac'])
+ "Interface: %s could not be found on the system" % interface["mac"]
+ )
netcfg = {
"name": interface_name,
"type": "physical",
- "mac_address": interface['mac'],
+ "mac_address": interface["mac"],
"accept-ra": 1,
"subnets": [
- {
- "type": "dhcp",
- "control": "auto"
- },
- {
- "type": "ipv6_slaac",
- "control": "auto"
- },
- ]
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
+ ],
}
# Options that may or may not be used
if "mtu" in interface:
- netcfg['mtu'] = interface['mtu']
+ netcfg["mtu"] = interface["mtu"]
if "accept-ra" in interface:
- netcfg['accept-ra'] = interface['accept-ra']
+ netcfg["accept-ra"] = interface["accept-ra"]
if "routes" in interface:
- netcfg['subnets'][0]['routes'] = interface['routes']
+ netcfg["subnets"][0]["routes"] = interface["routes"]
    # Check for additional IPs
- additional_count = len(interface['ipv4']['additional'])
+    additional_count = len(interface.get("ipv4", {}).get("additional", []))
if "ipv4" in interface and additional_count > 0:
- for additional in interface['ipv4']['additional']:
+ for additional in interface["ipv4"]["additional"]:
add = {
"type": "static",
"control": "auto",
- "address": additional['address'],
- "netmask": additional['netmask']
+ "address": additional["address"],
+ "netmask": additional["netmask"],
}
if "routes" in additional:
- add['routes'] = additional['routes']
+ add["routes"] = additional["routes"]
- netcfg['subnets'].append(add)
+ netcfg["subnets"].append(add)
    # Check for additional IPv6 addresses
- additional_count = len(interface['ipv6']['additional'])
+    additional_count = len(interface.get("ipv6", {}).get("additional", []))
if "ipv6" in interface and additional_count > 0:
- for additional in interface['ipv6']['additional']:
+ for additional in interface["ipv6"]["additional"]:
add = {
"type": "static6",
"control": "auto",
- "address": additional['address'],
- "netmask": additional['netmask']
+ "address": additional["address"],
+ "netmask": additional["netmask"],
}
if "routes" in additional:
- add['routes'] = additional['routes']
+ add["routes"] = additional["routes"]
- netcfg['subnets'].append(add)
+ netcfg["subnets"].append(add)
# Add config to template
return netcfg
@@ -252,35 +231,35 @@ def generate_public_network_interface(interface):
# Take interface metadata and generate the private network config part
def generate_private_network_interface(interface):
- interface_name = get_interface_name(interface['mac'])
+ interface_name = get_interface_name(interface["mac"])
if not interface_name:
raise RuntimeError(
- "Interface: %s could not be found on the system" %
- interface['mac'])
+ "Interface: %s could not be found on the system" % interface["mac"]
+ )
netcfg = {
"name": interface_name,
"type": "physical",
- "mac_address": interface['mac'],
+ "mac_address": interface["mac"],
"subnets": [
{
"type": "static",
"control": "auto",
- "address": interface['ipv4']['address'],
- "netmask": interface['ipv4']['netmask']
+ "address": interface["ipv4"]["address"],
+ "netmask": interface["ipv4"]["netmask"],
}
- ]
+ ],
}
# Options that may or may not be used
if "mtu" in interface:
- netcfg['mtu'] = interface['mtu']
+ netcfg["mtu"] = interface["mtu"]
if "accept-ra" in interface:
- netcfg['accept-ra'] = interface['accept-ra']
+ netcfg["accept-ra"] = interface["accept-ra"]
if "routes" in interface:
- netcfg['subnets'][0]['routes'] = interface['routes']
+ netcfg["subnets"][0]["routes"] = interface["routes"]
return netcfg
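
(For comparison with the public interface above, a plausible netcfg produced by generate_private_network_interface() — names and addresses are hypothetical. Note the single static subnet and the absence of the accept-ra/ipv6_slaac entries used on the public side:)

netcfg = {
    "name": "eth1",  # hypothetical resolved interface name
    "type": "physical",
    "mac_address": "5a:00:00:00:00:02",
    "subnets": [
        {
            "type": "static",
            "control": "auto",
            "address": "10.1.96.5",
            "netmask": "255.255.224.0",
        }
    ],
}
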
@@ -288,12 +267,13 @@ def generate_private_network_interface(interface):
# Make required adjustments to the network configs provided
def add_interface_names(interfaces):
for interface in interfaces:
- interface_name = get_interface_name(interface['mac'])
+ interface_name = get_interface_name(interface["mac"])
if not interface_name:
raise RuntimeError(
- "Interface: %s could not be found on the system" %
- interface['mac'])
- interface['name'] = interface_name
+ "Interface: %s could not be found on the system"
+ % interface["mac"]
+ )
+ interface["name"] = interface_name
return interfaces
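
(Putting the helpers together, a hedged sketch of the version-1 network config generate_network_config() emits for a single public interface. The metadata shape and MAC are hypothetical, and the call itself needs a host where the MAC resolves, so it is left commented:)

interfaces = [
    {
        "mac": "56:00:00:00:00:01",  # hypothetical MAC
        "ipv4": {"additional": []},
        "ipv6": {"additional": []},
    }
]

# network = generate_network_config(interfaces)
# Expected shape, roughly:
# {
#     "version": 1,
#     "config": [
#         {"type": "nameserver", "address": ["108.61.10.10"]},
#         {
#             "name": "eth0",  # resolved from the MAC
#             "type": "physical",
#             "mac_address": "56:00:00:00:00:01",
#             "accept-ra": 1,
#             "subnets": [
#                 {"type": "dhcp", "control": "auto"},
#                 {"type": "ipv6_slaac", "control": "auto"},
#             ],
#         },
#     ],
# }
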