summaryrefslogtreecommitdiff
path: root/cloudinit/sources/helpers
diff options
context:
space:
mode:
Diffstat (limited to 'cloudinit/sources/helpers')
-rwxr-xr-xcloudinit/sources/helpers/azure.py524
-rw-r--r--cloudinit/sources/helpers/digitalocean.py21
-rw-r--r--cloudinit/sources/helpers/hetzner.py19
-rw-r--r--cloudinit/sources/helpers/netlink.py3
-rw-r--r--cloudinit/sources/helpers/openstack.py60
-rw-r--r--cloudinit/sources/helpers/tests/test_netlink.py167
-rw-r--r--cloudinit/sources/helpers/tests/test_openstack.py44
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config.py26
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_custom_script.py3
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_file.py1
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_namespace.py1
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_nic.py7
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_passwd.py7
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_source.py1
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_util.py48
15 files changed, 678 insertions, 254 deletions
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index fc760581..b968a96f 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -1,5 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
+import base64
import json
import logging
import os
@@ -8,13 +8,16 @@ import socket
import struct
import time
import textwrap
+import zlib
+from cloudinit.settings import CFG_BUILTIN
from cloudinit.net import dhcp
from cloudinit import stages
from cloudinit import temp_utils
from contextlib import contextmanager
from xml.etree import ElementTree
+from cloudinit import subp
from cloudinit import url_helper
from cloudinit import util
from cloudinit import version
@@ -32,7 +35,14 @@ DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
BOOT_EVENT_TYPE = 'boot-telemetry'
SYSTEMINFO_EVENT_TYPE = 'system-info'
DIAGNOSTIC_EVENT_TYPE = 'diagnostic'
-
+COMPRESSED_EVENT_TYPE = 'compressed'
+# Maximum number of bytes of the cloud-init.log file that can be dumped to KVP
+# at once. This number is based on the analysis done on a large sample of
+# cloud-init.log files where the P95 of the file sizes was 537KB and the time
+# consumed to dump 500KB file was (P95:76, P99:233, P99.9:1170) in ms
+MAX_LOG_TO_KVP_LENGTH = 512000
+# Marker file to indicate whether cloud-init.log is pushed to KVP
+LOG_PUSHED_TO_KVP_MARKER_FILE = '/var/lib/cloud/data/log_pushed_to_kvp'
azure_ds_reporter = events.ReportEventStack(
name="azure-ds",
description="initialize reporter for azure ds",
@@ -64,13 +74,15 @@ def is_byte_swapped(previous_id, current_id):
return ''.join(dd)
parts = current_id.split('-')
- swapped_id = '-'.join([
+ swapped_id = '-'.join(
+ [
swap_bytestring(parts[0]),
swap_bytestring(parts[1]),
swap_bytestring(parts[2]),
parts[3],
parts[4]
- ])
+ ]
+ )
return previous_id == swapped_id
@@ -86,11 +98,13 @@ def get_boot_telemetry():
LOG.debug("Collecting boot telemetry")
try:
kernel_start = float(time.time()) - float(util.uptime())
- except ValueError:
- raise RuntimeError("Failed to determine kernel start timestamp")
+ except ValueError as e:
+ raise RuntimeError(
+ "Failed to determine kernel start timestamp"
+ ) from e
try:
- out, _ = util.subp(['/bin/systemctl',
+ out, _ = subp.subp(['/bin/systemctl',
'show', '-p',
'UserspaceTimestampMonotonic'],
capture=True)
@@ -103,16 +117,17 @@ def get_boot_telemetry():
"UserspaceTimestampMonotonic from systemd")
user_start = kernel_start + (float(tsm) / 1000000)
- except util.ProcessExecutionError as e:
- raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s"
- % e)
+ except subp.ProcessExecutionError as e:
+ raise RuntimeError(
+ "Failed to get UserspaceTimestampMonotonic: %s" % e
+ ) from e
except ValueError as e:
- raise RuntimeError("Failed to parse "
- "UserspaceTimestampMonotonic from systemd: %s"
- % e)
+ raise RuntimeError(
+ "Failed to parse UserspaceTimestampMonotonic from systemd: %s" % e
+ ) from e
try:
- out, _ = util.subp(['/bin/systemctl', 'show',
+ out, _ = subp.subp(['/bin/systemctl', 'show',
'cloud-init-local', '-p',
'InactiveExitTimestampMonotonic'],
capture=True)
@@ -124,13 +139,15 @@ def get_boot_telemetry():
"InactiveExitTimestampMonotonic from systemd")
cloudinit_activation = kernel_start + (float(tsm) / 1000000)
- except util.ProcessExecutionError as e:
- raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s"
- % e)
+ except subp.ProcessExecutionError as e:
+ raise RuntimeError(
+ "Failed to get InactiveExitTimestampMonotonic: %s" % e
+ ) from e
except ValueError as e:
- raise RuntimeError("Failed to parse "
- "InactiveExitTimestampMonotonic from systemd: %s"
- % e)
+ raise RuntimeError(
+ "Failed to parse InactiveExitTimestampMonotonic from systemd: %s"
+ % e
+ ) from e
evt = events.ReportingEvent(
BOOT_EVENT_TYPE, 'boot-telemetry',
@@ -174,6 +191,49 @@ def report_diagnostic_event(str):
return evt
+def report_compressed_event(event_name, event_content):
+ """Report a compressed event"""
+ compressed_data = base64.encodebytes(zlib.compress(event_content))
+ event_data = {"encoding": "gz+b64",
+ "data": compressed_data.decode('ascii')}
+ evt = events.ReportingEvent(
+ COMPRESSED_EVENT_TYPE, event_name,
+ json.dumps(event_data),
+ events.DEFAULT_EVENT_ORIGIN)
+ events.report_event(evt,
+ excluded_handler_types={"log", "print", "webhook"})
+
+ # return the event for unit testing purpose
+ return evt
+
+
+@azure_ds_telemetry_reporter
+def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
+ """Push a portion of cloud-init.log file or the whole file to KVP
+ based on the file size.
+ If called more than once, it skips pushing the log file to KVP again."""
+
+ log_pushed_to_kvp = bool(os.path.isfile(LOG_PUSHED_TO_KVP_MARKER_FILE))
+ if log_pushed_to_kvp:
+ report_diagnostic_event("cloud-init.log is already pushed to KVP")
+ return
+
+ LOG.debug("Dumping cloud-init.log file to KVP")
+ try:
+ with open(file_name, "rb") as f:
+ f.seek(0, os.SEEK_END)
+ seek_index = max(f.tell() - MAX_LOG_TO_KVP_LENGTH, 0)
+ report_diagnostic_event(
+ "Dumping last {} bytes of cloud-init.log file to KVP".format(
+ f.tell() - seek_index))
+ f.seek(seek_index, os.SEEK_SET)
+ report_compressed_event("cloud-init.log", f.read())
+ util.write_file(LOG_PUSHED_TO_KVP_MARKER_FILE, '')
+ except Exception as ex:
+ report_diagnostic_event("Exception when dumping log file: %s" %
+ repr(ex))
+
+
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
@@ -192,7 +252,7 @@ def _get_dhcp_endpoint_option_name():
return azure_endpoint
-class AzureEndpointHttpClient(object):
+class AzureEndpointHttpClient:
headers = {
'x-ms-agent-name': 'WALinuxAgent',
@@ -210,57 +270,77 @@ class AzureEndpointHttpClient(object):
if secure:
headers = self.headers.copy()
headers.update(self.extra_secure_headers)
- return url_helper.read_file_or_url(url, headers=headers, timeout=5,
- retries=10)
+ return url_helper.readurl(url, headers=headers,
+ timeout=5, retries=10, sec_between=5)
def post(self, url, data=None, extra_headers=None):
headers = self.headers
if extra_headers is not None:
headers = self.headers.copy()
headers.update(extra_headers)
- return url_helper.read_file_or_url(url, data=data, headers=headers,
- timeout=5, retries=10)
+ return url_helper.readurl(url, data=data, headers=headers,
+ timeout=5, retries=10, sec_between=5)
-class GoalState(object):
+class InvalidGoalStateXMLException(Exception):
+ """Raised when GoalState XML is invalid or has missing data."""
- def __init__(self, xml, http_client):
- self.http_client = http_client
- self.root = ElementTree.fromstring(xml)
- self._certificates_xml = None
- def _text_from_xpath(self, xpath):
- element = self.root.find(xpath)
- if element is not None:
- return element.text
- return None
+class GoalState:
- @property
- def container_id(self):
- return self._text_from_xpath('./Container/ContainerId')
+ def __init__(self, unparsed_xml, azure_endpoint_client):
+ """Parses a GoalState XML string and returns a GoalState object.
- @property
- def incarnation(self):
- return self._text_from_xpath('./Incarnation')
+ @param unparsed_xml: string representing a GoalState XML.
+ @param azure_endpoint_client: instance of AzureEndpointHttpClient
+ @return: GoalState object representing the GoalState XML string.
+ """
+ self.azure_endpoint_client = azure_endpoint_client
- @property
- def instance_id(self):
- return self._text_from_xpath(
+ try:
+ self.root = ElementTree.fromstring(unparsed_xml)
+ except ElementTree.ParseError as e:
+ msg = 'Failed to parse GoalState XML: %s'
+ LOG.warning(msg, e)
+ report_diagnostic_event(msg % (e,))
+ raise
+
+ self.container_id = self._text_from_xpath('./Container/ContainerId')
+ self.instance_id = self._text_from_xpath(
'./Container/RoleInstanceList/RoleInstance/InstanceId')
+ self.incarnation = self._text_from_xpath('./Incarnation')
+
+ for attr in ("container_id", "instance_id", "incarnation"):
+ if getattr(self, attr) is None:
+ msg = 'Missing %s in GoalState XML'
+ LOG.warning(msg, attr)
+ report_diagnostic_event(msg % (attr,))
+ raise InvalidGoalStateXMLException(msg)
+
+ self.certificates_xml = None
+ url = self._text_from_xpath(
+ './Container/RoleInstanceList/RoleInstance'
+ '/Configuration/Certificates')
+ if url is not None:
+ with events.ReportEventStack(
+ name="get-certificates-xml",
+ description="get certificates xml",
+ parent=azure_ds_reporter):
+ self.certificates_xml = \
+ self.azure_endpoint_client.get(
+ url, secure=True).contents
+ if self.certificates_xml is None:
+ raise InvalidGoalStateXMLException(
+ 'Azure endpoint returned empty certificates xml.')
- @property
- def certificates_xml(self):
- if self._certificates_xml is None:
- url = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance'
- '/Configuration/Certificates')
- if url is not None:
- self._certificates_xml = self.http_client.get(
- url, secure=True).contents
- return self._certificates_xml
+ def _text_from_xpath(self, xpath):
+ element = self.root.find(xpath)
+ if element is not None:
+ return element.text
+ return None
-class OpenSSLManager(object):
+class OpenSSLManager:
certificate_names = {
'private_key': 'TransportPrivate.pem',
@@ -282,7 +362,7 @@ class OpenSSLManager(object):
LOG.debug('Certificate already generated.')
return
with cd(self.tmpdir):
- util.subp([
+ subp.subp([
'openssl', 'req', '-x509', '-nodes', '-subj',
'/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
'-keyout', self.certificate_names['private_key'],
@@ -299,14 +379,14 @@ class OpenSSLManager(object):
@azure_ds_telemetry_reporter
def _run_x509_action(action, cert):
cmd = ['openssl', 'x509', '-noout', action]
- result, _ = util.subp(cmd, data=cert)
+ result, _ = subp.subp(cmd, data=cert)
return result
@azure_ds_telemetry_reporter
def _get_ssh_key_from_cert(self, certificate):
pub_key = self._run_x509_action('-pubkey', certificate)
keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
- ssh_key, _ = util.subp(keygen_cmd, data=pub_key)
+ ssh_key, _ = subp.subp(keygen_cmd, data=pub_key)
return ssh_key
@azure_ds_telemetry_reporter
@@ -339,7 +419,7 @@ class OpenSSLManager(object):
certificates_content.encode('utf-8'),
]
with cd(self.tmpdir):
- out, _ = util.subp(
+ out, _ = subp.subp(
'openssl cms -decrypt -in /dev/stdin -inkey'
' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
' -password pass:'.format(**self.certificate_names),
@@ -367,25 +447,122 @@ class OpenSSLManager(object):
return keys
-class WALinuxAgentShim(object):
-
- REPORT_READY_XML_TEMPLATE = '\n'.join([
- '<?xml version="1.0" encoding="utf-8"?>',
- '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
- ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
- ' <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
- ' <Container>',
- ' <ContainerId>{container_id}</ContainerId>',
- ' <RoleInstanceList>',
- ' <Role>',
- ' <InstanceId>{instance_id}</InstanceId>',
- ' <Health>',
- ' <State>Ready</State>',
- ' </Health>',
- ' </Role>',
- ' </RoleInstanceList>',
- ' </Container>',
- '</Health>'])
+class GoalStateHealthReporter:
+
+ HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent('''\
+ <?xml version="1.0" encoding="utf-8"?>
+ <Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:xsd="http://www.w3.org/2001/XMLSchema">
+ <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>
+ <Container>
+ <ContainerId>{container_id}</ContainerId>
+ <RoleInstanceList>
+ <Role>
+ <InstanceId>{instance_id}</InstanceId>
+ <Health>
+ <State>{health_status}</State>
+ {health_detail_subsection}
+ </Health>
+ </Role>
+ </RoleInstanceList>
+ </Container>
+ </Health>
+ ''')
+
+ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent('''\
+ <Details>
+ <SubStatus>{health_substatus}</SubStatus>
+ <Description>{health_description}</Description>
+ </Details>
+ ''')
+
+ PROVISIONING_SUCCESS_STATUS = 'Ready'
+
+ def __init__(self, goal_state, azure_endpoint_client, endpoint):
+ """Creates instance that will report provisioning status to an endpoint
+
+ @param goal_state: An instance of class GoalState that contains
+ goal state info such as incarnation, container id, and instance id.
+ These 3 values are needed when reporting the provisioning status
+ to Azure
+ @param azure_endpoint_client: Instance of class AzureEndpointHttpClient
+ @param endpoint: Endpoint (string) where the provisioning status report
+ will be sent to
+ @return: Instance of class GoalStateHealthReporter
+ """
+ self._goal_state = goal_state
+ self._azure_endpoint_client = azure_endpoint_client
+ self._endpoint = endpoint
+
+ @azure_ds_telemetry_reporter
+ def send_ready_signal(self):
+ document = self.build_report(
+ incarnation=self._goal_state.incarnation,
+ container_id=self._goal_state.container_id,
+ instance_id=self._goal_state.instance_id,
+ status=self.PROVISIONING_SUCCESS_STATUS)
+ LOG.debug('Reporting ready to Azure fabric.')
+ try:
+ self._post_health_report(document=document)
+ except Exception as e:
+ msg = "exception while reporting ready: %s" % e
+ LOG.error(msg)
+ report_diagnostic_event(msg)
+ raise
+
+ LOG.info('Reported ready to Azure fabric.')
+
+ def build_report(
+ self, incarnation, container_id, instance_id,
+ status, substatus=None, description=None):
+ health_detail = ''
+ if substatus is not None:
+ health_detail = self.HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
+ health_substatus=substatus, health_description=description)
+
+ health_report = self.HEALTH_REPORT_XML_TEMPLATE.format(
+ incarnation=incarnation,
+ container_id=container_id,
+ instance_id=instance_id,
+ health_status=status,
+ health_detail_subsection=health_detail)
+
+ return health_report
+
+ @azure_ds_telemetry_reporter
+ def _post_health_report(self, document):
+ push_log_to_kvp()
+
+ # Whenever report_diagnostic_event(diagnostic_msg) is invoked in code,
+ # the diagnostic messages are written to special files
+ # (/var/opt/hyperv/.kvp_pool_*) as Hyper-V KVP messages.
+ # Hyper-V KVP message communication is done through these files,
+ # and KVP functionality is used to communicate and share diagnostic
+ # info with the Azure Host.
+ # The Azure Host will collect the VM's Hyper-V KVP diagnostic messages
+ # when cloud-init reports to fabric.
+ # When the Azure Host receives the health report signal, it will only
+ # collect and process whatever KVP diagnostic messages have been
+ # written to the KVP files.
+ # KVP messages that are published after the Azure Host receives the
+ # signal are ignored and unprocessed, so yield this thread to the
+ # Hyper-V KVP Reporting thread so that they are written.
+ # time.sleep(0) is a low-cost and proven method to yield the scheduler
+ # and ensure that events are flushed.
+ # See HyperVKvpReportingHandler class, which is a multi-threaded
+ # reporting handler that writes to the special KVP files.
+ time.sleep(0)
+
+ LOG.debug('Sending health report to Azure fabric.')
+ url = "http://{}/machine?comp=health".format(self._endpoint)
+ self._azure_endpoint_client.post(
+ url,
+ data=document,
+ extra_headers={'Content-Type': 'text/xml; charset=utf-8'})
+ LOG.debug('Successfully sent health report to Azure fabric')
+
+
+class WALinuxAgentShim:
def __init__(self, fallback_lease_file=None, dhcp_options=None):
LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',
@@ -393,6 +570,7 @@ class WALinuxAgentShim(object):
self.dhcpoptions = dhcp_options
self._endpoint = None
self.openssl_manager = None
+ self.azure_endpoint_client = None
self.lease_file = fallback_lease_file
def clean_up(self):
@@ -469,9 +647,10 @@ class WALinuxAgentShim(object):
try:
name = os.path.basename(hook_file).replace('.json', '')
dhcp_options[name] = json.loads(util.load_file((hook_file)))
- except ValueError:
+ except ValueError as e:
raise ValueError(
- '{_file} is not valid JSON data'.format(_file=hook_file))
+ '{_file} is not valid JSON data'.format(_file=hook_file)
+ ) from e
return dhcp_options
@staticmethod
@@ -491,7 +670,22 @@ class WALinuxAgentShim(object):
@staticmethod
@azure_ds_telemetry_reporter
def find_endpoint(fallback_lease_file=None, dhcp245=None):
+ """Finds and returns the Azure endpoint using various methods.
+
+ The Azure endpoint is searched in the following order:
+ 1. Endpoint from dhcp options (dhcp option 245).
+ 2. Endpoint from networkd.
+ 3. Endpoint from dhclient hook json.
+ 4. Endpoint from fallback lease file.
+ 5. The default Azure endpoint.
+
+ @param fallback_lease_file: Fallback lease file that will be used
+ during endpoint search.
+ @param dhcp245: dhcp options that will be used during endpoint search.
+ @return: Azure endpoint IP address.
+ """
value = None
+
if dhcp245 is not None:
value = dhcp245
LOG.debug("Using Azure Endpoint from dhcp options")
@@ -533,42 +727,128 @@ class WALinuxAgentShim(object):
@azure_ds_telemetry_reporter
def register_with_azure_and_fetch_data(self, pubkey_info=None):
+ """Gets the VM's GoalState from Azure, uses the GoalState information
+ to report ready/send the ready signal/provisioning complete signal to
+ Azure, and then uses pubkey_info to filter and obtain the user's
+ pubkeys from the GoalState.
+
+ @param pubkey_info: List of pubkey values and fingerprints which are
+ used to filter and obtain the user's pubkey values from the
+ GoalState.
+ @return: The list of user's authorized pubkey values.
+ """
if self.openssl_manager is None:
self.openssl_manager = OpenSSLManager()
- http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
+ if self.azure_endpoint_client is None:
+ self.azure_endpoint_client = AzureEndpointHttpClient(
+ self.openssl_manager.certificate)
+ goal_state = self._fetch_goal_state_from_azure()
+ ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info)
+ health_reporter = GoalStateHealthReporter(
+ goal_state, self.azure_endpoint_client, self.endpoint)
+ health_reporter.send_ready_signal()
+ return {'public-keys': ssh_keys}
+
+ @azure_ds_telemetry_reporter
+ def _fetch_goal_state_from_azure(self):
+ """Fetches the GoalState XML from the Azure endpoint, parses the XML,
+ and returns a GoalState object.
+
+ @return: GoalState object representing the GoalState XML
+ """
+ unparsed_goal_state_xml = self._get_raw_goal_state_xml_from_azure()
+ return self._parse_raw_goal_state_xml(unparsed_goal_state_xml)
+
+ @azure_ds_telemetry_reporter
+ def _get_raw_goal_state_xml_from_azure(self):
+ """Fetches the GoalState XML from the Azure endpoint and returns
+ the XML as a string.
+
+ @return: GoalState XML string
+ """
+
LOG.info('Registering with Azure...')
- attempts = 0
- while True:
- try:
- response = http_client.get(
- 'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
- except Exception as e:
- if attempts < 10:
- time.sleep(attempts + 1)
- else:
- report_diagnostic_event(
- "failed to register with Azure: %s" % e)
- raise
- else:
- break
- attempts += 1
+ url = 'http://{}/machine/?comp=goalstate'.format(self.endpoint)
+ try:
+ response = self.azure_endpoint_client.get(url)
+ except Exception as e:
+ msg = 'failed to register with Azure: %s' % e
+ LOG.warning(msg)
+ report_diagnostic_event(msg)
+ raise
LOG.debug('Successfully fetched GoalState XML.')
- goal_state = GoalState(response.contents, http_client)
- report_diagnostic_event("container_id %s" % goal_state.container_id)
+ return response.contents
+
+ @azure_ds_telemetry_reporter
+ def _parse_raw_goal_state_xml(self, unparsed_goal_state_xml):
+ """Parses a GoalState XML string and returns a GoalState object.
+
+ @param unparsed_goal_state_xml: GoalState XML string
+ @return: GoalState object representing the GoalState XML
+ """
+ try:
+ goal_state = GoalState(
+ unparsed_goal_state_xml, self.azure_endpoint_client)
+ except Exception as e:
+ msg = 'Error processing GoalState XML: %s' % e
+ LOG.warning(msg)
+ report_diagnostic_event(msg)
+ raise
+ msg = ', '.join([
+ 'GoalState XML container id: %s' % goal_state.container_id,
+ 'GoalState XML instance id: %s' % goal_state.instance_id,
+ 'GoalState XML incarnation: %s' % goal_state.incarnation])
+ LOG.debug(msg)
+ report_diagnostic_event(msg)
+ return goal_state
+
+ @azure_ds_telemetry_reporter
+ def _get_user_pubkeys(self, goal_state, pubkey_info):
+ """Gets and filters the VM admin user's authorized pubkeys.
+
+ The admin user in this case is the username specified as "admin"
+ when deploying VMs on Azure.
+ See https://docs.microsoft.com/en-us/cli/azure/vm#az-vm-create.
+ cloud-init expects a straightforward array of keys to be dropped
+ into the admin user's authorized_keys file. Azure control plane exposes
+ multiple public keys to the VM via wireserver. Select just the
+ admin user's key(s) and return them, ignoring any other certs.
+
+ @param goal_state: GoalState object. The GoalState object contains
+ a certificate XML, which contains both the VM user's authorized
+ pubkeys and other non-user pubkeys, which are used for
+ MSI and protected extension handling.
+ @param pubkey_info: List of VM user pubkey dicts that were previously
+ obtained from provisioning data.
+ Each pubkey dict in this list can either have the format
+ pubkey['value'] or pubkey['fingerprint'].
+ Each pubkey['fingerprint'] in the list is used to filter
+ and obtain the actual pubkey value from the GoalState
+ certificates XML.
+ Each pubkey['value'] requires no further processing and is
+ immediately added to the return list.
+ @return: A list of the VM user's authorized pubkey values.
+ """
ssh_keys = []
if goal_state.certificates_xml is not None and pubkey_info is not None:
LOG.debug('Certificate XML found; parsing out public keys.')
keys_by_fingerprint = self.openssl_manager.parse_certificates(
goal_state.certificates_xml)
ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info)
- self._report_ready(goal_state, http_client)
- return {'public-keys': ssh_keys}
+ return ssh_keys
- def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info):
- """cloud-init expects a straightforward array of keys to be dropped
- into the user's authorized_keys file. Azure control plane exposes
- multiple public keys to the VM via wireserver. Select just the
- user's key(s) and return them, ignoring any other certs.
+ @staticmethod
+ def _filter_pubkeys(keys_by_fingerprint, pubkey_info):
+ """ Filter and return only the user's actual pubkeys.
+
+ @param keys_by_fingerprint: pubkey fingerprint -> pubkey value dict
+ that was obtained from GoalState Certificates XML. May contain
+ non-user pubkeys.
+ @param pubkey_info: List of VM user pubkeys. Pubkey values are added
+ to the return list without further processing. Pubkey fingerprints
+ are used to filter and obtain the actual pubkey values from
+ keys_by_fingerprint.
+ @return: A list of the VM user's authorized pubkey values.
"""
keys = []
for pubkey in pubkey_info:
@@ -587,30 +867,6 @@ class WALinuxAgentShim(object):
return keys
- @azure_ds_telemetry_reporter
- def _report_ready(self, goal_state, http_client):
- LOG.debug('Reporting ready to Azure fabric.')
- document = self.REPORT_READY_XML_TEMPLATE.format(
- incarnation=goal_state.incarnation,
- container_id=goal_state.container_id,
- instance_id=goal_state.instance_id,
- )
- # Host will collect kvps when cloud-init reports ready.
- # some kvps might still be in the queue. We yield the scheduler
- # to make sure we process all kvps up till this point.
- time.sleep(0)
- try:
- http_client.post(
- "http://{0}/machine?comp=health".format(self.endpoint),
- data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
- )
- except Exception as e:
- report_diagnostic_event("exception while reporting ready: %s" % e)
- raise
-
- LOG.info('Reported ready to Azure fabric.')
-
@azure_ds_telemetry_reporter
def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
@@ -623,10 +879,16 @@ def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
shim.clean_up()
-class EphemeralDHCPv4WithReporting(object):
+def dhcp_log_cb(out, err):
+ report_diagnostic_event("dhclient output stream: %s" % out)
+ report_diagnostic_event("dhclient error stream: %s" % err)
+
+
+class EphemeralDHCPv4WithReporting:
def __init__(self, reporter, nic=None):
self.reporter = reporter
- self.ephemeralDHCPv4 = EphemeralDHCPv4(iface=nic)
+ self.ephemeralDHCPv4 = EphemeralDHCPv4(
+ iface=nic, dhcp_log_func=dhcp_log_cb)
def __enter__(self):
with events.ReportEventStack(
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index 0e7cccac..b545c4d6 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -8,6 +8,7 @@ import random
from cloudinit import log as logging
from cloudinit import net as cloudnet
from cloudinit import url_helper
+from cloudinit import subp
from cloudinit import util
NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
@@ -15,7 +16,7 @@ NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
LOG = logging.getLogger(__name__)
-def assign_ipv4_link_local(nic=None):
+def assign_ipv4_link_local(distro, nic=None):
"""Bring up NIC using an address using link-local (ip4LL) IPs. On
DigitalOcean, the link-local domain is per-droplet routed, so there
is no risk of collisions. However, to be more safe, the ip4LL
@@ -23,7 +24,7 @@ def assign_ipv4_link_local(nic=None):
"""
if not nic:
- nic = get_link_local_nic()
+ nic = get_link_local_nic(distro)
LOG.debug("selected interface '%s' for reading metadata", nic)
if not nic:
@@ -36,14 +37,14 @@ def assign_ipv4_link_local(nic=None):
ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic]
ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']
- if not util.which('ip'):
+ if not subp.which('ip'):
raise RuntimeError("No 'ip' command available to configure ip4LL "
"address")
try:
- util.subp(ip_addr_cmd)
+ subp.subp(ip_addr_cmd)
LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic)
- util.subp(ip_link_cmd)
+ subp.subp(ip_link_cmd)
LOG.debug("brought device '%s' up", nic)
except Exception:
util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
@@ -53,8 +54,12 @@ def assign_ipv4_link_local(nic=None):
return nic
-def get_link_local_nic():
- nics = [f for f in cloudnet.get_devicelist() if cloudnet.is_physical(f)]
+def get_link_local_nic(distro):
+ nics = [
+ f
+ for f in cloudnet.get_devicelist()
+ if distro.networking.is_physical(f)
+ ]
if not nics:
return None
return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex'))
@@ -74,7 +79,7 @@ def del_ipv4_link_local(nic=None):
ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
try:
- util.subp(ip_addr_cmd)
+ subp.subp(ip_addr_cmd)
LOG.debug("removed ip4LL addresses from %s", nic)
except Exception as e:
diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py
index 2554530d..72edb023 100644
--- a/cloudinit/sources/helpers/hetzner.py
+++ b/cloudinit/sources/helpers/hetzner.py
@@ -7,6 +7,9 @@ from cloudinit import log as logging
from cloudinit import url_helper
from cloudinit import util
+import base64
+import binascii
+
LOG = logging.getLogger(__name__)
@@ -24,3 +27,19 @@ def read_userdata(url, timeout=2, sec_between=2, retries=30):
if not response.ok():
raise RuntimeError("unable to read userdata at %s" % url)
return response.contents
+
+
+def maybe_b64decode(data: bytes) -> bytes:
+ """base64 decode data
+
+ If data is base64 encoded bytes, return b64decode(data).
+ If not, return data unmodified.
+
+ @param data: data as bytes. TypeError is raised if not bytes.
+ """
+ if not isinstance(data, bytes):
+ raise TypeError("data is '%s', expected bytes" % type(data))
+ try:
+ return base64.b64decode(data, validate=True)
+ except binascii.Error:
+ return data
diff --git a/cloudinit/sources/helpers/netlink.py b/cloudinit/sources/helpers/netlink.py
index d377ae3d..c2ad587b 100644
--- a/cloudinit/sources/helpers/netlink.py
+++ b/cloudinit/sources/helpers/netlink.py
@@ -55,7 +55,6 @@ NetlinkHeader = namedtuple('NetlinkHeader', ['length', 'type', 'flags', 'seq',
class NetlinkCreateSocketError(RuntimeError):
'''Raised if netlink socket fails during create or bind.'''
- pass
def create_bound_netlink_socket():
@@ -75,7 +74,7 @@ def create_bound_netlink_socket():
netlink_socket.setblocking(0)
except socket.error as e:
msg = "Exception during netlink socket create: %s" % e
- raise NetlinkCreateSocketError(msg)
+ raise NetlinkCreateSocketError(msg) from e
LOG.debug("Created netlink socket")
return netlink_socket
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 441db506..65e020c5 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -16,6 +16,7 @@ from cloudinit import ec2_utils
from cloudinit import log as logging
from cloudinit import net
from cloudinit import sources
+from cloudinit import subp
from cloudinit import url_helper
from cloudinit import util
from cloudinit.sources import BrokenMetadata
@@ -68,6 +69,7 @@ KNOWN_PHYSICAL_TYPES = (
None,
'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
'bridge',
+ 'cascading', # not present in OpenStack upstream, used on OpenTelekomCloud
'dvs',
'ethernet',
'hw_veb',
@@ -109,7 +111,7 @@ class SourceMixin(object):
dev_entries = util.find_devs_with(criteria)
if dev_entries:
device = dev_entries[0]
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
return device
@@ -278,8 +280,9 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
data = translator(data)
except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to process path %s: %s" % (path, e)
+ ) from e
if found:
results[name] = data
@@ -289,8 +292,9 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
metadata['random_seed'] = base64.b64decode(random_seed)
except (ValueError, TypeError) as e:
- raise BrokenMetadata("Badly formatted metadata"
- " random_seed entry: %s" % e)
+ raise BrokenMetadata(
+ "Badly formatted metadata random_seed entry: %s" % e
+ ) from e
# load any files that were provided
files = {}
@@ -302,8 +306,9 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
files[path] = self._read_content_path(item)
except Exception as e:
- raise BrokenMetadata("Failed to read provided "
- "file %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to read provided file %s: %s" % (path, e)
+ ) from e
results['files'] = files
# The 'network_config' item in metadata is a content pointer
@@ -315,8 +320,9 @@ class BaseReader(metaclass=abc.ABCMeta):
content = self._read_content_path(net_item, decode=True)
results['network_config'] = content
except IOError as e:
- raise BrokenMetadata("Failed to read network"
- " configuration: %s" % (e))
+ raise BrokenMetadata(
+ "Failed to read network configuration: %s" % (e)
+ ) from e
# To openstack, user can specify meta ('nova boot --meta=key=value')
# and those will appear under metadata['meta'].
@@ -368,8 +374,9 @@ class ConfigDriveReader(BaseReader):
try:
return util.load_json(self._path_read(path))
except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to process path %s: %s" % (path, e)
+ ) from e
def read_v1(self):
"""Reads a version 1 formatted location.
@@ -393,13 +400,17 @@ class ConfigDriveReader(BaseReader):
path = found[name]
try:
contents = self._path_read(path)
- except IOError:
- raise BrokenMetadata("Failed to read: %s" % path)
+ except IOError as e:
+ raise BrokenMetadata("Failed to read: %s" % path) from e
try:
- md[key] = translator(contents)
+ # Disable not-callable pylint check; pylint isn't able to
+ # determine that every member of FILES_V1 has a callable in
+ # the appropriate position
+ md[key] = translator(contents) # pylint: disable=E1102
except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to process path %s: %s" % (path, e)
+ ) from e
else:
md[key] = copy.deepcopy(default)
@@ -410,8 +421,11 @@ class ConfigDriveReader(BaseReader):
keydata = meta_js.get('public-keys', keydata)
if keydata:
lines = keydata.splitlines()
- md['public-keys'] = [l for l in lines
- if len(l) and not l.startswith("#")]
+ md['public-keys'] = [
+ line
+ for line in lines
+ if len(line) and not line.startswith("#")
+ ]
# config-drive-v1 has no way for openstack to provide the instance-id
# so we copy that into metadata from the user input
@@ -673,11 +687,13 @@ def convert_net_json(network_json=None, known_macs=None):
raise ValueError("Unable to find a system nic for %s" % d)
d['name'] = known_macs[mac]
- for cfg, key, fmt, target in link_updates:
- if isinstance(target, (list, tuple)):
- cfg[key] = [fmt % link_id_info[l]['name'] for l in target]
+ for cfg, key, fmt, targets in link_updates:
+ if isinstance(targets, (list, tuple)):
+ cfg[key] = [
+ fmt % link_id_info[target]['name'] for target in targets
+ ]
else:
- cfg[key] = fmt % link_id_info[target]['name']
+ cfg[key] = fmt % link_id_info[targets]['name']
# Infiniband interfaces may be referenced in network_data.json by a 6 byte
# Ethernet MAC-style address, and we use that address to look up the
diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/cloudinit/sources/helpers/tests/test_netlink.py
index c2898a16..10760bd6 100644
--- a/cloudinit/sources/helpers/tests/test_netlink.py
+++ b/cloudinit/sources/helpers/tests/test_netlink.py
@@ -87,7 +87,7 @@ class TestParseNetlinkMessage(CiTestCase):
data = None
with self.assertRaises(AssertionError) as context:
read_rta_oper_state(data)
- self.assertTrue('data is none', str(context.exception))
+ self.assertEqual('data is none', str(context.exception))
def test_read_invalid_rta_operstate_none(self):
'''read_rta_oper_state returns none if operstate is none'''
@@ -180,17 +180,22 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
other_ifname = "eth1"
expected_ifname = "eth0"
data_op_down_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_DOWN)
+ other_ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_op_up_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_UP)
+ other_ifname, RTM_NEWLINK, OPER_UP
+ )
data_op_down_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_DOWN)
+ expected_ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_op_up_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_op_down_eth1,
- data_op_up_eth1,
- data_op_down_eth0,
- data_op_up_eth0]
+ expected_ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [
+ data_op_down_eth1,
+ data_op_up_eth1,
+ data_op_down_eth0,
+ data_op_up_eth0
+ ]
wait_for_media_disconnect_connect(m_socket, expected_ifname)
self.assertIn('Ignored netlink event on interface %s' % other_ifname,
self.logs.getvalue())
@@ -207,17 +212,23 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
'''
ifname = "eth0"
data_getlink_down = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_DOWN)
+ ifname, RTM_GETLINK, OPER_DOWN
+ )
data_getlink_up = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_UP)
+ ifname, RTM_GETLINK, OPER_UP
+ )
data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN)
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_getlink_down,
- data_getlink_up,
- data_newlink_down,
- data_newlink_up]
+ ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_getlink_down,
+ data_getlink_up,
+ data_newlink_down,
+ data_newlink_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -233,19 +244,25 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
'''
ifname = "eth0"
data_setlink_down = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_DOWN)
+ ifname, RTM_SETLINK, OPER_DOWN
+ )
data_setlink_up = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_UP)
+ ifname, RTM_SETLINK, OPER_UP
+ )
data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN)
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_setlink_down,
- data_setlink_up,
- data_newlink_down,
- data_newlink_up,
- data_newlink_down,
- data_newlink_up]
+ ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_setlink_down,
+ data_setlink_up,
+ data_newlink_down,
+ data_newlink_up,
+ data_newlink_down,
+ data_newlink_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -255,23 +272,30 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_DORMANT)
- data_op_notpresent = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_NOTPRESENT)
- data_op_lowerdown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_LOWERLAYERDOWN)
- data_op_testing = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_TESTING)
- data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_UNKNOWN)
- m_read_netlink_socket.side_effect = [data_op_up, data_op_up,
- data_op_dormant, data_op_up,
- data_op_notpresent, data_op_up,
- data_op_lowerdown, data_op_up,
- data_op_testing, data_op_up,
- data_op_unknown, data_op_up,
- data_op_down, data_op_up]
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT
+ )
+ data_op_notpresent = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_NOTPRESENT
+ )
+ data_op_lowerdown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN
+ )
+ data_op_testing = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_TESTING
+ )
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN
+ )
+ m_read_netlink_socket.side_effect = [
+ data_op_up, data_op_up,
+ data_op_dormant, data_op_up,
+ data_op_notpresent, data_op_up,
+ data_op_lowerdown, data_op_up,
+ data_op_testing, data_op_up,
+ data_op_unknown, data_op_up,
+ data_op_down, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 14)
@@ -281,12 +305,14 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_DORMANT)
- data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_UNKNOWN)
- m_read_netlink_socket.side_effect = [data_op_down, data_op_dormant,
- data_op_unknown, data_op_up]
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT)
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN)
+ m_read_netlink_socket.side_effect = [
+ data_op_down, data_op_dormant,
+ data_op_unknown, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -300,9 +326,11 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7)
- m_read_netlink_socket.side_effect = [data_op_invalid, data_op_up,
- data_op_down, data_op_invalid,
- data_op_up]
+ m_read_netlink_socket.side_effect = [
+ data_op_invalid, data_op_up,
+ data_op_down, data_op_invalid,
+ data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 5)
@@ -333,8 +361,9 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None)
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_invalid1, data_invalid2,
- data_op_down, data_op_up]
+ m_read_netlink_socket.side_effect = [
+ data_invalid1, data_invalid2, data_op_down, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -344,11 +373,15 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
bytes = ifname.encode("utf-8")
data = bytearray(96)
struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
+ bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
- 3, bytes, 5, 16, int_to_bytes(OPER_UP))
+ struct.pack_into(
+ "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
+ 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
m_read_netlink_socket.return_value = data
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 1)
@@ -360,14 +393,18 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data1 = bytearray(112)
data2 = bytearray(32)
struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
+ bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data1, 80, 8, 3, bytes, 5, 16,
- int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data2, 16, 8, 3, bytes, 5, 16,
- int_to_bytes(OPER_UP))
+ struct.pack_into(
+ "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
m_read_netlink_socket.side_effect = [data1, data2]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 2)
diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py
new file mode 100644
index 00000000..2bde1e3f
--- /dev/null
+++ b/cloudinit/sources/helpers/tests/test_openstack.py
@@ -0,0 +1,44 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# ./cloudinit/sources/helpers/tests/test_openstack.py
+
+from cloudinit.sources.helpers import openstack
+from cloudinit.tests import helpers as test_helpers
+
+
+class TestConvertNetJson(test_helpers.CiTestCase):
+
+ def test_phy_types(self):
+ """Verify the different known physical types are handled."""
+ # network_data.json example from
+ # https://docs.openstack.org/nova/latest/user/metadata.html
+ mac0 = "fa:16:3e:9c:bf:3d"
+ net_json = {
+ "links": [
+ {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a",
+ "mtu": None, "type": "bridge",
+ "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"}
+ ],
+ "networks": [
+ {"id": "network0", "link": "tapcd9f6d46-4a",
+ "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
+ "type": "ipv4_dhcp"}
+ ],
+ "services": [{"address": "8.8.8.8", "type": "dns"}]
+ }
+ macs = {mac0: 'eth0'}
+
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'mac_address': 'fa:16:3e:9c:bf:3d',
+ 'mtu': None, 'name': 'eth0',
+ 'subnets': [{'type': 'dhcp4'}],
+ 'type': 'physical'},
+ {'address': '8.8.8.8', 'type': 'nameserver'}]}
+
+ for t in openstack.KNOWN_PHYSICAL_TYPES:
+ net_json["links"][0]["type"] = t
+ self.assertEqual(
+ expected,
+ openstack.convert_net_json(network_json=net_json,
+ known_macs=macs))
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 2eaeff34..7109aef3 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -25,6 +25,8 @@ class Config(object):
SUFFIX = 'DNS|SUFFIX|'
TIMEZONE = 'DATETIME|TIMEZONE'
UTC = 'DATETIME|UTC'
+ POST_GC_STATUS = 'MISC|POST-GC-STATUS'
+ DEFAULT_RUN_POST_SCRIPT = 'MISC|DEFAULT-RUN-POST-CUST-SCRIPT'
def __init__(self, configFile):
self._configFile = configFile
@@ -104,4 +106,28 @@ class Config(object):
def custom_script_name(self):
"""Return the name of custom (pre/post) script."""
return self._configFile.get(Config.CUSTOM_SCRIPT, None)
+
+ @property
+ def post_gc_status(self):
+ """Return whether to post guestinfo.gc.status VMX property."""
+ postGcStatus = self._configFile.get(Config.POST_GC_STATUS, 'no')
+ postGcStatus = postGcStatus.lower()
+ if postGcStatus not in ('yes', 'no'):
+ raise ValueError('PostGcStatus value should be yes/no')
+ return postGcStatus == 'yes'
+
+ @property
+ def default_run_post_script(self):
+ """
+ Return enable-custom-scripts default value if enable-custom-scripts
+ is absent in VM Tools configuration
+ """
+ defaultRunPostScript = self._configFile.get(
+ Config.DEFAULT_RUN_POST_SCRIPT,
+ 'no')
+ defaultRunPostScript = defaultRunPostScript.lower()
+ if defaultRunPostScript not in ('yes', 'no'):
+ raise ValueError('defaultRunPostScript value should be yes/no')
+ return defaultRunPostScript == 'yes'
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
index 9f14770e..2ab22de9 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -9,6 +9,7 @@ import logging
import os
import stat
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -61,7 +62,7 @@ class PreCustomScript(RunCustomScript):
"""Executing custom script with precustomization argument."""
LOG.debug("Executing pre-customization script")
self.prepare_script()
- util.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"])
+ subp.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"])
class PostCustomScript(RunCustomScript):
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 602af078..fc034c95 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -22,7 +22,6 @@ class ConfigFile(ConfigSource, dict):
def __init__(self, filename):
self._loadConfigFile(filename)
- pass
def _insertKey(self, key, val):
"""
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
index 2f29edd4..5899d8f7 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -10,6 +10,5 @@ from .config_source import ConfigSource
class ConfigNamespace(ConfigSource):
"""Specifies the Config Namespace."""
- pass
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 77cbf3b6..3745a262 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -10,6 +10,7 @@ import os
import re
from cloudinit.net.network_state import mask_to_net_prefix
+from cloudinit import subp
from cloudinit import util
logger = logging.getLogger(__name__)
@@ -73,7 +74,7 @@ class NicConfigurator(object):
The mac address(es) are in the lower case
"""
cmd = ['ip', 'addr', 'show']
- output, _err = util.subp(cmd)
+ output, _err = subp.subp(cmd)
sections = re.split(r'\n\d+: ', '\n' + output)[1:]
macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
@@ -248,8 +249,8 @@ class NicConfigurator(object):
logger.info('Clearing DHCP leases')
# Ignore the return code 1.
- util.subp(["pkill", "dhclient"], rcs=[0, 1])
- util.subp(["rm", "-f", "/var/lib/dhcp/*"])
+ subp.subp(["pkill", "dhclient"], rcs=[0, 1])
+ subp.subp(["rm", "-f", "/var/lib/dhcp/*"])
def configure(self, osfamily=None):
"""
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
index 8c91fa41..d16a7690 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -9,6 +9,7 @@
import logging
import os
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -56,10 +57,10 @@ class PasswordConfigurator(object):
LOG.info('Expiring password.')
for user in uidUserList:
try:
- util.subp(['passwd', '--expire', user])
- except util.ProcessExecutionError as e:
+ subp.subp(['passwd', '--expire', user])
+ except subp.ProcessExecutionError as e:
if os.path.exists('/usr/bin/chage'):
- util.subp(['chage', '-d', '0', user])
+ subp.subp(['chage', '-d', '0', user])
else:
LOG.warning('Failed to expire password for %s with error: '
'%s', user, e)
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
index 2f8ea546..7ec06a9c 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -8,6 +8,5 @@
class ConfigSource(object):
"""Specifies a source for the Config Content."""
- pass
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index 3d369d04..d919f693 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -10,7 +10,7 @@ import os
import re
import time
-from cloudinit import util
+from cloudinit import subp
from .guestcust_event import GuestCustEventEnum
from .guestcust_state import GuestCustStateEnum
@@ -34,7 +34,7 @@ def send_rpc(rpc):
try:
logger.debug("Sending RPC command: %s", rpc)
- (out, err) = util.subp(["vmware-rpctool", rpc], rcs=[0])
+ (out, err) = subp.subp(["vmware-rpctool", rpc], rcs=[0])
# Remove the trailing newline in the output.
if out:
out = out.rstrip()
@@ -128,30 +128,46 @@ def get_tools_config(section, key, defaultVal):
not installed.
"""
- if not util.which('vmware-toolbox-cmd'):
+ if not subp.which('vmware-toolbox-cmd'):
logger.debug(
'vmware-toolbox-cmd not installed, returning default value')
return defaultVal
- retValue = defaultVal
cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key]
try:
- (outText, _) = util.subp(cmd)
- m = re.match(r'([^=]+)=(.*)', outText)
- if m:
- retValue = m.group(2).strip()
- logger.debug("Get tools config: [%s] %s = %s",
- section, key, retValue)
- else:
+ (outText, _) = subp.subp(cmd)
+ except subp.ProcessExecutionError as e:
+ if e.exit_code == 69:
logger.debug(
- "Tools config: [%s] %s is not found, return default value: %s",
- section, key, retValue)
- except util.ProcessExecutionError as e:
- logger.error("Failed running %s[%s]", cmd, e.exit_code)
- logger.exception(e)
+ "vmware-toolbox-cmd returned 69 (unavailable) for cmd: %s."
+ " Return default value: %s", " ".join(cmd), defaultVal)
+ else:
+ logger.error("Failed running %s[%s]", cmd, e.exit_code)
+ logger.exception(e)
+ return defaultVal
+
+ retValue = defaultVal
+ m = re.match(r'([^=]+)=(.*)', outText)
+ if m:
+ retValue = m.group(2).strip()
+ logger.debug("Get tools config: [%s] %s = %s",
+ section, key, retValue)
+ else:
+ logger.debug(
+ "Tools config: [%s] %s is not found, return default value: %s",
+ section, key, retValue)
return retValue
+# Sets message to the VMX guestinfo.gc.status property to the
+# underlying VMware Virtualization Platform.
+def set_gc_status(config, gcMsg):
+ if config and config.post_gc_status:
+ rpc = "info-set guestinfo.gc.status %s" % gcMsg
+ return send_rpc(rpc)
+ return None
+
+
# vi: ts=4 expandtab