author     zsdc <taras@vyos.io>    2020-09-15 17:05:20 +0300
committer  zsdc <taras@vyos.io>    2020-09-15 17:05:20 +0300
commit     7cd260b313267dc7123cb99a75d4555e24909cca (patch)
tree       f57f3db085a724df237ffa64b589c6bb6dd3b28f /cloudinit/sources
parent     1a790ee102fd405e5c3a20a17a69ba0c118ed874 (diff)
parent     948bd9c1fcd08346cf8ec0551d7f6c2b234e896b (diff)
T2117: Cloud-init updated to 20.3
Merged with the 20.3 tag from the upstream Cloud-init repository
Diffstat (limited to 'cloudinit/sources')
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py | 8
-rwxr-xr-x  cloudinit/sources/DataSourceAzure.py | 167
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py | 3
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 11
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py | 2
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 200
-rw-r--r--  cloudinit/sources/DataSourceGCE.py | 2
-rw-r--r--  cloudinit/sources/DataSourceHetzner.py | 15
-rw-r--r--  cloudinit/sources/DataSourceIBMCloud.py | 6
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py | 5
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 24
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 63
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 54
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 9
-rw-r--r--  cloudinit/sources/DataSourceOracle.py | 396
-rw-r--r--  cloudinit/sources/DataSourceRbxCloud.py | 34
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 9
-rw-r--r--  cloudinit/sources/__init__.py | 55
-rwxr-xr-x  cloudinit/sources/helpers/azure.py | 524
-rw-r--r--  cloudinit/sources/helpers/digitalocean.py | 21
-rw-r--r--  cloudinit/sources/helpers/hetzner.py | 19
-rw-r--r--  cloudinit/sources/helpers/netlink.py | 3
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 60
-rw-r--r--  cloudinit/sources/helpers/tests/test_netlink.py | 167
-rw-r--r--  cloudinit/sources/helpers/tests/test_openstack.py | 44
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py | 26
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_custom_script.py | 3
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_file.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_namespace.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py | 7
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_passwd.py | 7
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_source.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py | 48
-rw-r--r--  cloudinit/sources/tests/test_init.py | 156
-rw-r--r--  cloudinit/sources/tests/test_oracle.py | 1032
35 files changed, 1983 insertions(+), 1200 deletions(-)
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 5270fda8..ac3ecc3d 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -18,9 +18,9 @@ import os.path
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
-from cloudinit.util import ProcessExecutionError
LOG = logging.getLogger(__name__)
@@ -192,7 +192,7 @@ class DataSourceAltCloud(sources.DataSource):
# modprobe floppy
try:
modprobe_floppy()
- except ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
util.logexc(LOG, 'Failed modprobe: %s', e)
return False
@@ -201,7 +201,7 @@ class DataSourceAltCloud(sources.DataSource):
# udevadm settle for floppy device
try:
util.udevadm_settle(exists=floppy_dev, timeout=5)
- except (ProcessExecutionError, OSError) as e:
+ except (subp.ProcessExecutionError, OSError) as e:
util.logexc(LOG, 'Failed udevadm_settle: %s\n', e)
return False
@@ -261,7 +261,7 @@ class DataSourceAltCloud(sources.DataSource):
def modprobe_floppy():
- out, _err = util.subp(CMD_PROBE_FLOPPY)
+ out, _err = subp.subp(CMD_PROBE_FLOPPY)
LOG.debug('Command: %s\nOutput%s', ' '.join(CMD_PROBE_FLOPPY), out)
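
For illustration, a minimal sketch of the call pattern this file migrates to: process execution and its error type now live in cloudinit.subp rather than cloudinit.util (module paths as in the hunks above; the command value is an assumption for the sketch):

    from cloudinit import subp

    CMD_PROBE_FLOPPY = ['modprobe', 'floppy']  # assumed value, not shown in diff

    def modprobe_floppy():
        # subp.subp returns an (out, err) tuple and raises
        # subp.ProcessExecutionError on a non-zero exit code.
        out, _err = subp.subp(CMD_PROBE_FLOPPY)
        return out

    try:
        modprobe_floppy()
    except subp.ProcessExecutionError as e:
        print('Failed modprobe: %s' % e)
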
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 61ec522a..f3c6452b 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -8,7 +8,6 @@ import base64
import contextlib
import crypt
from functools import partial
-import json
import os
import os.path
import re
@@ -19,9 +18,11 @@ import xml.etree.ElementTree as ET
from cloudinit import log as logging
from cloudinit import net
from cloudinit.event import EventType
+from cloudinit.net import device_driver
from cloudinit.net.dhcp import EphemeralDHCPv4
from cloudinit import sources
from cloudinit.sources.helpers import netlink
+from cloudinit import subp
from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
from cloudinit import util
from cloudinit.reporting import events
@@ -34,7 +35,9 @@ from cloudinit.sources.helpers.azure import (
get_system_info,
report_diagnostic_event,
EphemeralDHCPv4WithReporting,
- is_byte_swapped)
+ is_byte_swapped,
+ dhcp_log_cb,
+ push_log_to_kvp)
LOG = logging.getLogger(__name__)
@@ -139,8 +142,8 @@ def find_dev_from_busdev(camcontrol_out, busdev):
def execute_or_debug(cmd, fail_ret=None):
try:
- return util.subp(cmd)[0]
- except util.ProcessExecutionError:
+ return subp.subp(cmd)[0]
+ except subp.ProcessExecutionError:
LOG.debug("Failed to execute: %s", ' '.join(cmd))
return fail_ret
@@ -164,12 +167,11 @@ def get_resource_disk_on_freebsd(port_id):
port_id = port_id - 2
g1 = "000" + str(port_id)
g0g1 = "{0}-{1}".format(g0, g1)
- """
- search 'X' from
- 'dev.storvsc.X.%pnpinfo:
- classid=32412632-86cb-44a2-9b5c-50d1417354f5
- deviceid=00000000-0001-8899-0000-000000000000'
- """
+
+ # search 'X' from
+ # 'dev.storvsc.X.%pnpinfo:
+ # classid=32412632-86cb-44a2-9b5c-50d1417354f5
+ # deviceid=00000000-0001-8899-0000-000000000000'
sysctl_out = get_dev_storvsc_sysctl()
storvscid = find_storvscid_from_sysctl_pnpinfo(sysctl_out, g0g1)
@@ -252,11 +254,11 @@ DEF_PASSWD_REDACTION = 'REDACTED'
def get_hostname(hostname_command='hostname'):
if not isinstance(hostname_command, (list, tuple)):
hostname_command = (hostname_command,)
- return util.subp(hostname_command, capture=True)[0].strip()
+ return subp.subp(hostname_command, capture=True)[0].strip()
def set_hostname(hostname, hostname_command='hostname'):
- util.subp([hostname_command, hostname])
+ subp.subp([hostname_command, hostname])
@azure_ds_telemetry_reporter
@@ -275,7 +277,14 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
(previous_hostname == temp_hostname and policy != 'force')):
yield None
return
- set_hostname(temp_hostname, hostname_command)
+ try:
+ set_hostname(temp_hostname, hostname_command)
+ except Exception as e:
+ msg = 'Failed setting temporary hostname: %s' % e
+ report_diagnostic_event(msg)
+ LOG.warning(msg)
+ yield None
+ return
try:
yield previous_hostname
finally:
@@ -343,7 +352,7 @@ class DataSourceAzure(sources.DataSource):
try:
invoke_agent(agent_cmd)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
# claim the datasource even if the command failed
util.logexc(LOG, "agent command '%s' failed.",
self.ds_cfg['agent_command'])
@@ -522,8 +531,9 @@ class DataSourceAzure(sources.DataSource):
try:
crawled_data = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self.crawl_metadata
+ )
except sources.InvalidMetaDataException as e:
LOG.warning('Could not crawl Azure metadata: %s', e)
return False
@@ -596,25 +606,35 @@ class DataSourceAzure(sources.DataSource):
return_val = None
def exc_cb(msg, exception):
- if isinstance(exception, UrlError) and exception.code == 404:
- if self.imds_poll_counter == self.imds_logging_threshold:
- # Reducing the logging frequency as we are polling IMDS
- self.imds_logging_threshold *= 2
- LOG.debug("Call to IMDS with arguments %s failed "
- "with status code %s after %s retries",
- msg, exception.code, self.imds_poll_counter)
- LOG.debug("Backing off logging threshold for the same "
- "exception to %d", self.imds_logging_threshold)
- self.imds_poll_counter += 1
- return True
-
- # If we get an exception while trying to call IMDS, we
- # call DHCP and setup the ephemeral network to acquire the new IP.
- LOG.debug("Call to IMDS with arguments %s failed with "
- "status code %s", msg, exception.code)
- report_diagnostic_event("polling IMDS failed with exception %s"
- % exception.code)
- return False
+ if isinstance(exception, UrlError):
+ if exception.code in (404, 410):
+ if self.imds_poll_counter == self.imds_logging_threshold:
+ # Reducing the logging frequency as we are polling IMDS
+ self.imds_logging_threshold *= 2
+ LOG.debug("Call to IMDS with arguments %s failed "
+ "with status code %s after %s retries",
+ msg, exception.code, self.imds_poll_counter)
+ LOG.debug("Backing off logging threshold for the same "
+ "exception to %d",
+ self.imds_logging_threshold)
+ report_diagnostic_event("poll IMDS with %s failed. "
+ "Exception: %s and code: %s" %
+ (msg, exception.cause,
+ exception.code))
+ self.imds_poll_counter += 1
+ return True
+ else:
+ # If we get an exception while trying to call IMDS, we call
+ # DHCP and setup the ephemeral network to acquire a new IP.
+ report_diagnostic_event("poll IMDS with %s failed. "
+ "Exception: %s and code: %s" %
+ (msg, exception.cause,
+ exception.code))
+ return False
+
+ LOG.debug("poll IMDS failed with an unexpected exception: %s",
+ exception)
+ return False
LOG.debug("Wait for vnetswitch to happen")
while True:
@@ -624,7 +644,8 @@ class DataSourceAzure(sources.DataSource):
name="obtain-dhcp-lease",
description="obtain dhcp lease",
parent=azure_ds_reporter):
- self._ephemeral_dhcp_ctx = EphemeralDHCPv4()
+ self._ephemeral_dhcp_ctx = EphemeralDHCPv4(
+ dhcp_log_func=dhcp_log_cb)
lease = self._ephemeral_dhcp_ctx.obtain_lease()
if vnet_switched:
@@ -675,7 +696,6 @@ class DataSourceAzure(sources.DataSource):
except UrlError:
# Teardown our EphemeralDHCPv4 context on failure as we retry
self._ephemeral_dhcp_ctx.clean_network()
- pass
finally:
if nl_sock:
nl_sock.close()
@@ -771,9 +791,12 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def activate(self, cfg, is_new_instance):
- address_ephemeral_resize(is_new_instance=is_new_instance,
- preserve_ntfs=self.ds_cfg.get(
- DS_CFG_KEY_PRESERVE_NTFS, False))
+ try:
+ address_ephemeral_resize(is_new_instance=is_new_instance,
+ preserve_ntfs=self.ds_cfg.get(
+ DS_CFG_KEY_PRESERVE_NTFS, False))
+ finally:
+ push_log_to_kvp(self.sys_cfg['def_log_file'])
return
@property
@@ -882,9 +905,10 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
(cand_part, cand_path, devpath))
with events.ReportEventStack(
- name="mount-ntfs-and-count",
- description="mount-ntfs-and-count",
- parent=azure_ds_reporter) as evt:
+ name="mount-ntfs-and-count",
+ description="mount-ntfs-and-count",
+ parent=azure_ds_reporter
+ ) as evt:
try:
file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
update_env_for_mount={'LANG': 'C'})
@@ -913,9 +937,10 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
# wait for ephemeral disk to come up
naplen = .2
with events.ReportEventStack(
- name="wait-for-ephemeral-disk",
- description="wait for ephemeral disk",
- parent=azure_ds_reporter):
+ name="wait-for-ephemeral-disk",
+ description="wait for ephemeral disk",
+ parent=azure_ds_reporter
+ ):
missing = util.wait_for_files([devpath],
maxwait=maxwait,
naplen=naplen,
@@ -972,7 +997,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
if command == "builtin":
if util.is_FreeBSD():
command = BOUNCE_COMMAND_FREEBSD
- elif util.which('ifup'):
+ elif subp.which('ifup'):
command = BOUNCE_COMMAND_IFUP
else:
LOG.debug(
@@ -983,7 +1008,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
shell = not isinstance(command, (list, tuple))
# capture=False, see comments in bug 1202758 and bug 1206164.
util.log_time(logfunc=LOG.debug, msg="publishing hostname",
- get_uptime=True, func=util.subp,
+ get_uptime=True, func=subp.subp,
kwargs={'args': command, 'shell': shell, 'capture': False,
'env': env})
return True
@@ -993,7 +1018,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
def crtfile_to_pubkey(fname, data=None):
pipeline = ('openssl x509 -noout -pubkey < "$0" |'
'ssh-keygen -i -m PKCS8 -f /dev/stdin')
- (out, _err) = util.subp(['sh', '-c', pipeline, fname],
+ (out, _err) = subp.subp(['sh', '-c', pipeline, fname],
capture=True, data=data)
return out.rstrip()
@@ -1005,7 +1030,7 @@ def pubkeys_from_crt_files(flist):
for fname in flist:
try:
pubkeys.append(crtfile_to_pubkey(fname))
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
errors.append(fname)
if errors:
@@ -1047,7 +1072,7 @@ def invoke_agent(cmd):
# this is a function itself to simplify patching it for test
if cmd:
LOG.debug("invoking agent: %s", cmd)
- util.subp(cmd, shell=(not isinstance(cmd, list)))
+ subp.subp(cmd, shell=(not isinstance(cmd, list)))
else:
LOG.debug("not invoking agent")
@@ -1122,7 +1147,7 @@ def read_azure_ovf(contents):
except Exception as e:
error_str = "Invalid ovf-env.xml: %s" % e
report_diagnostic_event(error_str)
- raise BrokenAzureDataSource(error_str)
+ raise BrokenAzureDataSource(error_str) from e
results = find_child(dom.documentElement,
lambda n: n.localName == "ProvisioningSection")
@@ -1323,9 +1348,10 @@ def parse_network_config(imds_metadata):
@return: Dictionary containing network version 2 standard configuration.
"""
with events.ReportEventStack(
- name="parse_network_config",
- description="",
- parent=azure_ds_reporter) as evt:
+ name="parse_network_config",
+ description="",
+ parent=azure_ds_reporter
+ ) as evt:
if imds_metadata != sources.UNSET and imds_metadata:
netconfig = {'version': 2, 'ethernets': {}}
LOG.debug('Azure: generating network configuration from IMDS')
@@ -1362,9 +1388,16 @@ def parse_network_config(imds_metadata):
ip=privateIp, prefix=netPrefix))
if dev_config:
mac = ':'.join(re.findall(r'..', intf['macAddress']))
- dev_config.update(
- {'match': {'macaddress': mac.lower()},
- 'set-name': nicname})
+ dev_config.update({
+ 'match': {'macaddress': mac.lower()},
+ 'set-name': nicname
+ })
+ # With netvsc, we can get two interfaces that
+ # share the same MAC, so we need to make sure
+ # our match condition also contains the driver
+ driver = device_driver(nicname)
+ if driver and driver == 'hv_netvsc':
+ dev_config['match']['driver'] = driver
netconfig['ethernets'][nicname] = dev_config
evt.description = "network config from imds"
else:
@@ -1422,8 +1455,14 @@ def _get_metadata_from_imds(retries):
LOG.debug(msg)
return {}
try:
+ from json.decoder import JSONDecodeError
+ json_decode_error = JSONDecodeError
+ except ImportError:
+ json_decode_error = ValueError
+
+ try:
return util.load_json(str(response))
- except json.decoder.JSONDecodeError as e:
+ except json_decode_error as e:
+ report_diagnostic_event('non-json imds response: %s' % e)
LOG.warning(
'Ignoring non-json IMDS instance metadata: %s', str(response))
@@ -1468,12 +1507,12 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
def _is_platform_viable(seed_dir):
+ """Check platform environment to report if this datasource may run."""
with events.ReportEventStack(
- name="check-platform-viability",
- description="found azure asset tag",
- parent=azure_ds_reporter) as evt:
-
- """Check platform environment to report if this datasource may run."""
+ name="check-platform-viability",
+ description="found azure asset tag",
+ parent=azure_ds_reporter
+ ) as evt:
asset_tag = util.read_dmi_data('chassis-asset-tag')
if asset_tag == AZURE_CHASSIS_ASSET_TAG:
return True
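
To make the netvsc hunk concrete: a sketch of the v2 network config dict parse_network_config now emits for one IMDS NIC whose driver is hv_netvsc (interface name and MAC are hypothetical):

    netconfig = {
        'version': 2,
        'ethernets': {
            'eth0': {
                'dhcp4': True,
                'match': {
                    # Matching on MAC alone is ambiguous under netvsc, where
                    # a synthetic and an SR-IOV interface can share the same
                    # address; the driver pin added above disambiguates them.
                    'macaddress': '00:0d:3a:04:75:98',
                    'driver': 'hv_netvsc',
                },
                'set-name': 'eth0',
            }
        }
    }
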
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 2013bed7..54810439 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -22,6 +22,7 @@ from cloudinit import log as logging
from cloudinit.net import dhcp
from cloudinit import sources
from cloudinit import url_helper as uhelp
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -46,7 +47,7 @@ class CloudStackPasswordServerClient(object):
# The password server was in the past, a broken HTTP server, but is now
# fixed. wget handles this seamlessly, so it's easier to shell out to
# that rather than write our own handling code.
- output, _ = util.subp([
+ output, _ = subp.subp([
'wget', '--quiet', '--tries', '3', '--timeout', '20',
'--output-document', '-', '--header',
'DomU_Request: {0}'.format(domu_request),
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index f77923c2..62756cf7 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -10,6 +10,7 @@ import os
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.net import eni
@@ -71,11 +72,11 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
if not found:
dslist = self.sys_cfg.get('datasource_list')
for dev in find_candidate_devs(dslist=dslist):
- try:
- if util.is_FreeBSD() and dev.startswith("/dev/cd"):
+ mtype = None
+ if util.is_BSD():
+ if dev.startswith("/dev/cd"):
mtype = "cd9660"
- else:
- mtype = None
+ try:
results = util.mount_cb(dev, read_config_drive,
mtype=mtype)
found = dev
@@ -245,7 +246,7 @@ def find_candidate_devs(probe_optical=True, dslist=None):
for device in OPTICAL_DEVICES:
try:
util.find_devs_with(path=device)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
by_fstype = []
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index e0ef665e..5040ce5b 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -58,7 +58,7 @@ class DataSourceDigitalOcean(sources.DataSource):
ipv4LL_nic = None
if self.use_ip4LL:
- ipv4LL_nic = do_helper.assign_ipv4_link_local()
+ ipv4LL_nic = do_helper.assign_ipv4_link_local(self.distro)
md = do_helper.read_metadata(
self.metadata_address, timeout=self.timeout,
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 0f2bfef4..1d09c12a 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -29,7 +29,6 @@ STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
API_TOKEN_ROUTE = 'latest/api/token'
-API_TOKEN_DISABLED = '_ec2_disable_api_token'
AWS_TOKEN_TTL_SECONDS = '21600'
AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token'
AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds'
@@ -63,7 +62,7 @@ class DataSourceEc2(sources.DataSource):
# Priority ordered list of additional metadata versions which will be tried
# for extended metadata content. IPv6 support comes in 2016-09-02
- extended_metadata_versions = ['2016-09-02']
+ extended_metadata_versions = ['2018-09-24', '2016-09-02']
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -193,6 +192,12 @@ class DataSourceEc2(sources.DataSource):
return self.metadata['instance-id']
def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None):
+ """ Get an API token for EC2 Instance Metadata Service.
+
+ On EC2, IMDS will always answer an API token, unless
+ the instance owner has disabled the IMDS HTTP endpoint or
+ the network topology conflicts with the configured hop-limit.
+ """
if self.cloud_name != CloudNames.AWS:
return
@@ -205,18 +210,33 @@ class DataSourceEc2(sources.DataSource):
urls.append(cur)
url2base[cur] = url
- # use the self._status_cb to check for Read errors, which means
- # we can't reach the API token URL, so we should disable IMDSv2
+ # use the self._imds_exception_cb to check for Read errors
LOG.debug('Fetching Ec2 IMDSv2 API Token')
- url, response = uhelp.wait_for_url(
- urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb,
- headers_cb=self._get_headers, request_method=request_method,
- headers_redact=AWS_TOKEN_REDACT)
+
+ response = None
+ url = None
+ url_params = self.get_url_params()
+ try:
+ url, response = uhelp.wait_for_url(
+ urls=urls, max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds, status_cb=LOG.warning,
+ headers_cb=self._get_headers,
+ exception_cb=self._imds_exception_cb,
+ request_method=request_method,
+ headers_redact=AWS_TOKEN_REDACT)
+ except uhelp.UrlError:
+ # We use the raised exception to interrupt the retry loop.
+ # Nothing else to do here.
+ pass
if url and response:
self._api_token = response
return url2base[url]
+ # If we get here, then wait_for_url timed out, waiting for IMDS
+ # or the IMDS HTTP endpoint is disabled
+ return None
+
def wait_for_metadata_service(self):
mcfg = self.ds_cfg
@@ -240,9 +260,11 @@ class DataSourceEc2(sources.DataSource):
# try the api token path first
metadata_address = self._maybe_fetch_api_token(mdurls)
- if not metadata_address:
- if self._api_token == API_TOKEN_DISABLED:
- LOG.warning('Retrying with IMDSv1')
+ # When running on EC2, we always access IMDS with an API token.
+ # If we could not get an API token, then we assume the IMDS
+ # endpoint was disabled and we move on without a data source.
+ # Fallback to IMDSv1 if not running on EC2
+ if not metadata_address and self.cloud_name != CloudNames.AWS:
# if we can't get a token, use instance-id path
urls = []
url2base = {}
@@ -267,6 +289,8 @@ class DataSourceEc2(sources.DataSource):
if metadata_address:
self.metadata_address = metadata_address
LOG.debug("Using metadata source: '%s'", self.metadata_address)
+ elif self.cloud_name == CloudNames.AWS:
+ LOG.warning("IMDS's HTTP endpoint is probably disabled")
else:
LOG.critical("Giving up on md from %s after %s seconds",
urls, int(time.time() - start_time))
@@ -381,13 +405,16 @@ class DataSourceEc2(sources.DataSource):
logfunc=LOG.debug, msg='Re-crawl of metadata service',
func=self.get_data)
- # Limit network configuration to only the primary/fallback nic
iface = self.fallback_interface
- macs_to_nics = {net.get_interface_mac(iface): iface}
net_md = self.metadata.get('network')
if isinstance(net_md, dict):
+ # SRU_BLOCKER: xenial, bionic and eoan should default
+ # apply_full_imds_network_config to False to retain original
+ # behavior on those releases.
result = convert_ec2_metadata_network_config(
- net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
+ net_md, fallback_nic=iface,
+ full_network_config=util.get_cfg_option_bool(
+ self.ds_cfg, 'apply_full_imds_network_config', True))
# RELEASE_BLOCKER: xenial should drop the below if statement,
# because the issue being addressed doesn't exist pre-netplan.
@@ -496,11 +523,29 @@ class DataSourceEc2(sources.DataSource):
self._api_token = None
return True # always retry
- def _status_cb(self, msg, exc=None):
- LOG.warning(msg)
- if 'Read timed out' in msg:
- LOG.warning('Cannot use Ec2 IMDSv2 API tokens, using IMDSv1')
- self._api_token = API_TOKEN_DISABLED
+ def _imds_exception_cb(self, msg, exception=None):
+ """Fail quickly on proper AWS if IMDSv2 rejects API token request
+
+ Guidance from Amazon is that if IMDSv2 had disabled token requests
+ by returning a 403, or cloud-init malformed requests resulting in
+ other 40X errors, we want the datasource detection to fail quickly
+ without retries as those symptoms will likely not be resolved by
+ retries.
+
+ Exceptions such as requests.ConnectionError due to IMDS being
+ temporarily unroutable or unavailable will still retry due to the
+ callsite wait_for_url.
+ """
+ if isinstance(exception, uhelp.UrlError):
+ # requests.ConnectionError will have exception.code == None
+ if exception.code and exception.code >= 400:
+ if exception.code == 403:
+ LOG.warning('Ec2 IMDS endpoint returned a 403 error. '
+ 'HTTP endpoint is disabled. Aborting.')
+ else:
+ LOG.warning('Fatal error while requesting '
+ 'Ec2 IMDSv2 API tokens')
+ raise exception
def _get_headers(self, url=''):
"""Return a dict of headers for accessing a url.
@@ -508,8 +553,7 @@ class DataSourceEc2(sources.DataSource):
If _api_token is unset on AWS, attempt to refresh the token via a PUT
and then return the updated token header.
"""
- if self.cloud_name != CloudNames.AWS or (self._api_token ==
- API_TOKEN_DISABLED):
+ if self.cloud_name != CloudNames.AWS:
return {}
# Request a 6 hour token if URL is API_TOKEN_ROUTE
request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS}
@@ -573,9 +617,11 @@ def parse_strict_mode(cfgval):
if sleep:
try:
sleep = int(sleep)
- except ValueError:
- raise ValueError("Invalid sleep '%s' in strict_id setting '%s': "
- "not an integer" % (sleep, cfgval))
+ except ValueError as e:
+ raise ValueError(
+ "Invalid sleep '%s' in strict_id setting '%s': not an integer"
+ % (sleep, cfgval)
+ ) from e
else:
sleep = None
@@ -678,9 +724,10 @@ def _collect_platform_data():
return data
-def convert_ec2_metadata_network_config(network_md, macs_to_nics=None,
- fallback_nic=None):
- """Convert ec2 metadata to network config version 1 data dict.
+def convert_ec2_metadata_network_config(
+ network_md, macs_to_nics=None, fallback_nic=None,
+ full_network_config=True):
+ """Convert ec2 metadata to network config version 2 data dict.
@param: network_md: 'network' portion of EC2 metadata.
generally formed as {"interfaces": {"macs": {}} where
@@ -690,28 +737,105 @@ def convert_ec2_metadata_network_config(network_md, macs_to_nics=None,
not provided, get_interfaces_by_mac is called to get it from the OS.
@param: fallback_nic: Optionally provide the primary nic interface name.
This nic will be guaranteed to minimally have a dhcp4 configuration.
+ @param: full_network_config: Boolean set True to configure all networking
+ presented by IMDS. This includes rendering secondary IPv4 and IPv6
+ addresses on all NICs and rendering network config on secondary NICs.
+ If False, only the primary nic will be configured and only with dhcp
+ (IPv4/IPv6).
- @return A dict of network config version 1 based on the metadata and macs.
+ @return A dict of network config version 2 based on the metadata and macs.
"""
- netcfg = {'version': 1, 'config': []}
+ netcfg = {'version': 2, 'ethernets': {}}
if not macs_to_nics:
macs_to_nics = net.get_interfaces_by_mac()
macs_metadata = network_md['interfaces']['macs']
- for mac, nic_name in macs_to_nics.items():
+
+ if not full_network_config:
+ for mac, nic_name in macs_to_nics.items():
+ if nic_name == fallback_nic:
+ break
+ dev_config = {'dhcp4': True,
+ 'dhcp6': False,
+ 'match': {'macaddress': mac.lower()},
+ 'set-name': nic_name}
+ nic_metadata = macs_metadata.get(mac)
+ if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
+ dev_config['dhcp6'] = True
+ netcfg['ethernets'][nic_name] = dev_config
+ return netcfg
+ # Apply network config for all nics and any secondary IPv4/v6 addresses
+ for mac, nic_name in sorted(macs_to_nics.items()):
nic_metadata = macs_metadata.get(mac)
if not nic_metadata:
continue # Not a physical nic represented in metadata
- nic_cfg = {'type': 'physical', 'name': nic_name, 'subnets': []}
- nic_cfg['mac_address'] = mac
- if (nic_name == fallback_nic or nic_metadata.get('public-ipv4s') or
- nic_metadata.get('local-ipv4s')):
- nic_cfg['subnets'].append({'type': 'dhcp4'})
- if nic_metadata.get('ipv6s'):
- nic_cfg['subnets'].append({'type': 'dhcp6'})
- netcfg['config'].append(nic_cfg)
+ # device-number is zero-indexed, we want it 1-indexed for the
+ # multiplication on the following line
+ nic_idx = int(nic_metadata['device-number']) + 1
+ dhcp_override = {'route-metric': nic_idx * 100}
+ dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
+ 'dhcp6': False,
+ 'match': {'macaddress': mac.lower()},
+ 'set-name': nic_name}
+ if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
+ dev_config['dhcp6'] = True
+ dev_config['dhcp6-overrides'] = dhcp_override
+ dev_config['addresses'] = get_secondary_addresses(nic_metadata, mac)
+ if not dev_config['addresses']:
+ dev_config.pop('addresses') # Since we found none configured
+ netcfg['ethernets'][nic_name] = dev_config
+ # Remove route-metric dhcp overrides if only one nic configured
+ if len(netcfg['ethernets']) == 1:
+ for nic_name in netcfg['ethernets'].keys():
+ netcfg['ethernets'][nic_name].pop('dhcp4-overrides')
+ netcfg['ethernets'][nic_name].pop('dhcp6-overrides', None)
return netcfg
+def get_secondary_addresses(nic_metadata, mac):
+ """Parse interface-specific nic metadata and return any secondary IPs
+
+ :return: List of secondary IPv4 or IPv6 addresses to configure on the
+ interface
+ """
+ ipv4s = nic_metadata.get('local-ipv4s')
+ ipv6s = nic_metadata.get('ipv6s')
+ addresses = []
+ # In version < 2018-09-24 local_ipv4s or ipv6s is a str with one IP
+ if bool(isinstance(ipv4s, list) and len(ipv4s) > 1):
+ addresses.extend(
+ _get_secondary_addresses(
+ nic_metadata, 'subnet-ipv4-cidr-block', mac, ipv4s, '24'))
+ if bool(isinstance(ipv6s, list) and len(ipv6s) > 1):
+ addresses.extend(
+ _get_secondary_addresses(
+ nic_metadata, 'subnet-ipv6-cidr-block', mac, ipv6s, '128'))
+ return sorted(addresses)
+
+
+def _get_secondary_addresses(nic_metadata, cidr_key, mac, ips, default_prefix):
+ """Return list of IP addresses as CIDRs for secondary IPs
+
+ The CIDR prefix will be default_prefix if cidr_key is absent or not
+ parseable in nic_metadata.
+ """
+ addresses = []
+ cidr = nic_metadata.get(cidr_key)
+ prefix = default_prefix
+ if not cidr or len(cidr.split('/')) != 2:
+ ip_type = 'ipv4' if 'ipv4' in cidr_key else 'ipv6'
+ LOG.warning(
+ 'Could not parse %s %s for mac %s. %s network'
+ ' config prefix defaults to /%s',
+ cidr_key, cidr, mac, ip_type, prefix)
+ else:
+ prefix = cidr.split('/')[1]
+ # We know we have > 1 IPs in metadata for this IP type
+ for ip in ips[1:]:
+ addresses.append(
+ '{ip}/{prefix}'.format(ip=ip, prefix=prefix))
+ return addresses
+
+
# Used to match classes to dependencies
datasources = [
(DataSourceEc2Local, (sources.DEP_FILESYSTEM,)), # Run at init-local
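
Putting the Ec2 hunks together, a sketch of what convert_ec2_metadata_network_config returns for a hypothetical two-NIC instance where the second NIC carries one secondary IPv4 (all addresses and MACs invented):

    netcfg = {
        'version': 2,
        'ethernets': {
            'eth0': {
                # device-number 0 -> route-metric (0 + 1) * 100
                'dhcp4': True,
                'dhcp4-overrides': {'route-metric': 100},
                'dhcp6': False,
                'match': {'macaddress': '0a:07:84:3d:5e:12'},
                'set-name': 'eth0',
            },
            'eth1': {
                # device-number 1 -> route-metric (1 + 1) * 100
                'dhcp4': True,
                'dhcp4-overrides': {'route-metric': 200},
                'dhcp6': False,
                'match': {'macaddress': '0a:07:84:3d:5e:13'},
                'set-name': 'eth1',
                # get_secondary_addresses renders local-ipv4s[1:] as CIDRs,
                # taking the prefix from subnet-ipv4-cidr-block
                # ('10.0.0.0/20' -> /20) and defaulting to /24 if absent.
                'addresses': ['10.0.4.10/20'],
            },
        },
    }

With two or more NICs configured, the route-metric overrides are kept; the final loop in the hunk strips them only when a single NIC is present.
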
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 6cbfbbac..0ec5f6ec 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -116,7 +116,7 @@ def _write_host_key_to_guest_attributes(key_type, key_value):
resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS,
request_method='PUT', check_status=False)
if resp.ok():
- LOG.debug('Wrote %s host key to guest attributes.', key_type)
+ LOG.debug('Wrote %s host key to guest attributes.', key_type)
else:
LOG.debug('Unable to write %s host key to guest attributes.', key_type)
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index 50298330..a86035e0 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -59,12 +59,19 @@ class DataSourceHetzner(sources.DataSource):
self.userdata_address, timeout=self.timeout,
sec_between=self.wait_retry, retries=self.retries)
- self.userdata_raw = ud
+ # Hetzner cloud does not support binary user-data. So here, do a
+ # base64 decode of the data if we can. The end result being that a
+ # user can provide base64 encoded (possibly gzipped) data as user-data.
+ #
+ # The fallout is that in the event of b64 encoded user-data,
+ # /var/lib/cloud-init/cloud-config.txt will not be identical to the
+ # user-data provided. It will be decoded.
+ self.userdata_raw = hc_helper.maybe_b64decode(ud)
self.metadata_full = md
- """hostname is name provided by user at launch. The API enforces
- it is a valid hostname, but it is not guaranteed to be resolvable
- in dns or fully qualified."""
+ # hostname is name provided by user at launch. The API enforces it is
+ # a valid hostname, but it is not guaranteed to be resolvable in dns or
+ # fully qualified.
self.metadata['instance-id'] = md['instance-id']
self.metadata['local-hostname'] = md['hostname']
self.metadata['network-config'] = md.get('network-config', None)
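
maybe_b64decode lives in the Hetzner helper module and is not shown in this diff; a plausible sketch of the decode-if-valid behavior the comment above describes (an assumption, not the merged implementation):

    import base64
    import binascii

    def maybe_b64decode(data):
        # Return the base64-decoded payload when the input is valid
        # base64; otherwise hand the raw bytes back unchanged.
        try:
            return base64.b64decode(data, validate=True)
        except binascii.Error:
            return data
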
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index e0c714e8..8d196185 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -99,6 +99,7 @@ import os
from cloudinit import log as logging
from cloudinit import sources
from cloudinit.sources.helpers import openstack
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -240,7 +241,7 @@ def get_ibm_platform():
fslabels = {}
try:
devs = util.blkid()
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.warning("Failed to run blkid: %s", e)
return (None, None)
@@ -302,7 +303,8 @@ def read_md():
except sources.BrokenMetadata as e:
raise RuntimeError(
"Failed reading IBM config disk (platform=%s path=%s): %s" %
- (platform, path, e))
+ (platform, path, e)
+ ) from e
ret.update(results)
return ret
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 517913aa..9156925f 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -6,8 +6,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import print_function
-
import hashlib
import os
import time
@@ -228,7 +226,8 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
except url_helper.UrlError as e:
if e.code == 404 and not optional:
raise MAASSeedDirMalformed(
- "Missing required %s: %s" % (path, e))
+ "Missing required %s: %s" % (path, e)
+ ) from e
elif e.code != 404:
raise e
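
A recurring edit in this merge is re-raising wrapped exceptions with `raise ... from e`, which records the original error as __cause__ so both tracebacks survive in logs; a minimal illustration (the IOError stands in for url_helper.UrlError):

    class MAASSeedDirMalformed(Exception):
        pass

    def read_seed():
        try:
            raise IOError('404 on meta-data/instance-id')  # stand-in for UrlError
        except IOError as e:
            # Chaining records the IOError as __cause__, so the report
            # shows both the wrapper and the underlying failure.
            raise MAASSeedDirMalformed('Missing required seed: %s' % e) from e

    try:
        read_seed()
    except MAASSeedDirMalformed as err:
        assert isinstance(err.__cause__, IOError)
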
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index ee748b41..e408d730 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -36,23 +36,15 @@ class DataSourceNoCloud(sources.DataSource):
return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
def _get_devices(self, label):
- if util.is_FreeBSD():
- devlist = [
- p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label]
- if os.path.exists(p)]
- else:
- # Query optical drive to get it in blkid cache for 2.6 kernels
- util.find_devs_with(path="/dev/sr0")
- util.find_devs_with(path="/dev/sr1")
-
- fslist = util.find_devs_with("TYPE=vfat")
- fslist.extend(util.find_devs_with("TYPE=iso9660"))
+ fslist = util.find_devs_with("TYPE=vfat")
+ fslist.extend(util.find_devs_with("TYPE=iso9660"))
- label_list = util.find_devs_with("LABEL=%s" % label.upper())
- label_list.extend(util.find_devs_with("LABEL=%s" % label.lower()))
+ label_list = util.find_devs_with("LABEL=%s" % label.upper())
+ label_list.extend(util.find_devs_with("LABEL=%s" % label.lower()))
+ label_list.extend(util.find_devs_with("LABEL_FATBOOT=%s" % label))
- devlist = list(set(fslist) & set(label_list))
- devlist.sort(reverse=True)
+ devlist = list(set(fslist) & set(label_list))
+ devlist.sort(reverse=True)
return devlist
def _get_data(self):
@@ -370,7 +362,7 @@ def _merge_new_seed(cur, seeded):
class DataSourceNoCloudNet(DataSourceNoCloud):
def __init__(self, sys_cfg, distro, paths):
DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
- self.supported_seed_starts = ("http://", "https://", "ftp://")
+ self.supported_seed_starts = ("http://", "https://")
# Used to match classes to dependencies
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 7f55b5f8..6a9a331d 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -16,6 +16,7 @@ from xml.dom import minidom
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.sources.helpers.vmware.imc.config \
import Config
@@ -37,7 +38,8 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
enable_nics,
get_nics_to_enable,
set_customization_status,
- get_tools_config
+ get_tools_config,
+ set_gc_status
)
LOG = logging.getLogger(__name__)
@@ -140,6 +142,8 @@ class DataSourceOVF(sources.DataSource):
try:
cf = ConfigFile(vmwareImcConfigFilePath)
self._vmware_cust_conf = Config(cf)
+ set_gc_status(self._vmware_cust_conf, "Started")
+
(md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
@@ -148,14 +152,25 @@ class DataSourceOVF(sources.DataSource):
product_marker, os.path.join(self.paths.cloud_dir, 'data'))
special_customization = product_marker and not hasmarkerfile
customscript = self._vmware_cust_conf.custom_script_name
- custScriptConfig = get_tools_config(
- CONFGROUPNAME_GUESTCUSTOMIZATION,
- GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
- "false")
- if custScriptConfig.lower() != "true":
- # Update the customization status if there is a
- # custom script is disabled
- if special_customization and customscript:
+
+ # In case there is a custom script, check whether VMware
+ # Tools configuration allow the custom script to run.
+ if special_customization and customscript:
+ defVal = "false"
+ if self._vmware_cust_conf.default_run_post_script:
+ LOG.debug(
+ "Set default value to true due to"
+ " customization configuration."
+ )
+ defVal = "true"
+
+ custScriptConfig = get_tools_config(
+ CONFGROUPNAME_GUESTCUSTOMIZATION,
+ GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
+ defVal)
+ if custScriptConfig.lower() != "true":
+ # Update the customization status if custom script
+ # is disabled
msg = "Custom script is disabled by VM Administrator"
LOG.debug(msg)
set_customization_status(
@@ -171,7 +186,8 @@ class DataSourceOVF(sources.DataSource):
"Error parsing the customization Config File",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if special_customization:
if customscript:
@@ -183,7 +199,8 @@ class DataSourceOVF(sources.DataSource):
"Error executing pre-customization script",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
try:
LOG.debug("Preparing the Network configuration")
@@ -197,7 +214,8 @@ class DataSourceOVF(sources.DataSource):
"Error preparing Network Configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if special_customization:
LOG.debug("Applying password customization")
@@ -215,7 +233,8 @@ class DataSourceOVF(sources.DataSource):
"Error applying Password Configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if customscript:
try:
@@ -228,7 +247,8 @@ class DataSourceOVF(sources.DataSource):
"Error executing post-customization script",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if product_marker:
try:
@@ -240,7 +260,8 @@ class DataSourceOVF(sources.DataSource):
"Error creating marker files",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
self._vmware_cust_found = True
found.append('vmware-tools')
@@ -252,6 +273,7 @@ class DataSourceOVF(sources.DataSource):
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_DONE,
GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
+ set_gc_status(self._vmware_cust_conf, "Successful")
else:
np = [('com.vmware.guestInfo', transport_vmware_guestinfo),
@@ -327,7 +349,7 @@ class DataSourceOVFNet(DataSourceOVF):
def __init__(self, sys_cfg, distro, paths):
DataSourceOVF.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
- self.supported_seed_starts = ("http://", "https://", "ftp://")
+ self.supported_seed_starts = ("http://", "https://")
self.vmware_customization_supported = False
@@ -527,15 +549,15 @@ def transport_iso9660(require_iso=True):
def transport_vmware_guestinfo():
rpctool = "vmware-rpctool"
not_found = None
- if not util.which(rpctool):
+ if not subp.which(rpctool):
return not_found
cmd = [rpctool, "info-get guestinfo.ovfEnv"]
try:
- out, _err = util.subp(cmd)
+ out, _err = subp.subp(cmd)
if out:
return out
LOG.debug("cmd %s exited 0 with empty stdout: %s", cmd, out)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
LOG.warning("%s exited with code %d", rpctool, e.exit_code)
LOG.debug(e)
@@ -647,7 +669,7 @@ def setup_marker_files(markerid, marker_dir):
open(markerfile, 'w').close()
-def _raise_error_status(prefix, error, event, config_file):
+def _raise_error_status(prefix, error, event, config_file, conf):
"""
Raise error and send customization status to the underlying VMware
Virtualization Platform. Also, cleanup the imc directory.
@@ -656,6 +678,7 @@ def _raise_error_status(prefix, error, event, config_file):
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
event)
+ set_gc_status(conf, prefix)
util.del_dir(os.path.dirname(config_file))
raise error
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 02c9a7b8..45481938 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -13,6 +13,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import collections
+import functools
import os
import pwd
import re
@@ -21,6 +22,7 @@ import string
from cloudinit import log as logging
from cloudinit import net
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
@@ -59,10 +61,19 @@ class DataSourceOpenNebula(sources.DataSource):
for cdev in candidates:
try:
if os.path.isdir(self.seed_dir):
- results = read_context_disk_dir(cdev, asuser=parseuser)
+ results = read_context_disk_dir(
+ cdev, self.distro, asuser=parseuser
+ )
elif cdev.startswith("/dev"):
- results = util.mount_cb(cdev, read_context_disk_dir,
- data=parseuser)
+ # util.mount_cb only handles passing a single argument
+ # through to the wrapped function, so we have to partially
+ # apply the function to pass in `distro`. See LP: #1884979
+ partially_applied_func = functools.partial(
+ read_context_disk_dir,
+ asuser=parseuser,
+ distro=self.distro,
+ )
+ results = util.mount_cb(cdev, partially_applied_func)
except NonContextDiskDir:
continue
except BrokenContextDiskDir as exc:
@@ -128,10 +139,10 @@ class BrokenContextDiskDir(Exception):
class OpenNebulaNetwork(object):
- def __init__(self, context, system_nics_by_mac=None):
+ def __init__(self, context, distro, system_nics_by_mac=None):
self.context = context
if system_nics_by_mac is None:
- system_nics_by_mac = get_physical_nics_by_mac()
+ system_nics_by_mac = get_physical_nics_by_mac(distro)
self.ifaces = collections.OrderedDict(
[k for k in sorted(system_nics_by_mac.items(),
key=lambda k: net.natural_sort_key(k[1]))])
@@ -334,7 +345,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
cmd.extend(bash)
- (output, _error) = util.subp(cmd, data=bcmd)
+ (output, _error) = subp.subp(cmd, data=bcmd)
# exclude vars in bash that change on their own or that we used
excluded = (
@@ -366,7 +377,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
return ret
-def read_context_disk_dir(source_dir, asuser=None):
+def read_context_disk_dir(source_dir, distro, asuser=None):
"""
read_context_disk_dir(source_dir):
read source_dir and return a tuple with metadata dict and user-data
@@ -388,18 +399,23 @@ def read_context_disk_dir(source_dir, asuser=None):
if asuser is not None:
try:
pwd.getpwnam(asuser)
- except KeyError:
+ except KeyError as e:
raise BrokenContextDiskDir(
"configured user '{user}' does not exist".format(
- user=asuser))
+ user=asuser)
+ ) from e
try:
path = os.path.join(source_dir, 'context.sh')
content = util.load_file(path)
context = parse_shell_config(content, asuser=asuser)
- except util.ProcessExecutionError as e:
- raise BrokenContextDiskDir("Error processing context.sh: %s" % (e))
+ except subp.ProcessExecutionError as e:
+ raise BrokenContextDiskDir(
+ "Error processing context.sh: %s" % (e)
+ ) from e
except IOError as e:
- raise NonContextDiskDir("Error reading context.sh: %s" % (e))
+ raise NonContextDiskDir(
+ "Error reading context.sh: %s" % (e)
+ ) from e
else:
raise NonContextDiskDir("Missing context.sh")
@@ -417,9 +433,9 @@ def read_context_disk_dir(source_dir, asuser=None):
if ssh_key_var:
lines = context.get(ssh_key_var).splitlines()
- results['metadata']['public-keys'] = [l for l in lines
- if len(l) and not
- l.startswith("#")]
+ results['metadata']['public-keys'] = [
+ line for line in lines if len(line) and not line.startswith("#")
+ ]
# custom hostname -- try hostname or leave cloud-init
# itself create hostname from IP address later
@@ -449,15 +465,17 @@ def read_context_disk_dir(source_dir, asuser=None):
# http://docs.opennebula.org/5.4/operation/references/template.html#context-section
ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP.*$', k)]
if ipaddr_keys:
- onet = OpenNebulaNetwork(context)
+ onet = OpenNebulaNetwork(context, distro)
results['network-interfaces'] = onet.gen_conf()
return results
-def get_physical_nics_by_mac():
+def get_physical_nics_by_mac(distro):
devs = net.get_interfaces_by_mac()
- return dict([(m, n) for m, n in devs.items() if net.is_physical(n)])
+ return dict(
+ [(m, n) for m, n in devs.items() if distro.networking.is_physical(n)]
+ )
# Legacy: Must be present in case we load an old pkl object
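
The LP: #1884979 comment above hinges on functools.partial pre-binding extra arguments before util.mount_cb calls back with a single positional argument; a minimal illustration of the pattern, with mount_cb stubbed out:

    import functools

    def read_context_disk_dir(source_dir, distro, asuser=None):
        return (source_dir, distro, asuser)

    def mount_cb(device, callback):
        # cloud-init's util.mount_cb mounts `device` and invokes
        # callback(mountpoint); stubbed here with a fake mountpoint.
        mountpoint = '/run/mnt/' + device.rsplit('/', 1)[-1]
        return callback(mountpoint)

    partially_applied_func = functools.partial(
        read_context_disk_dir, distro='<distro>', asuser='nobody')
    print(mount_cb('/dev/sr0', partially_applied_func))
    # ('/run/mnt/sr0', '<distro>', 'nobody')
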
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 7a5e71b6..d4b43f44 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -29,7 +29,10 @@ DMI_PRODUCT_NOVA = 'OpenStack Nova'
DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
-VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM]
+# See github.com/sapcc/helm-charts/blob/master/openstack/nova/values.yaml
+# -> compute.defaults.vmware.smbios_asset_tag for this value
+DMI_ASSET_TAG_SAPCCLOUD = 'SAP CCloud VM'
+VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_SAPCCLOUD]
class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
@@ -191,10 +194,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
'timeout': url_params.timeout_seconds})
except openstack.NonReadable as e:
raise sources.InvalidMetaDataException(str(e))
- except (openstack.BrokenMetadata, IOError):
+ except (openstack.BrokenMetadata, IOError) as e:
msg = 'Broken metadata address {addr}'.format(
addr=self.metadata_address)
- raise sources.InvalidMetaDataException(msg)
+ raise sources.InvalidMetaDataException(msg) from e
return result
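
Several datasources in this merge detect their platform from SMBIOS values, as with VALID_DMI_ASSET_TAGS above and Azure's chassis-asset-tag check; a sketch of that pattern using util.read_dmi_data as seen in the hunks:

    from cloudinit import util

    DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
    DMI_ASSET_TAG_SAPCCLOUD = 'SAP CCloud VM'
    VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_SAPCCLOUD]

    def platform_looks_like_openstack_vm():
        # read_dmi_data returns the SMBIOS string, or None when DMI data
        # is unavailable (e.g. on non-x86 hardware or in containers).
        asset_tag = util.read_dmi_data('chassis-asset-tag')
        return asset_tag in VALID_DMI_ASSET_TAGS
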
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index eec87403..20d6487d 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -1,30 +1,31 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Datasource for Oracle (OCI/Oracle Cloud Infrastructure)
-OCI provides a OpenStack like metadata service which provides only
-'2013-10-17' and 'latest' versions..
-
Notes:
- * This datasource does not support the OCI-Classic. OCI-Classic
- provides an EC2 lookalike metadata service.
- * The uuid provided in DMI data is not the same as the meta-data provided
+ * This datasource does not support OCI Classic. OCI Classic provides an EC2
+ lookalike metadata service.
+ * The UUID provided in DMI data is not the same as the meta-data provided
instance-id, but has an equivalent lifespan.
* We do need to support upgrade from an instance that cloud-init
identified as OpenStack.
- * Both bare-metal and vms use iscsi root
- * Both bare-metal and vms provide chassis-asset-tag of OracleCloud.com
+ * Bare metal instances use iSCSI root, virtual machine instances do not.
+ * Both bare metal and virtual machine instances provide a chassis-asset-tag of
+ OracleCloud.com.
"""
-from cloudinit.url_helper import combine_url, readurl, UrlError
-from cloudinit.net import dhcp, get_interfaces_by_mac, is_netfail_master
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import util
-from cloudinit.net import cmdline
-from cloudinit import log as logging
+import base64
+from collections import namedtuple
+from contextlib import suppress as noop
-import json
-import re
+from cloudinit import log as logging
+from cloudinit import net, sources, util
+from cloudinit.net import (
+ cmdline,
+ dhcp,
+ get_interfaces_by_mac,
+ is_netfail_master,
+)
+from cloudinit.url_helper import UrlError, readurl
LOG = logging.getLogger(__name__)
@@ -33,79 +34,13 @@ BUILTIN_DS_CONFIG = {
'configure_secondary_nics': False,
}
CHASSIS_ASSET_TAG = "OracleCloud.com"
-METADATA_ENDPOINT = "http://169.254.169.254/openstack/"
-VNIC_METADATA_URL = 'http://169.254.169.254/opc/v1/vnics/'
+METADATA_ROOT = "http://169.254.169.254/opc/v{version}/"
+METADATA_PATTERN = METADATA_ROOT + "{path}/"
# https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview,
# indicates that an MTU of 9000 is used within OCI
MTU = 9000
-
-def _add_network_config_from_opc_imds(network_config):
- """
- Fetch data from Oracle's IMDS, generate secondary NIC config, merge it.
-
- The primary NIC configuration should not be modified based on the IMDS
- values, as it should continue to be configured for DHCP. As such, this
- takes an existing network_config dict which is expected to have the primary
- NIC configuration already present. It will mutate the given dict to
- include the secondary VNICs.
-
- :param network_config:
- A v1 or v2 network config dict with the primary NIC already configured.
- This dict will be mutated.
-
- :raises:
- Exceptions are not handled within this function. Likely exceptions are
- those raised by url_helper.readurl (if communicating with the IMDS
- fails), ValueError/JSONDecodeError (if the IMDS returns invalid JSON),
- and KeyError/IndexError (if the IMDS returns valid JSON with unexpected
- contents).
- """
- resp = readurl(VNIC_METADATA_URL)
- vnics = json.loads(str(resp))
-
- if 'nicIndex' in vnics[0]:
- # TODO: Once configure_secondary_nics defaults to True, lower the level
- # of this log message. (Currently, if we're running this code at all,
- # someone has explicitly opted-in to secondary VNIC configuration, so
- # we should warn them that it didn't happen. Once it's default, this
- # would be emitted on every Bare Metal Machine launch, which means INFO
- # or DEBUG would be more appropriate.)
- LOG.warning(
- 'VNIC metadata indicates this is a bare metal machine; skipping'
- ' secondary VNIC configuration.'
- )
- return
-
- interfaces_by_mac = get_interfaces_by_mac()
-
- for vnic_dict in vnics[1:]:
- # We skip the first entry in the response because the primary interface
- # is already configured by iSCSI boot; applying configuration from the
- # IMDS is not required.
- mac_address = vnic_dict['macAddr'].lower()
- if mac_address not in interfaces_by_mac:
- LOG.debug('Interface with MAC %s not found; skipping', mac_address)
- continue
- name = interfaces_by_mac[mac_address]
-
- if network_config['version'] == 1:
- subnet = {
- 'type': 'static',
- 'address': vnic_dict['privateIp'],
- }
- network_config['config'].append({
- 'name': name,
- 'type': 'physical',
- 'mac_address': mac_address,
- 'mtu': MTU,
- 'subnets': [subnet],
- })
- elif network_config['version'] == 2:
- network_config['ethernets'][name] = {
- 'addresses': [vnic_dict['privateIp']],
- 'mtu': MTU, 'dhcp4': False, 'dhcp6': False,
- 'match': {'macaddress': mac_address}}
+OpcMetadata = namedtuple("OpcMetadata", "version instance_data vnics_data")
def _ensure_netfailover_safe(network_config):
@@ -174,6 +109,7 @@ class DataSourceOracle(sources.DataSource):
def __init__(self, sys_cfg, *args, **kwargs):
super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs)
+ self._vnics_data = None
self.ds_cfg = util.mergemanydict([
util.get_cfg_by_path(sys_cfg, ['datasource', self.dsname], {}),
@@ -187,54 +123,46 @@ class DataSourceOracle(sources.DataSource):
if not self._is_platform_viable():
return False
+ self.system_uuid = _read_system_uuid()
+
# network may be configured if iscsi root. If that is the case
# then read_initramfs_config will return non-None.
- if _is_iscsi_root():
- data = self.crawl_metadata()
- else:
- with dhcp.EphemeralDHCPv4(net.find_fallback_nic()):
- data = self.crawl_metadata()
-
- self._crawled_metadata = data
- vdata = data['2013-10-17']
-
- self.userdata_raw = vdata.get('user_data')
- self.system_uuid = vdata['system_uuid']
-
- vd = vdata.get('vendor_data')
- if vd:
- self.vendordata_pure = vd
- try:
- self.vendordata_raw = sources.convert_vendordata(vd)
- except ValueError as e:
- LOG.warning("Invalid content in vendor-data: %s", e)
- self.vendordata_raw = None
-
- mdcopies = ('public_keys',)
- md = dict([(k, vdata['meta_data'].get(k))
- for k in mdcopies if k in vdata['meta_data']])
-
- mdtrans = (
- # oracle meta_data.json name, cloudinit.datasource.metadata name
- ('availability_zone', 'availability-zone'),
- ('hostname', 'local-hostname'),
- ('launch_index', 'launch-index'),
- ('uuid', 'instance-id'),
+ fetch_vnics_data = self.ds_cfg.get(
+ 'configure_secondary_nics',
+ BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ )
+ network_context = noop()
+ if not _is_iscsi_root():
+ network_context = dhcp.EphemeralDHCPv4(net.find_fallback_nic())
+ with network_context:
+ fetched_metadata = read_opc_metadata(
+ fetch_vnics_data=fetch_vnics_data
+ )
+
+ data = self._crawled_metadata = fetched_metadata.instance_data
+ self.metadata_address = METADATA_ROOT.format(
+ version=fetched_metadata.version
)
- for dsname, ciname in mdtrans:
- if dsname in vdata['meta_data']:
- md[ciname] = vdata['meta_data'][dsname]
+ self._vnics_data = fetched_metadata.vnics_data
+
+ self.metadata = {
+ "availability-zone": data["ociAdName"],
+ "instance-id": data["id"],
+ "launch-index": 0,
+ "local-hostname": data["hostname"],
+ "name": data["displayName"],
+ }
+
+ if "metadata" in data:
+ user_data = data["metadata"].get("user_data")
+ if user_data:
+ self.userdata_raw = base64.b64decode(user_data)
+ self.metadata["public_keys"] = data["metadata"].get(
+ "ssh_authorized_keys"
+ )
- self.metadata = md
return True
- def crawl_metadata(self):
- return read_metadata()
-
- def _get_subplatform(self):
- """Return the subplatform metadata source details."""
- return 'metadata (%s)' % METADATA_ENDPOINT
-
def check_instance_id(self, sys_cfg):
"""quickly check (local only) if self.instance_id is still valid
@@ -248,15 +176,9 @@ class DataSourceOracle(sources.DataSource):
@property
def network_config(self):
"""Network config is read from initramfs provided files
- If none is present, then we fall back to fallback configuration.
- One thing to note here is that this method is not currently
- considered at all if there is is kernel/initramfs provided
- data. In that case, stages considers that the cmdline data
- overrides datasource provided data and does not consult here.
-
- We nonetheless return cmdline provided config if present
- and fallback to generate fallback."""
+ If none is present, then we fall back to fallback configuration.
+ """
if self._network_config == sources.UNSET:
# this is v1
self._network_config = cmdline.read_initramfs_config()
@@ -265,14 +187,18 @@ class DataSourceOracle(sources.DataSource):
# this is now v2
self._network_config = self.distro.generate_fallback_config()
- if self.ds_cfg.get('configure_secondary_nics'):
+ if self.ds_cfg.get(
+ 'configure_secondary_nics',
+ BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ ):
try:
- # Mutate self._network_config to include secondary VNICs
- _add_network_config_from_opc_imds(self._network_config)
+ # Mutate self._network_config to include secondary
+ # VNICs
+ self._add_network_config_from_opc_imds()
except Exception:
util.logexc(
LOG,
- "Failed to fetch secondary network configuration!")
+ "Failed to parse secondary network configuration!")
# we need to verify that the nic selected is not a netfail over
# device and, if it is a netfail master, then we need to avoid
@@ -281,6 +207,70 @@ class DataSourceOracle(sources.DataSource):
return self._network_config
+ def _add_network_config_from_opc_imds(self):
+ """Generate secondary NIC config from IMDS and merge it.
+
+ The primary NIC configuration should not be modified based on the IMDS
+ values, as it should continue to be configured for DHCP. As such, this
+ uses the instance's network config dict which is expected to have the
+ primary NIC configuration already present.
+ It will mutate the network config to include the secondary VNICs.
+
+ :raises:
+ Exceptions are not handled within this function. Likely
+ exceptions are KeyError/IndexError
+ (if the IMDS returns valid JSON with unexpected contents).
+ """
+ if self._vnics_data is None:
+ LOG.warning(
+ "Secondary NIC data is UNSET but should not be")
+ return
+
+ if 'nicIndex' in self._vnics_data[0]:
+ # TODO: Once configure_secondary_nics defaults to True, lower the
+ # level of this log message. (Currently, if we're running this
+ # code at all, someone has explicitly opted-in to secondary
+ # VNIC configuration, so we should warn them that it didn't
+ # happen. Once it's default, this would be emitted on every Bare
+ # Metal Machine launch, which means INFO or DEBUG would be more
+ # appropriate.)
+ LOG.warning(
+ 'VNIC metadata indicates this is a bare metal machine; '
+ 'skipping secondary VNIC configuration.'
+ )
+ return
+
+ interfaces_by_mac = get_interfaces_by_mac()
+
+ for vnic_dict in self._vnics_data[1:]:
+ # We skip the first entry in the response because the primary
+ # interface is already configured by iSCSI boot; applying
+ # configuration from the IMDS is not required.
+ mac_address = vnic_dict['macAddr'].lower()
+ if mac_address not in interfaces_by_mac:
+ LOG.debug('Interface with MAC %s not found; skipping',
+ mac_address)
+ continue
+ name = interfaces_by_mac[mac_address]
+
+ if self._network_config['version'] == 1:
+ subnet = {
+ 'type': 'static',
+ 'address': vnic_dict['privateIp'],
+ }
+ self._network_config['config'].append({
+ 'name': name,
+ 'type': 'physical',
+ 'mac_address': mac_address,
+ 'mtu': MTU,
+ 'subnets': [subnet],
+ })
+ elif self._network_config['version'] == 2:
+ self._network_config['ethernets'][name] = {
+ 'addresses': [vnic_dict['privateIp']],
+ 'mtu': MTU, 'dhcp4': False, 'dhcp6': False,
+ 'match': {'macaddress': mac_address}}
+
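
For reference, the version-2 branch above produces one netplan-style entry per secondary VNIC, roughly of the shape below; the interface name, MAC, address, and MTU value here are invented for illustration.

    # Hypothetical shape of one secondary-VNIC entry appended by the
    # version-2 branch above; all concrete values are made up.
    secondary_nic_v2 = {
        "ethernets": {
            "ens4": {
                "addresses": ["10.0.0.5"],
                "mtu": 9000,  # assumed value of the MTU constant
                "dhcp4": False,
                "dhcp6": False,
                "match": {"macaddress": "02:00:17:aa:bb:cc"},
            }
        }
    }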
def _read_system_uuid():
sys_uuid = util.read_dmi_data('system-uuid')
@@ -296,72 +286,46 @@ def _is_iscsi_root():
return bool(cmdline.read_initramfs_config())
-def _load_index(content):
- """Return a list entries parsed from content.
-
- OpenStack's metadata service returns a newline delimited list
- of items. Oracle's implementation has html formatted list of links.
- The parser here just grabs targets from <a href="target">
- and throws away "../".
-
- Oracle has accepted that to be buggy and may fix in the future
- to instead return a '\n' delimited plain text list. This function
- will continue to work if that change is made."""
- if not content.lower().startswith("<html>"):
- return content.splitlines()
- items = re.findall(
- r'href="(?P<target>[^"]*)"', content, re.MULTILINE | re.IGNORECASE)
- return [i for i in items if not i.startswith(".")]
-
+def read_opc_metadata(*, fetch_vnics_data: bool = False):
+ """Fetch metadata from the /opc/ routes.
-def read_metadata(endpoint_base=METADATA_ENDPOINT, sys_uuid=None,
- version='2013-10-17'):
- """Read metadata, return a dictionary.
+ :return:
+ A namedtuple containing:
+ The metadata version as an integer
+ The JSON-decoded value of the instance data endpoint on the IMDS
+ The JSON-decoded value of the vnics data endpoint if
+ `fetch_vnics_data` is True, else None
- Each path listed in the index will be represented in the dictionary.
- If the path ends in .json, then the content will be decoded and
- populated into the dictionary.
-
- The system uuid (/sys/class/dmi/id/product_uuid) is also populated.
- Example: given paths = ('user_data', 'meta_data.json')
- This would return:
- {version: {'user_data': b'blob', 'meta_data': json.loads(blob.decode())
- 'system_uuid': '3b54f2e0-3ab2-458d-b770-af9926eee3b2'}}
"""
- endpoint = combine_url(endpoint_base, version) + "/"
- if sys_uuid is None:
- sys_uuid = _read_system_uuid()
- if not sys_uuid:
- raise sources.BrokenMetadata("Failed to read system uuid.")
-
+ # Per Oracle, there are short windows (measured in milliseconds) throughout
+ # an instance's lifetime where the IMDS is being updated and may 404 as a
+ # result. To work around these windows, we retry a couple of times.
+ retries = 2
+
+ def _fetch(metadata_version: int, path: str) -> dict:
+ headers = {
+ "Authorization": "Bearer Oracle"} if metadata_version > 1 else None
+ return readurl(
+ url=METADATA_PATTERN.format(version=metadata_version, path=path),
+ headers=headers,
+ retries=retries,
+ )._response.json()
+
+ metadata_version = 2
try:
- resp = readurl(endpoint)
- if not resp.ok():
- raise sources.BrokenMetadata(
- "Bad response from %s: %s" % (endpoint, resp.code))
- except UrlError as e:
- raise sources.BrokenMetadata(
- "Failed to read index at %s: %s" % (endpoint, e))
-
- entries = _load_index(resp.contents.decode('utf-8'))
- LOG.debug("index url %s contained: %s", endpoint, entries)
-
- # meta_data.json is required.
- mdj = 'meta_data.json'
- if mdj not in entries:
- raise sources.BrokenMetadata(
- "Required field '%s' missing in index at %s" % (mdj, endpoint))
-
- ret = {'system_uuid': sys_uuid}
- for path in entries:
- response = readurl(combine_url(endpoint, path))
- if path.endswith(".json"):
- ret[path.rpartition(".")[0]] = (
- json.loads(response.contents.decode('utf-8')))
- else:
- ret[path] = response.contents
-
- return {version: ret}
+ instance_data = _fetch(metadata_version, path="instance")
+ except UrlError:
+ metadata_version = 1
+ instance_data = _fetch(metadata_version, path="instance")
+
+ vnics_data = None
+ if fetch_vnics_data:
+ try:
+ vnics_data = _fetch(metadata_version, path="vnics")
+ except UrlError:
+ util.logexc(LOG,
+ "Failed to fetch secondary network configuration!")
+ return OpcMetadata(metadata_version, instance_data, vnics_data)
# Used to match classes to dependencies
@@ -377,17 +341,21 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import argparse
- import os
-
- parser = argparse.ArgumentParser(description='Query Oracle Cloud Metadata')
- parser.add_argument("--endpoint", metavar="URL",
- help="The url of the metadata service.",
- default=METADATA_ENDPOINT)
- args = parser.parse_args()
- sys_uuid = "uuid-not-available-not-root" if os.geteuid() != 0 else None
-
- data = read_metadata(endpoint_base=args.endpoint, sys_uuid=sys_uuid)
- data['is_platform_viable'] = _is_platform_viable()
- print(util.json_dumps(data))
+
+ description = """
+ Query Oracle Cloud metadata and emit a JSON object with two keys:
+ `read_opc_metadata` and `_is_platform_viable`. The values of each are
+ the return values of the corresponding functions defined in
+ DataSourceOracle.py."""
+ parser = argparse.ArgumentParser(description=description)
+ parser.parse_args()
+ print(
+ util.json_dumps(
+ {
+ "read_opc_metadata": read_opc_metadata(),
+ "_is_platform_viable": _is_platform_viable(),
+ }
+ )
+ )
# vi: ts=4 expandtab
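
As a standalone illustration of the v2-then-v1 fallback that read_opc_metadata implements, the sketch below fetches the instance endpoint with plain urllib. The URL pattern, the link-local address, and the fixed "Bearer Oracle" header reflect the diff above; using urllib instead of cloud-init's readurl is an assumption made only to keep the example self-contained.

    # Minimal sketch, not cloud-init's implementation: try the v2 IMDS
    # route first and fall back to v1 on failure.
    import json
    import urllib.error
    import urllib.request

    METADATA_PATTERN = "http://169.254.169.254/opc/v{version}/{path}/"

    def fetch_instance_metadata() -> dict:
        for version in (2, 1):
            req = urllib.request.Request(
                METADATA_PATTERN.format(version=version, path="instance"))
            if version > 1:
                # v2 requires this fixed bearer token; v1 does not.
                req.add_header("Authorization", "Bearer Oracle")
            try:
                with urllib.request.urlopen(req, timeout=5) as resp:
                    return json.load(resp)
            except urllib.error.URLError:
                continue  # fall back to the older metadata version
        raise RuntimeError("no OPC metadata endpoint responded")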
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
index c3cd5c79..e064c8d6 100644
--- a/cloudinit/sources/DataSourceRbxCloud.py
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -15,6 +15,7 @@ import os.path
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.event import EventType
@@ -43,11 +44,11 @@ def int2ip(addr):
def _sub_arp(cmd):
"""
- Uses the prefered cloud-init subprocess def of util.subp
+ Uses the preferred cloud-init subprocess def of subp.subp
and runs arping. Breaking this to a separate function
for later use in mocking and unittests
"""
- return util.subp(['arping'] + cmd)
+ return subp.subp(['arping'] + cmd)
def gratuitous_arp(items, distro):
@@ -55,26 +56,32 @@ def gratuitous_arp(items, distro):
if distro.name in ['fedora', 'centos', 'rhel']:
source_param = '-s'
for item in items:
- _sub_arp([
- '-c', '2',
- source_param, item['source'],
- item['destination']
- ])
+ try:
+ _sub_arp([
+ '-c', '2',
+ source_param, item['source'],
+ item['destination']
+ ])
+ except subp.ProcessExecutionError as error:
+                # Log a warning rather than fail: the system can keep
+                # functioning even without a successful arping; stale
+                # ARP entries will simply age out on their own.
+ LOG.warning('Failed to arping from "%s" to "%s": %s',
+ item['source'], item['destination'], error)
def get_md():
rbx_data = None
- devices = [
- dev
- for dev, bdata in util.blkid().items()
- if bdata.get('LABEL', '').upper() == 'CLOUDMD'
- ]
+ devices = set(
+ util.find_devs_with('LABEL=CLOUDMD') +
+ util.find_devs_with('LABEL=cloudmd')
+ )
for device in devices:
try:
rbx_data = util.mount_cb(
device=device,
callback=read_user_data_callback,
- mtype=['vfat', 'fat']
+ mtype=['vfat', 'fat', 'msdosfs']
)
if rbx_data:
break
@@ -182,7 +189,6 @@ def read_user_data_callback(mount_dir):
'passwd': hash,
'lock_passwd': False,
'ssh_authorized_keys': ssh_keys,
- 'shell': '/bin/bash'
}
},
'network_config': network,
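
The get_md hunk above replaces blkid table parsing with util.find_devs_with for both label spellings. A rough standalone equivalent, shelling out to blkid directly (an assumption for illustration, not what cloud-init does internally):

    # Probe both label spellings and de-duplicate the resulting device
    # paths, mirroring the set() union in the hunk above.
    import subprocess

    def find_cloudmd_devices() -> set:
        devices = set()
        for label in ("CLOUDMD", "cloudmd"):
            try:
                out = subprocess.run(
                    ["blkid", "-t", "LABEL=" + label, "-o", "device"],
                    capture_output=True, text=True, check=True,
                ).stdout
            except (subprocess.CalledProcessError, FileNotFoundError):
                continue  # blkid exits non-zero when nothing matches
            devices.update(out.split())
        return devices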
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index cf676504..f1f903bc 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -33,6 +33,7 @@ import socket
from cloudinit import log as logging
from cloudinit import serial
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.event import EventType
@@ -412,7 +413,9 @@ class JoyentMetadataClient(object):
response.append(byte)
except OSError as exc:
if exc.errno == errno.EAGAIN:
- raise JoyentMetadataTimeoutException(msg % as_ascii())
+ raise JoyentMetadataTimeoutException(
+ msg % as_ascii()
+ ) from exc
raise
def _write(self, msg):
@@ -696,9 +699,9 @@ def identify_file(content_f):
cmd = ["file", "--brief", "--mime-type", content_f]
f_type = None
try:
- (f_type, _err) = util.subp(cmd)
+ (f_type, _err) = subp.subp(cmd)
LOG.debug("script %s mime type is %s", content_f, f_type)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
util.logexc(
LOG, ("Failed to identify script type for %s" % content_f, e))
return None if f_type is None else f_type.strip()
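
Several hunks in this series (SmartOS above, netlink and openstack helpers below) switch bare re-raises to `raise ... from exc`. A minimal sketch of the difference:

    # With "from exc" the original error is preserved as __cause__ and
    # printed under "The above exception was the direct cause of ...",
    # instead of the misleading "During handling of another exception".
    def identify(path: str) -> str:
        try:
            with open(path) as f:
                return f.read(64)
        except OSError as exc:
            raise RuntimeError("failed to identify %s" % path) from exc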
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index dd93cfd8..c4d60fff 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -78,7 +78,6 @@ class DataSourceNotFoundException(Exception):
class InvalidMetaDataException(Exception):
"""Raised when metadata is broken, unavailable or disabled."""
- pass
def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
@@ -89,26 +88,26 @@ def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
@return Dict copy of processed metadata.
"""
md_copy = copy.deepcopy(metadata)
- md_copy['base64_encoded_keys'] = []
- md_copy['sensitive_keys'] = []
+ base64_encoded_keys = []
+ sens_keys = []
for key, val in metadata.items():
if key_path:
sub_key_path = key_path + '/' + key
else:
sub_key_path = key
if key in sensitive_keys or sub_key_path in sensitive_keys:
- md_copy['sensitive_keys'].append(sub_key_path)
+ sens_keys.append(sub_key_path)
if isinstance(val, str) and val.startswith('ci-b64:'):
- md_copy['base64_encoded_keys'].append(sub_key_path)
+ base64_encoded_keys.append(sub_key_path)
md_copy[key] = val.replace('ci-b64:', '')
if isinstance(val, dict):
return_val = process_instance_metadata(
val, sub_key_path, sensitive_keys)
- md_copy['base64_encoded_keys'].extend(
- return_val.pop('base64_encoded_keys'))
- md_copy['sensitive_keys'].extend(
- return_val.pop('sensitive_keys'))
+ base64_encoded_keys.extend(return_val.pop('base64_encoded_keys'))
+ sens_keys.extend(return_val.pop('sensitive_keys'))
md_copy[key] = return_val
+ md_copy['base64_encoded_keys'] = sorted(base64_encoded_keys)
+ md_copy['sensitive_keys'] = sorted(sens_keys)
return md_copy
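
The rewrite above collects key paths in local lists and attaches them, sorted, once per call, so recursive merging no longer interleaves reads and writes of md_copy. A trimmed, runnable restatement covering just the base64 bookkeeping:

    # Toy restatement of the aggregation pattern above, not the real
    # process_instance_metadata; sensitive-key handling is omitted.
    import copy

    def process(metadata, key_path=""):
        md_copy = copy.deepcopy(metadata)
        b64_keys = []
        for key, val in metadata.items():
            sub = "%s/%s" % (key_path, key) if key_path else key
            if isinstance(val, str) and val.startswith("ci-b64:"):
                b64_keys.append(sub)
                md_copy[key] = val.replace("ci-b64:", "")
            if isinstance(val, dict):
                ret = process(val, sub)
                b64_keys.extend(ret.pop("base64_encoded_keys"))
                md_copy[key] = ret
        md_copy["base64_encoded_keys"] = sorted(b64_keys)
        return md_copy

    print(process({"z": "ci-b64:eg==", "a": {"b": "ci-b64:Yg=="}}))
    # -> base64_encoded_keys == ['a/b', 'z'], stable regardless of order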
@@ -193,7 +192,7 @@ class DataSource(metaclass=abc.ABCMeta):
# N-tuple of keypaths or keynames redact from instance-data.json for
# non-root users
- sensitive_metadata_keys = ('security-credentials',)
+ sensitive_metadata_keys = ('merged_cfg', 'security-credentials',)
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
@@ -218,14 +217,15 @@ class DataSource(metaclass=abc.ABCMeta):
def __str__(self):
return type_utils.obj_name(self)
- def _get_standardized_metadata(self):
+ def _get_standardized_metadata(self, instance_data):
"""Return a dictionary of standardized metadata keys."""
local_hostname = self.get_hostname()
instance_id = self.get_instance_id()
availability_zone = self.availability_zone
# In the event of upgrade from existing cloudinit, pickled datasource
# will not contain these new class attributes. So we need to recrawl
- # metadata to discover that content.
+ # metadata to discover that content
+ sysinfo = instance_data["sys_info"]
return {
'v1': {
'_beta_keys': ['subplatform'],
@@ -233,14 +233,22 @@ class DataSource(metaclass=abc.ABCMeta):
'availability_zone': availability_zone,
'cloud-name': self.cloud_name,
'cloud_name': self.cloud_name,
+ 'distro': sysinfo["dist"][0],
+ 'distro_version': sysinfo["dist"][1],
+ 'distro_release': sysinfo["dist"][2],
'platform': self.platform_type,
'public_ssh_keys': self.get_public_ssh_keys(),
+ 'python_version': sysinfo["python"],
'instance-id': instance_id,
'instance_id': instance_id,
+ 'kernel_release': sysinfo["uname"][2],
'local-hostname': local_hostname,
'local_hostname': local_hostname,
+ 'machine': sysinfo["uname"][4],
'region': self.region,
- 'subplatform': self.subplatform}}
+ 'subplatform': self.subplatform,
+ 'system_platform': sysinfo["platform"],
+ 'variant': sysinfo["variant"]}}
def clear_cached_attrs(self, attr_defaults=()):
"""Reset any cached metadata attributes to datasource defaults.
@@ -299,9 +307,15 @@ class DataSource(metaclass=abc.ABCMeta):
ec2_metadata = getattr(self, 'ec2_metadata')
if ec2_metadata != UNSET:
instance_data['ds']['ec2_metadata'] = ec2_metadata
- instance_data.update(
- self._get_standardized_metadata())
instance_data['ds']['_doc'] = EXPERIMENTAL_TEXT
+ # Add merged cloud.cfg and sys info for jinja templates and cli query
+ instance_data['merged_cfg'] = copy.deepcopy(self.sys_cfg)
+ instance_data['merged_cfg']['_doc'] = (
+ 'Merged cloud-init system config from /etc/cloud/cloud.cfg and'
+ ' /etc/cloud/cloud.cfg.d/')
+ instance_data['sys_info'] = util.system_info()
+ instance_data.update(
+ self._get_standardized_metadata(instance_data))
try:
# Process content base64encoding unserializable values
content = util.json_dumps(instance_data)
@@ -315,12 +329,12 @@ class DataSource(metaclass=abc.ABCMeta):
except UnicodeDecodeError as e:
LOG.warning('Error persisting instance-data.json: %s', str(e))
return False
- json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
- write_json(json_file, processed_data) # World readable
json_sensitive_file = os.path.join(self.paths.run_dir,
INSTANCE_JSON_SENSITIVE_FILE)
- write_json(json_sensitive_file,
- redact_sensitive_keys(processed_data), mode=0o600)
+ write_json(json_sensitive_file, processed_data, mode=0o600)
+ json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+ # World readable
+ write_json(json_file, redact_sensitive_keys(processed_data))
return True
def _get_data(self):
@@ -496,7 +510,6 @@ class DataSource(metaclass=abc.ABCMeta):
(e.g. 'ssh-rsa') and key_value is the key itself
(e.g. 'AAAAB3NzaC1y...').
"""
- pass
def _remap_device(self, short_name):
# LP: #611137
@@ -587,7 +600,7 @@ class DataSource(metaclass=abc.ABCMeta):
# if there is an ipv4 address in 'local-hostname', then
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
lhost = self.metadata['local-hostname']
- if util.is_ipv4(lhost):
+ if net.is_ipv4_address(lhost):
toks = []
if resolve_ip:
toks = util.gethostbyaddr(lhost)
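
The write reordering above puts the unredacted instance data into the root-only sensitive file first, then writes the redacted copy world-readable. A sketch of that ordering under assumed file names, with a simplified stand-in for the redaction helper:

    import copy
    import json
    import os

    REDACTED = "redacted for non-root user"  # assumed placeholder text

    def redact_sensitive_keys(data: dict) -> dict:
        redacted = copy.deepcopy(data)
        for key_path in data.get("sensitive_keys", []):
            node = redacted
            parts = key_path.split("/")
            for part in parts[:-1]:
                node = node[part]
            node[parts[-1]] = REDACTED
        return redacted

    def write_instance_data(run_dir: str, processed: dict) -> None:
        sensitive = os.path.join(run_dir, "instance-data-sensitive.json")
        fd = os.open(sensitive, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        with os.fdopen(fd, "w") as f:
            json.dump(processed, f)  # full data, root-readable only
        with open(os.path.join(run_dir, "instance-data.json"), "w") as f:
            json.dump(redact_sensitive_keys(processed), f)  # world readable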
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index fc760581..b968a96f 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -1,5 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
+import base64
import json
import logging
import os
@@ -8,13 +8,16 @@ import socket
import struct
import time
import textwrap
+import zlib
+from cloudinit.settings import CFG_BUILTIN
from cloudinit.net import dhcp
from cloudinit import stages
from cloudinit import temp_utils
from contextlib import contextmanager
from xml.etree import ElementTree
+from cloudinit import subp
from cloudinit import url_helper
from cloudinit import util
from cloudinit import version
@@ -32,7 +35,14 @@ DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
BOOT_EVENT_TYPE = 'boot-telemetry'
SYSTEMINFO_EVENT_TYPE = 'system-info'
DIAGNOSTIC_EVENT_TYPE = 'diagnostic'
-
+COMPRESSED_EVENT_TYPE = 'compressed'
+# Maximum number of bytes of the cloud-init.log file that can be dumped to KVP
+# at once. This number is based on the analysis done on a large sample of
+# cloud-init.log files where the P95 of the file sizes was 537KB and the time
+# consumed to dump 500KB file was (P95:76, P99:233, P99.9:1170) in ms
+MAX_LOG_TO_KVP_LENGTH = 512000
+# Marker file to indicate whether cloud-init.log is pushed to KVP
+LOG_PUSHED_TO_KVP_MARKER_FILE = '/var/lib/cloud/data/log_pushed_to_kvp'
azure_ds_reporter = events.ReportEventStack(
name="azure-ds",
description="initialize reporter for azure ds",
@@ -64,13 +74,15 @@ def is_byte_swapped(previous_id, current_id):
return ''.join(dd)
parts = current_id.split('-')
- swapped_id = '-'.join([
+ swapped_id = '-'.join(
+ [
swap_bytestring(parts[0]),
swap_bytestring(parts[1]),
swap_bytestring(parts[2]),
parts[3],
parts[4]
- ])
+ ]
+ )
return previous_id == swapped_id
@@ -86,11 +98,13 @@ def get_boot_telemetry():
LOG.debug("Collecting boot telemetry")
try:
kernel_start = float(time.time()) - float(util.uptime())
- except ValueError:
- raise RuntimeError("Failed to determine kernel start timestamp")
+ except ValueError as e:
+ raise RuntimeError(
+ "Failed to determine kernel start timestamp"
+ ) from e
try:
- out, _ = util.subp(['/bin/systemctl',
+ out, _ = subp.subp(['/bin/systemctl',
'show', '-p',
'UserspaceTimestampMonotonic'],
capture=True)
@@ -103,16 +117,17 @@ def get_boot_telemetry():
"UserspaceTimestampMonotonic from systemd")
user_start = kernel_start + (float(tsm) / 1000000)
- except util.ProcessExecutionError as e:
- raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s"
- % e)
+ except subp.ProcessExecutionError as e:
+ raise RuntimeError(
+ "Failed to get UserspaceTimestampMonotonic: %s" % e
+ ) from e
except ValueError as e:
- raise RuntimeError("Failed to parse "
- "UserspaceTimestampMonotonic from systemd: %s"
- % e)
+ raise RuntimeError(
+ "Failed to parse UserspaceTimestampMonotonic from systemd: %s" % e
+ ) from e
try:
- out, _ = util.subp(['/bin/systemctl', 'show',
+ out, _ = subp.subp(['/bin/systemctl', 'show',
'cloud-init-local', '-p',
'InactiveExitTimestampMonotonic'],
capture=True)
@@ -124,13 +139,15 @@ def get_boot_telemetry():
"InactiveExitTimestampMonotonic from systemd")
cloudinit_activation = kernel_start + (float(tsm) / 1000000)
- except util.ProcessExecutionError as e:
- raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s"
- % e)
+ except subp.ProcessExecutionError as e:
+ raise RuntimeError(
+ "Failed to get InactiveExitTimestampMonotonic: %s" % e
+ ) from e
except ValueError as e:
- raise RuntimeError("Failed to parse "
- "InactiveExitTimestampMonotonic from systemd: %s"
- % e)
+ raise RuntimeError(
+ "Failed to parse InactiveExitTimestampMonotonic from systemd: %s"
+ % e
+ ) from e
evt = events.ReportingEvent(
BOOT_EVENT_TYPE, 'boot-telemetry',
@@ -174,6 +191,49 @@ def report_diagnostic_event(str):
return evt
+def report_compressed_event(event_name, event_content):
+ """Report a compressed event"""
+ compressed_data = base64.encodebytes(zlib.compress(event_content))
+ event_data = {"encoding": "gz+b64",
+ "data": compressed_data.decode('ascii')}
+ evt = events.ReportingEvent(
+ COMPRESSED_EVENT_TYPE, event_name,
+ json.dumps(event_data),
+ events.DEFAULT_EVENT_ORIGIN)
+ events.report_event(evt,
+ excluded_handler_types={"log", "print", "webhook"})
+
+ # return the event for unit testing purpose
+ return evt
+
+
+@azure_ds_telemetry_reporter
+def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
+ """Push a portion of cloud-init.log file or the whole file to KVP
+ based on the file size.
+ If called more than once, it skips pushing the log file to KVP again."""
+
+ log_pushed_to_kvp = bool(os.path.isfile(LOG_PUSHED_TO_KVP_MARKER_FILE))
+ if log_pushed_to_kvp:
+ report_diagnostic_event("cloud-init.log is already pushed to KVP")
+ return
+
+ LOG.debug("Dumping cloud-init.log file to KVP")
+ try:
+ with open(file_name, "rb") as f:
+ f.seek(0, os.SEEK_END)
+ seek_index = max(f.tell() - MAX_LOG_TO_KVP_LENGTH, 0)
+ report_diagnostic_event(
+ "Dumping last {} bytes of cloud-init.log file to KVP".format(
+ f.tell() - seek_index))
+ f.seek(seek_index, os.SEEK_SET)
+ report_compressed_event("cloud-init.log", f.read())
+ util.write_file(LOG_PUSHED_TO_KVP_MARKER_FILE, '')
+ except Exception as ex:
+ report_diagnostic_event("Exception when dumping log file: %s" %
+ repr(ex))
+
+
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
@@ -192,7 +252,7 @@ def _get_dhcp_endpoint_option_name():
return azure_endpoint
-class AzureEndpointHttpClient(object):
+class AzureEndpointHttpClient:
headers = {
'x-ms-agent-name': 'WALinuxAgent',
@@ -210,57 +270,77 @@ class AzureEndpointHttpClient(object):
if secure:
headers = self.headers.copy()
headers.update(self.extra_secure_headers)
- return url_helper.read_file_or_url(url, headers=headers, timeout=5,
- retries=10)
+ return url_helper.readurl(url, headers=headers,
+ timeout=5, retries=10, sec_between=5)
def post(self, url, data=None, extra_headers=None):
headers = self.headers
if extra_headers is not None:
headers = self.headers.copy()
headers.update(extra_headers)
- return url_helper.read_file_or_url(url, data=data, headers=headers,
- timeout=5, retries=10)
+ return url_helper.readurl(url, data=data, headers=headers,
+ timeout=5, retries=10, sec_between=5)
-class GoalState(object):
+class InvalidGoalStateXMLException(Exception):
+ """Raised when GoalState XML is invalid or has missing data."""
- def __init__(self, xml, http_client):
- self.http_client = http_client
- self.root = ElementTree.fromstring(xml)
- self._certificates_xml = None
- def _text_from_xpath(self, xpath):
- element = self.root.find(xpath)
- if element is not None:
- return element.text
- return None
+class GoalState:
- @property
- def container_id(self):
- return self._text_from_xpath('./Container/ContainerId')
+ def __init__(self, unparsed_xml, azure_endpoint_client):
+ """Parses a GoalState XML string and returns a GoalState object.
- @property
- def incarnation(self):
- return self._text_from_xpath('./Incarnation')
+ @param unparsed_xml: string representing a GoalState XML.
+ @param azure_endpoint_client: instance of AzureEndpointHttpClient
+ @return: GoalState object representing the GoalState XML string.
+ """
+ self.azure_endpoint_client = azure_endpoint_client
- @property
- def instance_id(self):
- return self._text_from_xpath(
+ try:
+ self.root = ElementTree.fromstring(unparsed_xml)
+ except ElementTree.ParseError as e:
+ msg = 'Failed to parse GoalState XML: %s'
+ LOG.warning(msg, e)
+ report_diagnostic_event(msg % (e,))
+ raise
+
+ self.container_id = self._text_from_xpath('./Container/ContainerId')
+ self.instance_id = self._text_from_xpath(
'./Container/RoleInstanceList/RoleInstance/InstanceId')
+ self.incarnation = self._text_from_xpath('./Incarnation')
+
+ for attr in ("container_id", "instance_id", "incarnation"):
+ if getattr(self, attr) is None:
+ msg = 'Missing %s in GoalState XML'
+ LOG.warning(msg, attr)
+ report_diagnostic_event(msg % (attr,))
+                raise InvalidGoalStateXMLException(msg % (attr,))
+
+ self.certificates_xml = None
+ url = self._text_from_xpath(
+ './Container/RoleInstanceList/RoleInstance'
+ '/Configuration/Certificates')
+ if url is not None:
+ with events.ReportEventStack(
+ name="get-certificates-xml",
+ description="get certificates xml",
+ parent=azure_ds_reporter):
+ self.certificates_xml = \
+ self.azure_endpoint_client.get(
+ url, secure=True).contents
+ if self.certificates_xml is None:
+ raise InvalidGoalStateXMLException(
+ 'Azure endpoint returned empty certificates xml.')
- @property
- def certificates_xml(self):
- if self._certificates_xml is None:
- url = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance'
- '/Configuration/Certificates')
- if url is not None:
- self._certificates_xml = self.http_client.get(
- url, secure=True).contents
- return self._certificates_xml
+ def _text_from_xpath(self, xpath):
+ element = self.root.find(xpath)
+ if element is not None:
+ return element.text
+ return None
-class OpenSSLManager(object):
+class OpenSSLManager:
certificate_names = {
'private_key': 'TransportPrivate.pem',
@@ -282,7 +362,7 @@ class OpenSSLManager(object):
LOG.debug('Certificate already generated.')
return
with cd(self.tmpdir):
- util.subp([
+ subp.subp([
'openssl', 'req', '-x509', '-nodes', '-subj',
'/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
'-keyout', self.certificate_names['private_key'],
@@ -299,14 +379,14 @@ class OpenSSLManager(object):
@azure_ds_telemetry_reporter
def _run_x509_action(action, cert):
cmd = ['openssl', 'x509', '-noout', action]
- result, _ = util.subp(cmd, data=cert)
+ result, _ = subp.subp(cmd, data=cert)
return result
@azure_ds_telemetry_reporter
def _get_ssh_key_from_cert(self, certificate):
pub_key = self._run_x509_action('-pubkey', certificate)
keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
- ssh_key, _ = util.subp(keygen_cmd, data=pub_key)
+ ssh_key, _ = subp.subp(keygen_cmd, data=pub_key)
return ssh_key
@azure_ds_telemetry_reporter
@@ -339,7 +419,7 @@ class OpenSSLManager(object):
certificates_content.encode('utf-8'),
]
with cd(self.tmpdir):
- out, _ = util.subp(
+ out, _ = subp.subp(
'openssl cms -decrypt -in /dev/stdin -inkey'
' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
' -password pass:'.format(**self.certificate_names),
@@ -367,25 +447,122 @@ class OpenSSLManager(object):
return keys
-class WALinuxAgentShim(object):
-
- REPORT_READY_XML_TEMPLATE = '\n'.join([
- '<?xml version="1.0" encoding="utf-8"?>',
- '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
- ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
- ' <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
- ' <Container>',
- ' <ContainerId>{container_id}</ContainerId>',
- ' <RoleInstanceList>',
- ' <Role>',
- ' <InstanceId>{instance_id}</InstanceId>',
- ' <Health>',
- ' <State>Ready</State>',
- ' </Health>',
- ' </Role>',
- ' </RoleInstanceList>',
- ' </Container>',
- '</Health>'])
+class GoalStateHealthReporter:
+
+ HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent('''\
+ <?xml version="1.0" encoding="utf-8"?>
+ <Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:xsd="http://www.w3.org/2001/XMLSchema">
+ <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>
+ <Container>
+ <ContainerId>{container_id}</ContainerId>
+ <RoleInstanceList>
+ <Role>
+ <InstanceId>{instance_id}</InstanceId>
+ <Health>
+ <State>{health_status}</State>
+ {health_detail_subsection}
+ </Health>
+ </Role>
+ </RoleInstanceList>
+ </Container>
+ </Health>
+ ''')
+
+ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent('''\
+ <Details>
+ <SubStatus>{health_substatus}</SubStatus>
+ <Description>{health_description}</Description>
+ </Details>
+ ''')
+
+ PROVISIONING_SUCCESS_STATUS = 'Ready'
+
+ def __init__(self, goal_state, azure_endpoint_client, endpoint):
+ """Creates instance that will report provisioning status to an endpoint
+
+ @param goal_state: An instance of class GoalState that contains
+ goal state info such as incarnation, container id, and instance id.
+ These 3 values are needed when reporting the provisioning status
+ to Azure
+ @param azure_endpoint_client: Instance of class AzureEndpointHttpClient
+ @param endpoint: Endpoint (string) where the provisioning status report
+ will be sent to
+ @return: Instance of class GoalStateHealthReporter
+ """
+ self._goal_state = goal_state
+ self._azure_endpoint_client = azure_endpoint_client
+ self._endpoint = endpoint
+
+ @azure_ds_telemetry_reporter
+ def send_ready_signal(self):
+ document = self.build_report(
+ incarnation=self._goal_state.incarnation,
+ container_id=self._goal_state.container_id,
+ instance_id=self._goal_state.instance_id,
+ status=self.PROVISIONING_SUCCESS_STATUS)
+ LOG.debug('Reporting ready to Azure fabric.')
+ try:
+ self._post_health_report(document=document)
+ except Exception as e:
+ msg = "exception while reporting ready: %s" % e
+ LOG.error(msg)
+ report_diagnostic_event(msg)
+ raise
+
+ LOG.info('Reported ready to Azure fabric.')
+
+ def build_report(
+ self, incarnation, container_id, instance_id,
+ status, substatus=None, description=None):
+ health_detail = ''
+ if substatus is not None:
+ health_detail = self.HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
+ health_substatus=substatus, health_description=description)
+
+ health_report = self.HEALTH_REPORT_XML_TEMPLATE.format(
+ incarnation=incarnation,
+ container_id=container_id,
+ instance_id=instance_id,
+ health_status=status,
+ health_detail_subsection=health_detail)
+
+ return health_report
+
+ @azure_ds_telemetry_reporter
+ def _post_health_report(self, document):
+ push_log_to_kvp()
+
+ # Whenever report_diagnostic_event(diagnostic_msg) is invoked in code,
+ # the diagnostic messages are written to special files
+ # (/var/opt/hyperv/.kvp_pool_*) as Hyper-V KVP messages.
+ # Hyper-V KVP message communication is done through these files,
+ # and KVP functionality is used to communicate and share diagnostic
+ # info with the Azure Host.
+ # The Azure Host will collect the VM's Hyper-V KVP diagnostic messages
+ # when cloud-init reports to fabric.
+ # When the Azure Host receives the health report signal, it will only
+ # collect and process whatever KVP diagnostic messages have been
+ # written to the KVP files.
+ # KVP messages that are published after the Azure Host receives the
+ # signal are ignored and unprocessed, so yield this thread to the
+ # Hyper-V KVP Reporting thread so that they are written.
+ # time.sleep(0) is a low-cost and proven method to yield the scheduler
+ # and ensure that events are flushed.
+ # See HyperVKvpReportingHandler class, which is a multi-threaded
+ # reporting handler that writes to the special KVP files.
+ time.sleep(0)
+
+ LOG.debug('Sending health report to Azure fabric.')
+ url = "http://{}/machine?comp=health".format(self._endpoint)
+ self._azure_endpoint_client.post(
+ url,
+ data=document,
+ extra_headers={'Content-Type': 'text/xml; charset=utf-8'})
+ LOG.debug('Successfully sent health report to Azure fabric')
+
+
+class WALinuxAgentShim:
def __init__(self, fallback_lease_file=None, dhcp_options=None):
LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',
@@ -393,6 +570,7 @@ class WALinuxAgentShim(object):
self.dhcpoptions = dhcp_options
self._endpoint = None
self.openssl_manager = None
+ self.azure_endpoint_client = None
self.lease_file = fallback_lease_file
def clean_up(self):
@@ -469,9 +647,10 @@ class WALinuxAgentShim(object):
try:
name = os.path.basename(hook_file).replace('.json', '')
dhcp_options[name] = json.loads(util.load_file((hook_file)))
- except ValueError:
+ except ValueError as e:
raise ValueError(
- '{_file} is not valid JSON data'.format(_file=hook_file))
+ '{_file} is not valid JSON data'.format(_file=hook_file)
+ ) from e
return dhcp_options
@staticmethod
@@ -491,7 +670,22 @@ class WALinuxAgentShim(object):
@staticmethod
@azure_ds_telemetry_reporter
def find_endpoint(fallback_lease_file=None, dhcp245=None):
+ """Finds and returns the Azure endpoint using various methods.
+
+ The Azure endpoint is searched in the following order:
+ 1. Endpoint from dhcp options (dhcp option 245).
+ 2. Endpoint from networkd.
+ 3. Endpoint from dhclient hook json.
+ 4. Endpoint from fallback lease file.
+ 5. The default Azure endpoint.
+
+ @param fallback_lease_file: Fallback lease file that will be used
+ during endpoint search.
+ @param dhcp245: dhcp options that will be used during endpoint search.
+ @return: Azure endpoint IP address.
+ """
value = None
+
if dhcp245 is not None:
value = dhcp245
LOG.debug("Using Azure Endpoint from dhcp options")
@@ -533,42 +727,128 @@ class WALinuxAgentShim(object):
@azure_ds_telemetry_reporter
def register_with_azure_and_fetch_data(self, pubkey_info=None):
+ """Gets the VM's GoalState from Azure, uses the GoalState information
+ to report ready/send the ready signal/provisioning complete signal to
+ Azure, and then uses pubkey_info to filter and obtain the user's
+ pubkeys from the GoalState.
+
+ @param pubkey_info: List of pubkey values and fingerprints which are
+ used to filter and obtain the user's pubkey values from the
+ GoalState.
+ @return: The list of user's authorized pubkey values.
+ """
if self.openssl_manager is None:
self.openssl_manager = OpenSSLManager()
- http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
+ if self.azure_endpoint_client is None:
+ self.azure_endpoint_client = AzureEndpointHttpClient(
+ self.openssl_manager.certificate)
+ goal_state = self._fetch_goal_state_from_azure()
+ ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info)
+ health_reporter = GoalStateHealthReporter(
+ goal_state, self.azure_endpoint_client, self.endpoint)
+ health_reporter.send_ready_signal()
+ return {'public-keys': ssh_keys}
+
+ @azure_ds_telemetry_reporter
+ def _fetch_goal_state_from_azure(self):
+ """Fetches the GoalState XML from the Azure endpoint, parses the XML,
+ and returns a GoalState object.
+
+ @return: GoalState object representing the GoalState XML
+ """
+ unparsed_goal_state_xml = self._get_raw_goal_state_xml_from_azure()
+ return self._parse_raw_goal_state_xml(unparsed_goal_state_xml)
+
+ @azure_ds_telemetry_reporter
+ def _get_raw_goal_state_xml_from_azure(self):
+ """Fetches the GoalState XML from the Azure endpoint and returns
+ the XML as a string.
+
+ @return: GoalState XML string
+ """
+
LOG.info('Registering with Azure...')
- attempts = 0
- while True:
- try:
- response = http_client.get(
- 'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
- except Exception as e:
- if attempts < 10:
- time.sleep(attempts + 1)
- else:
- report_diagnostic_event(
- "failed to register with Azure: %s" % e)
- raise
- else:
- break
- attempts += 1
+ url = 'http://{}/machine/?comp=goalstate'.format(self.endpoint)
+ try:
+ response = self.azure_endpoint_client.get(url)
+ except Exception as e:
+ msg = 'failed to register with Azure: %s' % e
+ LOG.warning(msg)
+ report_diagnostic_event(msg)
+ raise
LOG.debug('Successfully fetched GoalState XML.')
- goal_state = GoalState(response.contents, http_client)
- report_diagnostic_event("container_id %s" % goal_state.container_id)
+ return response.contents
+
+ @azure_ds_telemetry_reporter
+ def _parse_raw_goal_state_xml(self, unparsed_goal_state_xml):
+ """Parses a GoalState XML string and returns a GoalState object.
+
+ @param unparsed_goal_state_xml: GoalState XML string
+ @return: GoalState object representing the GoalState XML
+ """
+ try:
+ goal_state = GoalState(
+ unparsed_goal_state_xml, self.azure_endpoint_client)
+ except Exception as e:
+ msg = 'Error processing GoalState XML: %s' % e
+ LOG.warning(msg)
+ report_diagnostic_event(msg)
+ raise
+ msg = ', '.join([
+ 'GoalState XML container id: %s' % goal_state.container_id,
+ 'GoalState XML instance id: %s' % goal_state.instance_id,
+ 'GoalState XML incarnation: %s' % goal_state.incarnation])
+ LOG.debug(msg)
+ report_diagnostic_event(msg)
+ return goal_state
+
+ @azure_ds_telemetry_reporter
+ def _get_user_pubkeys(self, goal_state, pubkey_info):
+ """Gets and filters the VM admin user's authorized pubkeys.
+
+ The admin user in this case is the username specified as "admin"
+ when deploying VMs on Azure.
+ See https://docs.microsoft.com/en-us/cli/azure/vm#az-vm-create.
+ cloud-init expects a straightforward array of keys to be dropped
+ into the admin user's authorized_keys file. Azure control plane exposes
+ multiple public keys to the VM via wireserver. Select just the
+ admin user's key(s) and return them, ignoring any other certs.
+
+ @param goal_state: GoalState object. The GoalState object contains
+ a certificate XML, which contains both the VM user's authorized
+ pubkeys and other non-user pubkeys, which are used for
+ MSI and protected extension handling.
+ @param pubkey_info: List of VM user pubkey dicts that were previously
+ obtained from provisioning data.
+ Each pubkey dict in this list can either have the format
+ pubkey['value'] or pubkey['fingerprint'].
+ Each pubkey['fingerprint'] in the list is used to filter
+ and obtain the actual pubkey value from the GoalState
+ certificates XML.
+ Each pubkey['value'] requires no further processing and is
+ immediately added to the return list.
+ @return: A list of the VM user's authorized pubkey values.
+ """
ssh_keys = []
if goal_state.certificates_xml is not None and pubkey_info is not None:
LOG.debug('Certificate XML found; parsing out public keys.')
keys_by_fingerprint = self.openssl_manager.parse_certificates(
goal_state.certificates_xml)
ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info)
- self._report_ready(goal_state, http_client)
- return {'public-keys': ssh_keys}
+ return ssh_keys
- def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info):
- """cloud-init expects a straightforward array of keys to be dropped
- into the user's authorized_keys file. Azure control plane exposes
- multiple public keys to the VM via wireserver. Select just the
- user's key(s) and return them, ignoring any other certs.
+ @staticmethod
+ def _filter_pubkeys(keys_by_fingerprint, pubkey_info):
+ """ Filter and return only the user's actual pubkeys.
+
+ @param keys_by_fingerprint: pubkey fingerprint -> pubkey value dict
+ that was obtained from GoalState Certificates XML. May contain
+ non-user pubkeys.
+ @param pubkey_info: List of VM user pubkeys. Pubkey values are added
+ to the return list without further processing. Pubkey fingerprints
+ are used to filter and obtain the actual pubkey values from
+ keys_by_fingerprint.
+ @return: A list of the VM user's authorized pubkey values.
"""
keys = []
for pubkey in pubkey_info:
@@ -587,30 +867,6 @@ class WALinuxAgentShim(object):
return keys
- @azure_ds_telemetry_reporter
- def _report_ready(self, goal_state, http_client):
- LOG.debug('Reporting ready to Azure fabric.')
- document = self.REPORT_READY_XML_TEMPLATE.format(
- incarnation=goal_state.incarnation,
- container_id=goal_state.container_id,
- instance_id=goal_state.instance_id,
- )
- # Host will collect kvps when cloud-init reports ready.
- # some kvps might still be in the queue. We yield the scheduler
- # to make sure we process all kvps up till this point.
- time.sleep(0)
- try:
- http_client.post(
- "http://{0}/machine?comp=health".format(self.endpoint),
- data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
- )
- except Exception as e:
- report_diagnostic_event("exception while reporting ready: %s" % e)
- raise
-
- LOG.info('Reported ready to Azure fabric.')
-
@azure_ds_telemetry_reporter
def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
@@ -623,10 +879,16 @@ def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
shim.clean_up()
-class EphemeralDHCPv4WithReporting(object):
+def dhcp_log_cb(out, err):
+ report_diagnostic_event("dhclient output stream: %s" % out)
+ report_diagnostic_event("dhclient error stream: %s" % err)
+
+
+class EphemeralDHCPv4WithReporting:
def __init__(self, reporter, nic=None):
self.reporter = reporter
- self.ephemeralDHCPv4 = EphemeralDHCPv4(iface=nic)
+ self.ephemeralDHCPv4 = EphemeralDHCPv4(
+ iface=nic, dhcp_log_func=dhcp_log_cb)
def __enter__(self):
with events.ReportEventStack(
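
Putting the KVP constants above together, the dump amounts to tailing at most MAX_LOG_TO_KVP_LENGTH bytes of the log and gz+b64 encoding them; a self-contained sketch of that math, mirroring push_log_to_kvp and report_compressed_event:

    import base64
    import os
    import zlib

    MAX_LOG_TO_KVP_LENGTH = 512000

    def tail_log_compressed(file_name: str) -> dict:
        with open(file_name, "rb") as f:
            f.seek(0, os.SEEK_END)
            seek_index = max(f.tell() - MAX_LOG_TO_KVP_LENGTH, 0)
            f.seek(seek_index, os.SEEK_SET)
            compressed = base64.encodebytes(zlib.compress(f.read()))
        return {"encoding": "gz+b64", "data": compressed.decode("ascii")}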
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index 0e7cccac..b545c4d6 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -8,6 +8,7 @@ import random
from cloudinit import log as logging
from cloudinit import net as cloudnet
from cloudinit import url_helper
+from cloudinit import subp
from cloudinit import util
NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
@@ -15,7 +16,7 @@ NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
LOG = logging.getLogger(__name__)
-def assign_ipv4_link_local(nic=None):
+def assign_ipv4_link_local(distro, nic=None):
"""Bring up NIC using an address using link-local (ip4LL) IPs. On
DigitalOcean, the link-local domain is per-droplet routed, so there
is no risk of collisions. However, to be more safe, the ip4LL
@@ -23,7 +24,7 @@ def assign_ipv4_link_local(nic=None):
"""
if not nic:
- nic = get_link_local_nic()
+ nic = get_link_local_nic(distro)
LOG.debug("selected interface '%s' for reading metadata", nic)
if not nic:
@@ -36,14 +37,14 @@ def assign_ipv4_link_local(nic=None):
ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic]
ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']
- if not util.which('ip'):
+ if not subp.which('ip'):
raise RuntimeError("No 'ip' command available to configure ip4LL "
"address")
try:
- util.subp(ip_addr_cmd)
+ subp.subp(ip_addr_cmd)
LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic)
- util.subp(ip_link_cmd)
+ subp.subp(ip_link_cmd)
LOG.debug("brought device '%s' up", nic)
except Exception:
util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
@@ -53,8 +54,12 @@ def assign_ipv4_link_local(nic=None):
return nic
-def get_link_local_nic():
- nics = [f for f in cloudnet.get_devicelist() if cloudnet.is_physical(f)]
+def get_link_local_nic(distro):
+ nics = [
+ f
+ for f in cloudnet.get_devicelist()
+ if distro.networking.is_physical(f)
+ ]
if not nics:
return None
return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex'))
@@ -74,7 +79,7 @@ def del_ipv4_link_local(nic=None):
ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
try:
- util.subp(ip_addr_cmd)
+ subp.subp(ip_addr_cmd)
LOG.debug("removed ip4LL addresses from %s", nic)
except Exception as e:
diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py
index 2554530d..72edb023 100644
--- a/cloudinit/sources/helpers/hetzner.py
+++ b/cloudinit/sources/helpers/hetzner.py
@@ -7,6 +7,9 @@ from cloudinit import log as logging
from cloudinit import url_helper
from cloudinit import util
+import base64
+import binascii
+
LOG = logging.getLogger(__name__)
@@ -24,3 +27,19 @@ def read_userdata(url, timeout=2, sec_between=2, retries=30):
if not response.ok():
raise RuntimeError("unable to read userdata at %s" % url)
return response.contents
+
+
+def maybe_b64decode(data: bytes) -> bytes:
+ """base64 decode data
+
+ If data is base64 encoded bytes, return b64decode(data).
+ If not, return data unmodified.
+
+ @param data: data as bytes. TypeError is raised if not bytes.
+ """
+ if not isinstance(data, bytes):
+ raise TypeError("data is '%s', expected bytes" % type(data))
+ try:
+ return base64.b64decode(data, validate=True)
+ except binascii.Error:
+ return data
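
A quick usage check for maybe_b64decode, assuming cloud-init is importable: with validate=True, any byte outside the base64 alphabet raises binascii.Error, so non-encoded payloads pass through untouched.

    from cloudinit.sources.helpers.hetzner import maybe_b64decode

    assert maybe_b64decode(b"aGVsbG8=") == b"hello"           # decodes
    assert maybe_b64decode(b"not base64!") == b"not base64!"  # passthrough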
diff --git a/cloudinit/sources/helpers/netlink.py b/cloudinit/sources/helpers/netlink.py
index d377ae3d..c2ad587b 100644
--- a/cloudinit/sources/helpers/netlink.py
+++ b/cloudinit/sources/helpers/netlink.py
@@ -55,7 +55,6 @@ NetlinkHeader = namedtuple('NetlinkHeader', ['length', 'type', 'flags', 'seq',
class NetlinkCreateSocketError(RuntimeError):
'''Raised if netlink socket fails during create or bind.'''
- pass
def create_bound_netlink_socket():
@@ -75,7 +74,7 @@ def create_bound_netlink_socket():
netlink_socket.setblocking(0)
except socket.error as e:
msg = "Exception during netlink socket create: %s" % e
- raise NetlinkCreateSocketError(msg)
+ raise NetlinkCreateSocketError(msg) from e
LOG.debug("Created netlink socket")
return netlink_socket
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 441db506..65e020c5 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -16,6 +16,7 @@ from cloudinit import ec2_utils
from cloudinit import log as logging
from cloudinit import net
from cloudinit import sources
+from cloudinit import subp
from cloudinit import url_helper
from cloudinit import util
from cloudinit.sources import BrokenMetadata
@@ -68,6 +69,7 @@ KNOWN_PHYSICAL_TYPES = (
None,
'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
'bridge',
+ 'cascading', # not present in OpenStack upstream, used on OpenTelekomCloud
'dvs',
'ethernet',
'hw_veb',
@@ -109,7 +111,7 @@ class SourceMixin(object):
dev_entries = util.find_devs_with(criteria)
if dev_entries:
device = dev_entries[0]
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
return device
@@ -278,8 +280,9 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
data = translator(data)
except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to process path %s: %s" % (path, e)
+ ) from e
if found:
results[name] = data
@@ -289,8 +292,9 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
metadata['random_seed'] = base64.b64decode(random_seed)
except (ValueError, TypeError) as e:
- raise BrokenMetadata("Badly formatted metadata"
- " random_seed entry: %s" % e)
+ raise BrokenMetadata(
+ "Badly formatted metadata random_seed entry: %s" % e
+ ) from e
# load any files that were provided
files = {}
@@ -302,8 +306,9 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
files[path] = self._read_content_path(item)
except Exception as e:
- raise BrokenMetadata("Failed to read provided "
- "file %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to read provided file %s: %s" % (path, e)
+ ) from e
results['files'] = files
# The 'network_config' item in metadata is a content pointer
@@ -315,8 +320,9 @@ class BaseReader(metaclass=abc.ABCMeta):
content = self._read_content_path(net_item, decode=True)
results['network_config'] = content
except IOError as e:
- raise BrokenMetadata("Failed to read network"
- " configuration: %s" % (e))
+ raise BrokenMetadata(
+ "Failed to read network configuration: %s" % (e)
+ ) from e
# To openstack, user can specify meta ('nova boot --meta=key=value')
# and those will appear under metadata['meta'].
@@ -368,8 +374,9 @@ class ConfigDriveReader(BaseReader):
try:
return util.load_json(self._path_read(path))
except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to process path %s: %s" % (path, e)
+ ) from e
def read_v1(self):
"""Reads a version 1 formatted location.
@@ -393,13 +400,17 @@ class ConfigDriveReader(BaseReader):
path = found[name]
try:
contents = self._path_read(path)
- except IOError:
- raise BrokenMetadata("Failed to read: %s" % path)
+ except IOError as e:
+ raise BrokenMetadata("Failed to read: %s" % path) from e
try:
- md[key] = translator(contents)
+ # Disable not-callable pylint check; pylint isn't able to
+ # determine that every member of FILES_V1 has a callable in
+ # the appropriate position
+ md[key] = translator(contents) # pylint: disable=E1102
except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to process path %s: %s" % (path, e)
+ ) from e
else:
md[key] = copy.deepcopy(default)
@@ -410,8 +421,11 @@ class ConfigDriveReader(BaseReader):
keydata = meta_js.get('public-keys', keydata)
if keydata:
lines = keydata.splitlines()
- md['public-keys'] = [l for l in lines
- if len(l) and not l.startswith("#")]
+ md['public-keys'] = [
+ line
+ for line in lines
+ if len(line) and not line.startswith("#")
+ ]
# config-drive-v1 has no way for openstack to provide the instance-id
# so we copy that into metadata from the user input
@@ -673,11 +687,13 @@ def convert_net_json(network_json=None, known_macs=None):
raise ValueError("Unable to find a system nic for %s" % d)
d['name'] = known_macs[mac]
- for cfg, key, fmt, target in link_updates:
- if isinstance(target, (list, tuple)):
- cfg[key] = [fmt % link_id_info[l]['name'] for l in target]
+ for cfg, key, fmt, targets in link_updates:
+ if isinstance(targets, (list, tuple)):
+ cfg[key] = [
+ fmt % link_id_info[target]['name'] for target in targets
+ ]
else:
- cfg[key] = fmt % link_id_info[target]['name']
+ cfg[key] = fmt % link_id_info[targets]['name']
# Infiniband interfaces may be referenced in network_data.json by a 6 byte
# Ethernet MAC-style address, and we use that address to look up the
diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/cloudinit/sources/helpers/tests/test_netlink.py
index c2898a16..10760bd6 100644
--- a/cloudinit/sources/helpers/tests/test_netlink.py
+++ b/cloudinit/sources/helpers/tests/test_netlink.py
@@ -87,7 +87,7 @@ class TestParseNetlinkMessage(CiTestCase):
data = None
with self.assertRaises(AssertionError) as context:
read_rta_oper_state(data)
- self.assertTrue('data is none', str(context.exception))
+ self.assertEqual('data is none', str(context.exception))
def test_read_invalid_rta_operstate_none(self):
'''read_rta_oper_state returns none if operstate is none'''
@@ -180,17 +180,22 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
other_ifname = "eth1"
expected_ifname = "eth0"
data_op_down_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_DOWN)
+ other_ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_op_up_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_UP)
+ other_ifname, RTM_NEWLINK, OPER_UP
+ )
data_op_down_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_DOWN)
+ expected_ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_op_up_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_op_down_eth1,
- data_op_up_eth1,
- data_op_down_eth0,
- data_op_up_eth0]
+ expected_ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [
+ data_op_down_eth1,
+ data_op_up_eth1,
+ data_op_down_eth0,
+ data_op_up_eth0
+ ]
wait_for_media_disconnect_connect(m_socket, expected_ifname)
self.assertIn('Ignored netlink event on interface %s' % other_ifname,
self.logs.getvalue())
@@ -207,17 +212,23 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
'''
ifname = "eth0"
data_getlink_down = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_DOWN)
+ ifname, RTM_GETLINK, OPER_DOWN
+ )
data_getlink_up = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_UP)
+ ifname, RTM_GETLINK, OPER_UP
+ )
data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN)
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_getlink_down,
- data_getlink_up,
- data_newlink_down,
- data_newlink_up]
+ ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_getlink_down,
+ data_getlink_up,
+ data_newlink_down,
+ data_newlink_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -233,19 +244,25 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
'''
ifname = "eth0"
data_setlink_down = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_DOWN)
+ ifname, RTM_SETLINK, OPER_DOWN
+ )
data_setlink_up = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_UP)
+ ifname, RTM_SETLINK, OPER_UP
+ )
data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN)
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_setlink_down,
- data_setlink_up,
- data_newlink_down,
- data_newlink_up,
- data_newlink_down,
- data_newlink_up]
+ ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_setlink_down,
+ data_setlink_up,
+ data_newlink_down,
+ data_newlink_up,
+ data_newlink_down,
+ data_newlink_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -255,23 +272,30 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_DORMANT)
- data_op_notpresent = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_NOTPRESENT)
- data_op_lowerdown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_LOWERLAYERDOWN)
- data_op_testing = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_TESTING)
- data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_UNKNOWN)
- m_read_netlink_socket.side_effect = [data_op_up, data_op_up,
- data_op_dormant, data_op_up,
- data_op_notpresent, data_op_up,
- data_op_lowerdown, data_op_up,
- data_op_testing, data_op_up,
- data_op_unknown, data_op_up,
- data_op_down, data_op_up]
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT
+ )
+ data_op_notpresent = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_NOTPRESENT
+ )
+ data_op_lowerdown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN
+ )
+ data_op_testing = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_TESTING
+ )
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN
+ )
+ m_read_netlink_socket.side_effect = [
+ data_op_up, data_op_up,
+ data_op_dormant, data_op_up,
+ data_op_notpresent, data_op_up,
+ data_op_lowerdown, data_op_up,
+ data_op_testing, data_op_up,
+ data_op_unknown, data_op_up,
+ data_op_down, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 14)
@@ -281,12 +305,14 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_DORMANT)
- data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_UNKNOWN)
- m_read_netlink_socket.side_effect = [data_op_down, data_op_dormant,
- data_op_unknown, data_op_up]
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT)
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN)
+ m_read_netlink_socket.side_effect = [
+ data_op_down, data_op_dormant,
+ data_op_unknown, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -300,9 +326,11 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7)
- m_read_netlink_socket.side_effect = [data_op_invalid, data_op_up,
- data_op_down, data_op_invalid,
- data_op_up]
+ m_read_netlink_socket.side_effect = [
+ data_op_invalid, data_op_up,
+ data_op_down, data_op_invalid,
+ data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 5)
@@ -333,8 +361,9 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None)
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_invalid1, data_invalid2,
- data_op_down, data_op_up]
+ m_read_netlink_socket.side_effect = [
+ data_invalid1, data_invalid2, data_op_down, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -344,11 +373,15 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
bytes = ifname.encode("utf-8")
data = bytearray(96)
struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
+ bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
- 3, bytes, 5, 16, int_to_bytes(OPER_UP))
+ struct.pack_into(
+ "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
+ 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
m_read_netlink_socket.return_value = data
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 1)
@@ -360,14 +393,18 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data1 = bytearray(112)
data2 = bytearray(32)
struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
+ bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data1, 80, 8, 3, bytes, 5, 16,
- int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data2, 16, 8, 3, bytes, 5, 16,
- int_to_bytes(OPER_UP))
+ struct.pack_into(
+ "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
m_read_netlink_socket.side_effect = [data1, data2]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 2)
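
For readers tracing the hand-packed buffers above: each fake event is a 48-byte RTM_NEWLINK message, a 16-byte nlmsghdr followed by rtattrs for the interface name (type 3) and its operstate (type 16). Below is a minimal standalone sketch of that layout; the constant values are assumptions written out from the rtnetlink headers (the tests import them from cloudinit.sources.helpers.netlink).

import struct

RTM_NEWLINK = 16          # rtnetlink message type for link events (assumed)
RTATTR_START_OFFSET = 32  # 16-byte nlmsghdr + 16-byte ifinfomsg (assumed)
OPER_DOWN = 2             # IF_OPER_DOWN (assumed)

def build_operstate_msg(ifname, operstate):
    """Pack one 48-byte RTM_NEWLINK message carrying an operstate."""
    data = bytearray(48)
    # nlmsghdr: total length, message type, flags, sequence, pid
    struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0)
    # Two rtattrs: IFLA_IFNAME (len 8, type 3) and IFLA_OPERSTATE
    # (len 5, type 16) carrying a single operstate byte.
    struct.pack_into(
        "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
        ifname.encode("utf-8"), 5, 16, bytes([operstate]))
    return data

msg = build_operstate_msg("eth0", OPER_DOWN)
assert len(msg) == 48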
diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py
new file mode 100644
index 00000000..2bde1e3f
--- /dev/null
+++ b/cloudinit/sources/helpers/tests/test_openstack.py
@@ -0,0 +1,44 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# ./cloudinit/sources/helpers/tests/test_openstack.py
+
+from cloudinit.sources.helpers import openstack
+from cloudinit.tests import helpers as test_helpers
+
+
+class TestConvertNetJson(test_helpers.CiTestCase):
+
+ def test_phy_types(self):
+ """Verify the different known physical types are handled."""
+ # network_data.json example from
+ # https://docs.openstack.org/nova/latest/user/metadata.html
+ mac0 = "fa:16:3e:9c:bf:3d"
+ net_json = {
+ "links": [
+ {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a",
+ "mtu": None, "type": "bridge",
+ "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"}
+ ],
+ "networks": [
+ {"id": "network0", "link": "tapcd9f6d46-4a",
+ "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
+ "type": "ipv4_dhcp"}
+ ],
+ "services": [{"address": "8.8.8.8", "type": "dns"}]
+ }
+ macs = {mac0: 'eth0'}
+
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'mac_address': 'fa:16:3e:9c:bf:3d',
+ 'mtu': None, 'name': 'eth0',
+ 'subnets': [{'type': 'dhcp4'}],
+ 'type': 'physical'},
+ {'address': '8.8.8.8', 'type': 'nameserver'}]}
+
+ for t in openstack.KNOWN_PHYSICAL_TYPES:
+ net_json["links"][0]["type"] = t
+ self.assertEqual(
+ expected,
+ openstack.convert_net_json(network_json=net_json,
+ known_macs=macs))
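
For orientation, here is the same conversion run directly outside the test harness; a sketch assuming a single physical link ('phy' is one member of KNOWN_PHYSICAL_TYPES, and the MAC comes from the fixture above).

from cloudinit.sources.helpers import openstack

net_json = {
    "links": [{"ethernet_mac_address": "fa:16:3e:9c:bf:3d",
               "id": "tapcd9f6d46-4a", "mtu": None, "type": "phy"}],
    "networks": [{"id": "network0", "link": "tapcd9f6d46-4a",
                  "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
                  "type": "ipv4_dhcp"}],
    "services": [{"address": "8.8.8.8", "type": "dns"}],
}
# Each link becomes a v1 'physical' entry, named via the known_macs table.
v1 = openstack.convert_net_json(
    network_json=net_json, known_macs={"fa:16:3e:9c:bf:3d": "eth0"})
assert v1["config"][0]["name"] == "eth0"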
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 2eaeff34..7109aef3 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -25,6 +25,8 @@ class Config(object):
SUFFIX = 'DNS|SUFFIX|'
TIMEZONE = 'DATETIME|TIMEZONE'
UTC = 'DATETIME|UTC'
+ POST_GC_STATUS = 'MISC|POST-GC-STATUS'
+ DEFAULT_RUN_POST_SCRIPT = 'MISC|DEFAULT-RUN-POST-CUST-SCRIPT'
def __init__(self, configFile):
self._configFile = configFile
@@ -104,4 +106,28 @@ class Config(object):
def custom_script_name(self):
"""Return the name of custom (pre/post) script."""
return self._configFile.get(Config.CUSTOM_SCRIPT, None)
+
+ @property
+ def post_gc_status(self):
+ """Return whether to post guestinfo.gc.status VMX property."""
+ postGcStatus = self._configFile.get(Config.POST_GC_STATUS, 'no')
+ postGcStatus = postGcStatus.lower()
+ if postGcStatus not in ('yes', 'no'):
+ raise ValueError('PostGcStatus value should be yes/no')
+ return postGcStatus == 'yes'
+
+ @property
+ def default_run_post_script(self):
+ """
+        Return the default value for enable-custom-scripts when that key
+        is absent from the VM Tools configuration.
+ """
+ defaultRunPostScript = self._configFile.get(
+ Config.DEFAULT_RUN_POST_SCRIPT,
+ 'no')
+ defaultRunPostScript = defaultRunPostScript.lower()
+ if defaultRunPostScript not in ('yes', 'no'):
+ raise ValueError('defaultRunPostScript value should be yes/no')
+ return defaultRunPostScript == 'yes'
+
# vi: ts=4 expandtab
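
Because ConfigFile subclasses dict and Config only calls .get() on the object it is given, the two new flags can be exercised with a plain dict; a minimal sketch:

from cloudinit.sources.helpers.vmware.imc.config import Config

cfg = Config({
    'MISC|POST-GC-STATUS': 'YES',                # parsed case-insensitively
    'MISC|DEFAULT-RUN-POST-CUST-SCRIPT': 'no',
})
assert cfg.post_gc_status is True
assert cfg.default_run_post_script is False

# Anything other than yes/no (in any case) is rejected.
try:
    Config({'MISC|POST-GC-STATUS': 'maybe'}).post_gc_status
except ValueError as e:
    print(e)  # PostGcStatus value should be yes/no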
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
index 9f14770e..2ab22de9 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -9,6 +9,7 @@ import logging
import os
import stat
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -61,7 +62,7 @@ class PreCustomScript(RunCustomScript):
"""Executing custom script with precustomization argument."""
LOG.debug("Executing pre-customization script")
self.prepare_script()
- util.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"])
+ subp.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"])
class PostCustomScript(RunCustomScript):
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 602af078..fc034c95 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -22,7 +22,6 @@ class ConfigFile(ConfigSource, dict):
def __init__(self, filename):
self._loadConfigFile(filename)
- pass
def _insertKey(self, key, val):
"""
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
index 2f29edd4..5899d8f7 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -10,6 +10,5 @@ from .config_source import ConfigSource
class ConfigNamespace(ConfigSource):
"""Specifies the Config Namespace."""
- pass
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 77cbf3b6..3745a262 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -10,6 +10,7 @@ import os
import re
from cloudinit.net.network_state import mask_to_net_prefix
+from cloudinit import subp
from cloudinit import util
logger = logging.getLogger(__name__)
@@ -73,7 +74,7 @@ class NicConfigurator(object):
The mac address(es) are in the lower case
"""
cmd = ['ip', 'addr', 'show']
- output, _err = util.subp(cmd)
+ output, _err = subp.subp(cmd)
sections = re.split(r'\n\d+: ', '\n' + output)[1:]
macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
@@ -248,8 +249,8 @@ class NicConfigurator(object):
logger.info('Clearing DHCP leases')
# Ignore the return code 1.
- util.subp(["pkill", "dhclient"], rcs=[0, 1])
- util.subp(["rm", "-f", "/var/lib/dhcp/*"])
+ subp.subp(["pkill", "dhclient"], rcs=[0, 1])
+ subp.subp(["rm", "-f", "/var/lib/dhcp/*"])
def configure(self, osfamily=None):
"""
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
index 8c91fa41..d16a7690 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -9,6 +9,7 @@
import logging
import os
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -56,10 +57,10 @@ class PasswordConfigurator(object):
LOG.info('Expiring password.')
for user in uidUserList:
try:
- util.subp(['passwd', '--expire', user])
- except util.ProcessExecutionError as e:
+ subp.subp(['passwd', '--expire', user])
+ except subp.ProcessExecutionError as e:
if os.path.exists('/usr/bin/chage'):
- util.subp(['chage', '-d', '0', user])
+ subp.subp(['chage', '-d', '0', user])
else:
LOG.warning('Failed to expire password for %s with error: '
'%s', user, e)
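
The hunks above and below follow the same mechanical migration made throughout this release: subp(), which() and ProcessExecutionError moved from cloudinit.util into the new cloudinit.subp module. The resulting calling pattern, sketched for a hypothetical caller (expire_password is illustrative, not part of the codebase):

from cloudinit import subp

def expire_password(user):
    # Same shape as the code above: try passwd first, fall back to chage.
    try:
        subp.subp(['passwd', '--expire', user])
    except subp.ProcessExecutionError:
        if subp.which('chage'):
            subp.subp(['chage', '-d', '0', user])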
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
index 2f8ea546..7ec06a9c 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -8,6 +8,5 @@
class ConfigSource(object):
"""Specifies a source for the Config Content."""
- pass
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index 3d369d04..d919f693 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -10,7 +10,7 @@ import os
import re
import time
-from cloudinit import util
+from cloudinit import subp
from .guestcust_event import GuestCustEventEnum
from .guestcust_state import GuestCustStateEnum
@@ -34,7 +34,7 @@ def send_rpc(rpc):
try:
logger.debug("Sending RPC command: %s", rpc)
- (out, err) = util.subp(["vmware-rpctool", rpc], rcs=[0])
+ (out, err) = subp.subp(["vmware-rpctool", rpc], rcs=[0])
# Remove the trailing newline in the output.
if out:
out = out.rstrip()
@@ -128,30 +128,46 @@ def get_tools_config(section, key, defaultVal):
not installed.
"""
- if not util.which('vmware-toolbox-cmd'):
+ if not subp.which('vmware-toolbox-cmd'):
logger.debug(
'vmware-toolbox-cmd not installed, returning default value')
return defaultVal
- retValue = defaultVal
cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key]
try:
- (outText, _) = util.subp(cmd)
- m = re.match(r'([^=]+)=(.*)', outText)
- if m:
- retValue = m.group(2).strip()
- logger.debug("Get tools config: [%s] %s = %s",
- section, key, retValue)
- else:
+ (outText, _) = subp.subp(cmd)
+ except subp.ProcessExecutionError as e:
+ if e.exit_code == 69:
logger.debug(
- "Tools config: [%s] %s is not found, return default value: %s",
- section, key, retValue)
- except util.ProcessExecutionError as e:
- logger.error("Failed running %s[%s]", cmd, e.exit_code)
- logger.exception(e)
+ "vmware-toolbox-cmd returned 69 (unavailable) for cmd: %s."
+ " Return default value: %s", " ".join(cmd), defaultVal)
+ else:
+ logger.error("Failed running %s[%s]", cmd, e.exit_code)
+ logger.exception(e)
+ return defaultVal
+
+ retValue = defaultVal
+ m = re.match(r'([^=]+)=(.*)', outText)
+ if m:
+ retValue = m.group(2).strip()
+ logger.debug("Get tools config: [%s] %s = %s",
+ section, key, retValue)
+ else:
+ logger.debug(
+ "Tools config: [%s] %s is not found, return default value: %s",
+ section, key, retValue)
return retValue
+# Post a status message to the guestinfo.gc.status VMX property on the
+# underlying VMware virtualization platform.
+def set_gc_status(config, gcMsg):
+ if config and config.post_gc_status:
+ rpc = "info-set guestinfo.gc.status %s" % gcMsg
+ return send_rpc(rpc)
+ return None
+
+
# vi: ts=4 expandtab
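
A sketch of how a customization flow might use the new set_gc_status() helper; the status strings here are illustrative, while the gating on config.post_gc_status is what the helper itself provides:

from cloudinit.sources.helpers.vmware.imc.guestcust_util import set_gc_status

def run_customization(config):
    # set_gc_status is a no-op (returns None) unless MISC|POST-GC-STATUS
    # is 'yes' in the customization config.
    set_gc_status(config, 'Started')
    try:
        pass  # ... actual customization steps ...
    except Exception:
        set_gc_status(config, 'Customization failed')
        raise
    set_gc_status(config, 'Successful')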
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index f73b37ed..1420a988 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -55,6 +55,7 @@ class InvalidDataSourceTestSubclassNet(DataSource):
class TestDataSource(CiTestCase):
with_logs = True
+ maxDiff = None
def setUp(self):
super(TestDataSource, self).setUp()
@@ -288,27 +289,47 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- datasource.get_data()
+ sys_info = {
+ "python": "3.7",
+ "platform":
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
+ "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
+ "x86_64"],
+ "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
expected = {
'base64_encoded_keys': [],
- 'sensitive_keys': [],
+ 'merged_cfg': REDACT_SENSITIVE_VALUE,
+ 'sensitive_keys': ['merged_cfg'],
+ 'sys_info': sys_info,
'v1': {
'_beta_keys': ['subplatform'],
'availability-zone': 'myaz',
'availability_zone': 'myaz',
'cloud-name': 'subclasscloudname',
'cloud_name': 'subclasscloudname',
+ 'distro': 'ubuntu',
+ 'distro_release': 'focal',
+ 'distro_version': '20.04',
'instance-id': 'iid-datasource',
'instance_id': 'iid-datasource',
'local-hostname': 'test-subclass-hostname',
'local_hostname': 'test-subclass-hostname',
+ 'kernel_release': '5.4.0-24-generic',
+ 'machine': 'x86_64',
'platform': 'mytestsubclass',
'public_ssh_keys': [],
+ 'python_version': '3.7',
'region': 'myregion',
- 'subplatform': 'unknown'},
+ 'system_platform':
+ 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
+ 'subplatform': 'unknown',
+ 'variant': 'ubuntu'},
'ds': {
'_doc': EXPERIMENTAL_TEXT,
'meta_data': {'availability_zone': 'myaz',
'local-hostname': 'test-subclass-hostname',
@@ -318,8 +339,8 @@ class TestDataSource(CiTestCase):
self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
self.assertEqual(expected, util.load_json(content))
- def test_get_data_writes_json_instance_data_sensitive(self):
- """get_data writes INSTANCE_JSON_SENSITIVE_FILE as readonly root."""
+ def test_get_data_writes_redacted_public_json_instance_data(self):
+ """get_data writes redacted content to public INSTANCE_JSON_FILE."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
@@ -329,33 +350,49 @@ class TestDataSource(CiTestCase):
'region': 'myregion',
'some': {'security-credentials': {
'cred1': 'sekret', 'cred2': 'othersekret'}}})
- self.assertEqual(
- ('security-credentials',), datasource.sensitive_metadata_keys)
- datasource.get_data()
+ self.assertCountEqual(
+ ('merged_cfg', 'security-credentials',),
+ datasource.sensitive_metadata_keys)
+ sys_info = {
+ "python": "3.7",
+ "platform":
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
+ "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
+ "x86_64"],
+ "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
redacted = util.load_json(util.load_file(json_file))
- self.assertEqual(
- {'cred1': 'sekret', 'cred2': 'othersekret'},
- redacted['ds']['meta_data']['some']['security-credentials'])
- content = util.load_file(sensitive_json_file)
expected = {
'base64_encoded_keys': [],
- 'sensitive_keys': ['ds/meta_data/some/security-credentials'],
+ 'merged_cfg': REDACT_SENSITIVE_VALUE,
+ 'sensitive_keys': [
+ 'ds/meta_data/some/security-credentials', 'merged_cfg'],
+ 'sys_info': sys_info,
'v1': {
'_beta_keys': ['subplatform'],
'availability-zone': 'myaz',
'availability_zone': 'myaz',
'cloud-name': 'subclasscloudname',
'cloud_name': 'subclasscloudname',
+ 'distro': 'ubuntu',
+ 'distro_release': 'focal',
+ 'distro_version': '20.04',
'instance-id': 'iid-datasource',
'instance_id': 'iid-datasource',
'local-hostname': 'test-subclass-hostname',
'local_hostname': 'test-subclass-hostname',
+ 'kernel_release': '5.4.0-24-generic',
+ 'machine': 'x86_64',
'platform': 'mytestsubclass',
'public_ssh_keys': [],
+ 'python_version': '3.7',
'region': 'myregion',
- 'subplatform': 'unknown'},
+ 'system_platform':
+ 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
+ 'subplatform': 'unknown',
+ 'variant': 'ubuntu'},
'ds': {
'_doc': EXPERIMENTAL_TEXT,
'meta_data': {
@@ -364,8 +401,83 @@ class TestDataSource(CiTestCase):
'region': 'myregion',
'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}}
}
- self.maxDiff = None
- self.assertEqual(expected, util.load_json(content))
+ self.assertCountEqual(expected, redacted)
+ file_stat = os.stat(json_file)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+
+ def test_get_data_writes_json_instance_data_sensitive(self):
+ """
+ get_data writes unmodified data to sensitive file as root-readonly.
+ """
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ custom_metadata={
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+ 'some': {'security-credentials': {
+ 'cred1': 'sekret', 'cred2': 'othersekret'}}})
+ sys_info = {
+ "python": "3.7",
+ "platform":
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
+ "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
+ "x86_64"],
+ "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+
+ self.assertCountEqual(
+ ('merged_cfg', 'security-credentials',),
+ datasource.sensitive_metadata_keys)
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
+ sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
+ content = util.load_file(sensitive_json_file)
+ expected = {
+ 'base64_encoded_keys': [],
+ 'merged_cfg': {
+ '_doc': (
+ 'Merged cloud-init system config from '
+ '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'
+ ),
+ 'datasource': {'_undef': {'key1': False}}},
+ 'sensitive_keys': [
+ 'ds/meta_data/some/security-credentials', 'merged_cfg'],
+ 'sys_info': sys_info,
+ 'v1': {
+ '_beta_keys': ['subplatform'],
+ 'availability-zone': 'myaz',
+ 'availability_zone': 'myaz',
+ 'cloud-name': 'subclasscloudname',
+ 'cloud_name': 'subclasscloudname',
+ 'distro': 'ubuntu',
+ 'distro_release': 'focal',
+ 'distro_version': '20.04',
+ 'instance-id': 'iid-datasource',
+ 'instance_id': 'iid-datasource',
+ 'kernel_release': '5.4.0-24-generic',
+ 'local-hostname': 'test-subclass-hostname',
+ 'local_hostname': 'test-subclass-hostname',
+ 'machine': 'x86_64',
+ 'platform': 'mytestsubclass',
+ 'public_ssh_keys': [],
+ 'python_version': '3.7',
+ 'region': 'myregion',
+ 'subplatform': 'unknown',
+ 'system_platform':
+ 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
+ 'variant': 'ubuntu'},
+ 'ds': {
+ '_doc': EXPERIMENTAL_TEXT,
+ 'meta_data': {
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+ 'some': {
+ 'security-credentials':
+ {'cred1': 'sekret', 'cred2': 'othersekret'}}}}
+ }
+ self.assertCountEqual(expected, util.load_json(content))
file_stat = os.stat(sensitive_json_file)
self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
self.assertEqual(expected, util.load_json(content))
@@ -431,7 +543,7 @@ class TestDataSource(CiTestCase):
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
instance_json = util.load_json(content)
- self.assertItemsEqual(
+ self.assertCountEqual(
['ds/meta_data/key2/key2.1'],
instance_json['base64_encoded_keys'])
self.assertEqual(
@@ -440,9 +552,7 @@ class TestDataSource(CiTestCase):
def test_get_hostname_subclass_support(self):
"""Validate get_hostname signature on all subclasses of DataSource."""
- # Use inspect.getfullargspec when we drop py2.6 and py2.7
- get_args = inspect.getargspec # pylint: disable=W1505
- base_args = get_args(DataSource.get_hostname) # pylint: disable=W1505
+ base_args = inspect.getfullargspec(DataSource.get_hostname)
# Import all DataSource subclasses so we can inspect them.
modules = util.find_modules(os.path.dirname(os.path.dirname(__file__)))
for _loc, name in modules.items():
@@ -454,13 +564,13 @@ class TestDataSource(CiTestCase):
continue
self.assertEqual(
base_args,
- get_args(child.get_hostname), # pylint: disable=W1505
+ inspect.getfullargspec(child.get_hostname),
'%s does not implement DataSource.get_hostname params'
% child)
for grandchild in child.__subclasses__():
self.assertEqual(
base_args,
- get_args(grandchild.get_hostname), # pylint: disable=W1505
+ inspect.getfullargspec(grandchild.get_hostname),
'%s does not implement DataSource.get_hostname params'
% grandchild)
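
The getargspec to getfullargspec swap above is more than lint hygiene: getargspec has been deprecated since Python 3.0 and is removed in Python 3.11, and getfullargspec also captures keyword-only arguments, so signature drift in a subclass cannot hide. A minimal illustration with a hypothetical signature:

import inspect

class Base:
    def get_hostname(self, fqdn=False, metadata_only=False):
        raise NotImplementedError

class Drifted(Base):
    def get_hostname(self, fqdn=False):  # dropped metadata_only
        raise NotImplementedError

# The comparison the test performs flags the drifted override.
assert (inspect.getfullargspec(Base.get_hostname)
        != inspect.getfullargspec(Drifted.get_hostname))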
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
index abf3d359..7bd23813 100644
--- a/cloudinit/sources/tests/test_oracle.py
+++ b/cloudinit/sources/tests/test_oracle.py
@@ -1,22 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.sources import DataSourceOracle as oracle
-from cloudinit.sources import BrokenMetadata, NetworkConfigSource
-from cloudinit import helpers
-
-from cloudinit.tests import helpers as test_helpers
-
-from textwrap import dedent
-import argparse
+import base64
import copy
-import httpretty
import json
-import os
-import uuid
+from contextlib import ExitStack
from unittest import mock
+import pytest
+
+from cloudinit.sources import DataSourceOracle as oracle
+from cloudinit.sources import NetworkConfigSource
+from cloudinit.sources.DataSourceOracle import OpcMetadata
+from cloudinit.tests import helpers as test_helpers
+from cloudinit.url_helper import UrlError
+
DS_PATH = "cloudinit.sources.DataSourceOracle"
-MD_VER = "2013-10-17"
# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Bare Metal Machine
# with a secondary VNIC attached (vnicId truncated for Python line length)
@@ -59,328 +57,99 @@ OPC_VM_SECONDARY_VNIC_RESPONSE = """\
} ]"""
-class TestDataSourceOracle(test_helpers.CiTestCase):
- """Test datasource DataSourceOracle."""
-
- with_logs = True
-
- ds_class = oracle.DataSourceOracle
-
- my_uuid = str(uuid.uuid4())
- my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj",
- "name": "ci-vm1", "availability_zone": "phx-ad-3",
- "hostname": "ci-vm1hostname",
- "launch_index": 0, "files": [],
- "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"},
- "meta": {}}
-
- def _patch_instance(self, inst, patches):
- """Patch an instance of a class 'inst'.
- for each name, kwargs in patches:
- inst.name = mock.Mock(**kwargs)
- returns a namespace object that has
- namespace.name = mock.Mock(**kwargs)
- Do not bother with cleanup as instance is assumed transient."""
- mocks = argparse.Namespace()
- for name, kwargs in patches.items():
- imock = mock.Mock(name=name, spec=getattr(inst, name), **kwargs)
- setattr(mocks, name, imock)
- setattr(inst, name, imock)
- return mocks
-
- def _get_ds(self, sys_cfg=None, distro=None, paths=None, ud_proc=None,
- patches=None):
- if sys_cfg is None:
- sys_cfg = {}
- if patches is None:
- patches = {}
- if paths is None:
- tmpd = self.tmp_dir()
- dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd),
- 'run_dir': self.tmp_path('run_dir')}
- for d in dirs.values():
- os.mkdir(d)
- paths = helpers.Paths(dirs)
-
- ds = self.ds_class(sys_cfg=sys_cfg, distro=distro,
- paths=paths, ud_proc=ud_proc)
-
- return ds, self._patch_instance(ds, patches)
-
- def test_platform_not_viable_returns_false(self):
- ds, mocks = self._get_ds(
- patches={'_is_platform_viable': {'return_value': False}})
- self.assertFalse(ds._get_data())
- mocks._is_platform_viable.assert_called_once_with()
-
- def test_platform_info(self):
- """Return platform-related information for Oracle Datasource."""
- ds, _mocks = self._get_ds()
- self.assertEqual('oracle', ds.cloud_name)
- self.assertEqual('oracle', ds.platform_type)
- self.assertEqual(
- 'metadata (http://169.254.169.254/openstack/)', ds.subplatform)
-
- def test_sys_cfg_can_enable_configure_secondary_nics(self):
- # Confirm that behaviour is toggled by sys_cfg
- ds, _mocks = self._get_ds()
- self.assertFalse(ds.ds_cfg['configure_secondary_nics'])
-
- sys_cfg = {
- 'datasource': {'Oracle': {'configure_secondary_nics': True}}}
- ds, _mocks = self._get_ds(sys_cfg=sys_cfg)
- self.assertTrue(ds.ds_cfg['configure_secondary_nics'])
-
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_without_userdata(self, m_is_iscsi_root):
- """If no user-data is provided, it should not be in return dict."""
- ds, mocks = self._get_ds(patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- self.assertTrue(ds._get_data())
- mocks._is_platform_viable.assert_called_once_with()
- mocks.crawl_metadata.assert_called_once_with()
- self.assertEqual(self.my_uuid, ds.system_uuid)
- self.assertEqual(self.my_md['availability_zone'], ds.availability_zone)
- self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys())
- self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
- self.assertIsNone(ds.userdata_raw)
-
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_with_vendordata(self, m_is_iscsi_root):
- """Test with vendor data."""
- vd = {'cloud-init': '#cloud-config\nkey: value'}
- ds, mocks = self._get_ds(patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md,
- 'vendor_data': vd}}}})
- self.assertTrue(ds._get_data())
- mocks._is_platform_viable.assert_called_once_with()
- mocks.crawl_metadata.assert_called_once_with()
- self.assertEqual(vd, ds.vendordata_pure)
- self.assertEqual(vd['cloud-init'], ds.vendordata_raw)
-
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_with_userdata(self, m_is_iscsi_root):
- """Ensure user-data is populated if present and is binary."""
- my_userdata = b'abcdefg'
- ds, mocks = self._get_ds(patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md,
- 'user_data': my_userdata}}}})
- self.assertTrue(ds._get_data())
- mocks._is_platform_viable.assert_called_once_with()
- mocks.crawl_metadata.assert_called_once_with()
- self.assertEqual(self.my_uuid, ds.system_uuid)
- self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys())
- self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
- self.assertEqual(my_userdata, ds.userdata_raw)
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds",
- side_effect=lambda network_config: network_config)
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_network_cmdline(self, m_is_iscsi_root, m_initramfs_config,
- _m_add_network_config_from_opc_imds):
- """network_config should read kernel cmdline."""
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- ncfg = {'version': 1, 'config': [{'a': 'b'}]}
- m_initramfs_config.return_value = ncfg
- self.assertTrue(ds._get_data())
- self.assertEqual(ncfg, ds.network_config)
- self.assertEqual([mock.call()], m_initramfs_config.call_args_list)
- self.assertFalse(distro.generate_fallback_config.called)
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds",
- side_effect=lambda network_config: network_config)
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_network_fallback(self, m_is_iscsi_root, m_initramfs_config,
- _m_add_network_config_from_opc_imds):
- """test that fallback network is generated if no kernel cmdline."""
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- ncfg = {'version': 1, 'config': [{'a': 'b'}]}
- m_initramfs_config.return_value = None
- self.assertTrue(ds._get_data())
- ncfg = {'version': 1, 'config': [{'distro1': 'value'}]}
- distro.generate_fallback_config.return_value = ncfg
- self.assertEqual(ncfg, ds.network_config)
- self.assertEqual([mock.call()], m_initramfs_config.call_args_list)
- distro.generate_fallback_config.assert_called_once_with()
-
- # test that the result got cached, and the methods not re-called.
- self.assertEqual(ncfg, ds.network_config)
- self.assertEqual(1, m_initramfs_config.call_count)
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config",
- return_value={'some': 'config'})
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_secondary_nics_added_to_network_config_if_enabled(
- self, _m_is_iscsi_root, _m_initramfs_config,
- m_add_network_config_from_opc_imds):
-
- needle = object()
-
- def network_config_side_effect(network_config):
- network_config['secondary_added'] = needle
-
- m_add_network_config_from_opc_imds.side_effect = (
- network_config_side_effect)
-
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- ds.ds_cfg['configure_secondary_nics'] = True
- self.assertEqual(needle, ds.network_config['secondary_added'])
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config",
- return_value={'some': 'config'})
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_secondary_nics_not_added_to_network_config_by_default(
- self, _m_is_iscsi_root, _m_initramfs_config,
- m_add_network_config_from_opc_imds):
-
- def network_config_side_effect(network_config):
- network_config['secondary_added'] = True
-
- m_add_network_config_from_opc_imds.side_effect = (
- network_config_side_effect)
-
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- self.assertNotIn('secondary_added', ds.network_config)
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_secondary_nic_failure_isnt_blocking(
- self, _m_is_iscsi_root, m_initramfs_config,
- m_add_network_config_from_opc_imds):
-
- m_add_network_config_from_opc_imds.side_effect = Exception()
-
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- ds.ds_cfg['configure_secondary_nics'] = True
- self.assertEqual(ds.network_config, m_initramfs_config.return_value)
- self.assertIn('Failed to fetch secondary network configuration',
- self.logs.getvalue())
-
- def test_ds_network_cfg_preferred_over_initramfs(self):
- """Ensure that DS net config is preferred over initramfs config"""
- network_config_sources = oracle.DataSourceOracle.network_config_sources
- self.assertLess(
- network_config_sources.index(NetworkConfigSource.ds),
- network_config_sources.index(NetworkConfigSource.initramfs)
- )
-
-
-@mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4()))
-class TestReadMetaData(test_helpers.HttprettyTestCase):
- """Test the read_metadata which interacts with http metadata service."""
-
- mdurl = oracle.METADATA_ENDPOINT
- my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj",
- "name": "ci-vm1", "availability_zone": "phx-ad-3",
- "hostname": "ci-vm1hostname",
- "launch_index": 0, "files": [],
- "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"},
- "meta": {}}
-
- def populate_md(self, data):
- """call httppretty.register_url for each item dict 'data',
- including valid indexes. Text values converted to bytes."""
- httpretty.register_uri(
- httpretty.GET, self.mdurl + MD_VER + "/",
- '\n'.join(data.keys()).encode('utf-8'))
- for k, v in data.items():
- httpretty.register_uri(
- httpretty.GET, self.mdurl + MD_VER + "/" + k,
- v if not isinstance(v, str) else v.encode('utf-8'))
-
- def test_broken_no_sys_uuid(self, m_read_system_uuid):
- """Datasource requires ability to read system_uuid and true return."""
- m_read_system_uuid.return_value = None
- self.assertRaises(BrokenMetadata, oracle.read_metadata)
-
- def test_broken_no_metadata_json(self, m_read_system_uuid):
- """Datasource requires meta_data.json."""
- httpretty.register_uri(
- httpretty.GET, self.mdurl + MD_VER + "/",
- '\n'.join(['user_data']).encode('utf-8'))
- with self.assertRaises(BrokenMetadata) as cm:
- oracle.read_metadata()
- self.assertIn("Required field 'meta_data.json' missing",
- str(cm.exception))
-
- def test_with_userdata(self, m_read_system_uuid):
- data = {'user_data': b'#!/bin/sh\necho hi world\n',
- 'meta_data.json': json.dumps(self.my_md)}
- self.populate_md(data)
- result = oracle.read_metadata()[MD_VER]
- self.assertEqual(data['user_data'], result['user_data'])
- self.assertEqual(self.my_md, result['meta_data'])
-
- def test_without_userdata(self, m_read_system_uuid):
- data = {'meta_data.json': json.dumps(self.my_md)}
- self.populate_md(data)
- result = oracle.read_metadata()[MD_VER]
- self.assertNotIn('user_data', result)
- self.assertEqual(self.my_md, result['meta_data'])
-
- def test_unknown_fields_included(self, m_read_system_uuid):
- """Unknown fields listed in index should be included.
- And those ending in .json should be decoded."""
- some_data = {'key1': 'data1', 'subk1': {'subd1': 'subv'}}
- some_vendor_data = {'cloud-init': 'foo'}
- data = {'meta_data.json': json.dumps(self.my_md),
- 'some_data.json': json.dumps(some_data),
- 'vendor_data.json': json.dumps(some_vendor_data),
- 'other_blob': b'this is blob'}
- self.populate_md(data)
- result = oracle.read_metadata()[MD_VER]
- self.assertNotIn('user_data', result)
- self.assertEqual(self.my_md, result['meta_data'])
- self.assertEqual(some_data, result['some_data'])
- self.assertEqual(some_vendor_data, result['vendor_data'])
- self.assertEqual(data['other_blob'], result['other_blob'])
+# Fetched with `curl http://169.254.169.254/opc/v1/instance/` (and then
+# truncated for line length)
+OPC_V2_METADATA = """\
+{
+ "availabilityDomain" : "qIZq:PHX-AD-1",
+ "faultDomain" : "FAULT-DOMAIN-2",
+ "compartmentId" : "ocid1.tenancy.oc1..aaaaaaaao7f7cccogqrg5emjxkxmTRUNCATED",
+ "displayName" : "instance-20200320-1400",
+ "hostname" : "instance-20200320-1400",
+ "id" : "ocid1.instance.oc1.phx.anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED",
+ "image" : "ocid1.image.oc1.phx.aaaaaaaagmkn4gdhvvx24kiahh2b2qchsicTRUNCATED",
+ "metadata" : {
+ "ssh_authorized_keys" : "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated",
+ "user_data" : "IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"
+ },
+ "region" : "phx",
+ "canonicalRegionName" : "us-phoenix-1",
+ "ociAdName" : "phx-ad-3",
+ "shape" : "VM.Standard2.1",
+ "state" : "Running",
+ "timeCreated" : 1584727285318,
+ "agentConfig" : {
+ "monitoringDisabled" : true,
+ "managementDisabled" : true
+ }
+}"""
+
+# Just a small, meaningless change to differentiate the two metadata versions
+OPC_V1_METADATA = OPC_V2_METADATA.replace("ocid1.instance", "ocid2.instance")
+
+
+@pytest.fixture
+def metadata_version():
+ return 2
+
+
+@pytest.yield_fixture
+def oracle_ds(request, fixture_utils, paths, metadata_version):
+ """
+ Return an instantiated DataSourceOracle.
+
+ This also performs the mocking required for the default test case:
+ * ``_read_system_uuid`` returns something,
+ * ``_is_platform_viable`` returns True,
+ * ``_is_iscsi_root`` returns True (the simpler code path),
+    * ``read_opc_metadata`` returns ``OPC_V2_METADATA``
+
+ (This uses the paths fixture for the required helpers.Paths object, and the
+ fixture_utils fixture for fetching markers.)
+ """
+ sys_cfg = fixture_utils.closest_marker_first_arg_or(
+ request, "ds_sys_cfg", mock.MagicMock()
+ )
+ metadata = OpcMetadata(metadata_version, json.loads(OPC_V2_METADATA), None)
+ with mock.patch(DS_PATH + "._read_system_uuid", return_value="someuuid"):
+ with mock.patch(DS_PATH + "._is_platform_viable", return_value=True):
+ with mock.patch(DS_PATH + "._is_iscsi_root", return_value=True):
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ return_value=metadata,
+ ):
+ yield oracle.DataSourceOracle(
+ sys_cfg=sys_cfg, distro=mock.Mock(), paths=paths,
+ )
+
+
+class TestDataSourceOracle:
+ def test_platform_info(self, oracle_ds):
+ assert "oracle" == oracle_ds.cloud_name
+ assert "oracle" == oracle_ds.platform_type
+
+ def test_subplatform_before_fetch(self, oracle_ds):
+ assert 'unknown' == oracle_ds.subplatform
+
+ def test_platform_info_after_fetch(self, oracle_ds):
+ oracle_ds._get_data()
+ assert 'metadata (http://169.254.169.254/opc/v2/)' == \
+ oracle_ds.subplatform
+
+ @pytest.mark.parametrize('metadata_version', [1])
+ def test_v1_platform_info_after_fetch(self, oracle_ds):
+ oracle_ds._get_data()
+ assert 'metadata (http://169.254.169.254/opc/v1/)' == \
+ oracle_ds.subplatform
+
+ def test_secondary_nics_disabled_by_default(self, oracle_ds):
+ assert not oracle_ds.ds_cfg["configure_secondary_nics"]
+
+ @pytest.mark.ds_sys_cfg(
+ {"datasource": {"Oracle": {"configure_secondary_nics": True}}}
+ )
+ def test_sys_cfg_can_enable_configure_secondary_nics(self, oracle_ds):
+ assert oracle_ds.ds_cfg["configure_secondary_nics"]
class TestIsPlatformViable(test_helpers.CiTestCase):
@@ -404,192 +173,99 @@ class TestIsPlatformViable(test_helpers.CiTestCase):
m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
-class TestLoadIndex(test_helpers.CiTestCase):
- """_load_index handles parsing of an index into a proper list.
- The tests here guarantee correct parsing of html version or
- a fixed version. See the function docstring for more doc."""
-
- _known_html_api_versions = dedent("""\
- <html>
- <head><title>Index of /openstack/</title></head>
- <body bgcolor="white">
- <h1>Index of /openstack/</h1><hr><pre><a href="../">../</a>
- <a href="2013-10-17/">2013-10-17/</a> 27-Jun-2018 12:22 -
- <a href="latest/">latest/</a> 27-Jun-2018 12:22 -
- </pre><hr></body>
- </html>""")
-
- _known_html_contents = dedent("""\
- <html>
- <head><title>Index of /openstack/2013-10-17/</title></head>
- <body bgcolor="white">
- <h1>Index of /openstack/2013-10-17/</h1><hr><pre><a href="../">../</a>
- <a href="meta_data.json">meta_data.json</a> 27-Jun-2018 12:22 679
- <a href="user_data">user_data</a> 27-Jun-2018 12:22 146
- </pre><hr></body>
- </html>""")
-
- def test_parse_html(self):
- """Test parsing of lower case html."""
- self.assertEqual(
- ['2013-10-17/', 'latest/'],
- oracle._load_index(self._known_html_api_versions))
- self.assertEqual(
- ['meta_data.json', 'user_data'],
- oracle._load_index(self._known_html_contents))
-
- def test_parse_html_upper(self):
- """Test parsing of upper case html, although known content is lower."""
- def _toupper(data):
- return data.replace("<a", "<A").replace("html>", "HTML>")
-
- self.assertEqual(
- ['2013-10-17/', 'latest/'],
- oracle._load_index(_toupper(self._known_html_api_versions)))
- self.assertEqual(
- ['meta_data.json', 'user_data'],
- oracle._load_index(_toupper(self._known_html_contents)))
-
- def test_parse_newline_list_with_endl(self):
- """Test parsing of newline separated list with ending newline."""
- self.assertEqual(
- ['2013-10-17/', 'latest/'],
- oracle._load_index("\n".join(["2013-10-17/", "latest/", ""])))
- self.assertEqual(
- ['meta_data.json', 'user_data'],
- oracle._load_index("\n".join(["meta_data.json", "user_data", ""])))
-
- def test_parse_newline_list_without_endl(self):
- """Test parsing of newline separated list with no ending newline.
-
- Actual openstack implementation does not include trailing newline."""
- self.assertEqual(
- ['2013-10-17/', 'latest/'],
- oracle._load_index("\n".join(["2013-10-17/", "latest/"])))
- self.assertEqual(
- ['meta_data.json', 'user_data'],
- oracle._load_index("\n".join(["meta_data.json", "user_data"])))
-
-
-class TestNetworkConfigFromOpcImds(test_helpers.CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestNetworkConfigFromOpcImds, self).setUp()
- self.add_patch(DS_PATH + '.readurl', 'm_readurl')
- self.add_patch(DS_PATH + '.get_interfaces_by_mac',
- 'm_get_interfaces_by_mac')
-
- def test_failure_to_readurl(self):
- # readurl failures should just bubble out to the caller
- self.m_readurl.side_effect = Exception('oh no')
- with self.assertRaises(Exception) as excinfo:
- oracle._add_network_config_from_opc_imds({})
- self.assertEqual(str(excinfo.exception), 'oh no')
-
- def test_empty_response(self):
- # empty response error should just bubble out to the caller
- self.m_readurl.return_value = ''
- with self.assertRaises(Exception):
- oracle._add_network_config_from_opc_imds([])
-
- def test_invalid_json(self):
- # invalid JSON error should just bubble out to the caller
- self.m_readurl.return_value = '{'
- with self.assertRaises(Exception):
- oracle._add_network_config_from_opc_imds([])
-
- def test_no_secondary_nics_does_not_mutate_input(self):
- self.m_readurl.return_value = json.dumps([{}])
- # We test this by passing in a non-dict to ensure that no dict
+class TestNetworkConfigFromOpcImds:
+ def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds):
+ oracle_ds._vnics_data = [{}]
+        # We test this by using a non-dict to ensure that no dict
# operations are used; failure would be seen as exceptions
- oracle._add_network_config_from_opc_imds(object())
+ oracle_ds._network_config = object()
+ oracle_ds._add_network_config_from_opc_imds()
- def test_bare_metal_machine_skipped(self):
+ def test_bare_metal_machine_skipped(self, oracle_ds, caplog):
# nicIndex in the first entry indicates a bare metal machine
- self.m_readurl.return_value = OPC_BM_SECONDARY_VNIC_RESPONSE
- # We test this by passing in a non-dict to ensure that no dict
+ oracle_ds._vnics_data = json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)
+ # We test this by using a non-dict to ensure that no dict
# operations are used
- self.assertFalse(oracle._add_network_config_from_opc_imds(object()))
- self.assertIn('bare metal machine', self.logs.getvalue())
+ oracle_ds._network_config = object()
+ oracle_ds._add_network_config_from_opc_imds()
+ assert 'bare metal machine' in caplog.text
- def test_missing_mac_skipped(self):
- self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
- self.m_get_interfaces_by_mac.return_value = {}
+ def test_missing_mac_skipped(self, oracle_ds, caplog):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
- network_config = {'version': 1, 'config': [{'primary': 'nic'}]}
- oracle._add_network_config_from_opc_imds(network_config)
+ oracle_ds._network_config = {
+ 'version': 1, 'config': [{'primary': 'nic'}]
+ }
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
+ oracle_ds._add_network_config_from_opc_imds()
- self.assertEqual(1, len(network_config['config']))
- self.assertIn(
- 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping',
- self.logs.getvalue())
+ assert 1 == len(oracle_ds.network_config['config'])
+ assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \
+ caplog.text
- def test_missing_mac_skipped_v2(self):
- self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
- self.m_get_interfaces_by_mac.return_value = {}
+ def test_missing_mac_skipped_v2(self, oracle_ds, caplog):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
- network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}}
- oracle._add_network_config_from_opc_imds(network_config)
+ oracle_ds._network_config = {
+ 'version': 2, 'ethernets': {'primary': {'nic': {}}}
+ }
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
+ oracle_ds._add_network_config_from_opc_imds()
- self.assertEqual(1, len(network_config['ethernets']))
- self.assertIn(
- 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping',
- self.logs.getvalue())
+ assert 1 == len(oracle_ds.network_config['ethernets'])
+ assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \
+ caplog.text
- def test_secondary_nic(self):
- self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
+ def test_secondary_nic(self, oracle_ds):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+ oracle_ds._network_config = {
+ 'version': 1, 'config': [{'primary': 'nic'}]
}
-
- network_config = {'version': 1, 'config': [{'primary': 'nic'}]}
- oracle._add_network_config_from_opc_imds(network_config)
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name}):
+ oracle_ds._add_network_config_from_opc_imds()
# The input is mutated
- self.assertEqual(2, len(network_config['config']))
+ assert 2 == len(oracle_ds.network_config['config'])
- secondary_nic_cfg = network_config['config'][1]
- self.assertEqual(nic_name, secondary_nic_cfg['name'])
- self.assertEqual('physical', secondary_nic_cfg['type'])
- self.assertEqual(mac_addr, secondary_nic_cfg['mac_address'])
- self.assertEqual(9000, secondary_nic_cfg['mtu'])
+ secondary_nic_cfg = oracle_ds.network_config['config'][1]
+ assert nic_name == secondary_nic_cfg['name']
+ assert 'physical' == secondary_nic_cfg['type']
+ assert mac_addr == secondary_nic_cfg['mac_address']
+ assert 9000 == secondary_nic_cfg['mtu']
- self.assertEqual(1, len(secondary_nic_cfg['subnets']))
+ assert 1 == len(secondary_nic_cfg['subnets'])
subnet_cfg = secondary_nic_cfg['subnets'][0]
# These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- self.assertEqual('10.0.0.231', subnet_cfg['address'])
+ assert '10.0.0.231' == subnet_cfg['address']
- def test_secondary_nic_v2(self):
- self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
+ def test_secondary_nic_v2(self, oracle_ds):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+ oracle_ds._network_config = {
+ 'version': 2, 'ethernets': {'primary': {'nic': {}}}
}
-
- network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}}
- oracle._add_network_config_from_opc_imds(network_config)
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name}):
+ oracle_ds._add_network_config_from_opc_imds()
# The input is mutated
- self.assertEqual(2, len(network_config['ethernets']))
+ assert 2 == len(oracle_ds.network_config['ethernets'])
- secondary_nic_cfg = network_config['ethernets']['ens3']
- self.assertFalse(secondary_nic_cfg['dhcp4'])
- self.assertFalse(secondary_nic_cfg['dhcp6'])
- self.assertEqual(mac_addr, secondary_nic_cfg['match']['macaddress'])
- self.assertEqual(9000, secondary_nic_cfg['mtu'])
+ secondary_nic_cfg = oracle_ds.network_config['ethernets']['ens3']
+ assert secondary_nic_cfg['dhcp4'] is False
+ assert secondary_nic_cfg['dhcp6'] is False
+ assert mac_addr == secondary_nic_cfg['match']['macaddress']
+ assert 9000 == secondary_nic_cfg['mtu']
- self.assertEqual(1, len(secondary_nic_cfg['addresses']))
+ assert 1 == len(secondary_nic_cfg['addresses'])
# These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- self.assertEqual('10.0.0.231', secondary_nic_cfg['addresses'][0])
+ assert '10.0.0.231' == secondary_nic_cfg['addresses'][0]
class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
- with_logs = True
-
def setUp(self):
super(TestNetworkConfigFiltersNetFailover, self).setUp()
self.add_patch(DS_PATH + '.get_interfaces_by_mac',
@@ -732,4 +408,378 @@ class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
self.assertEqual(expected_cfg, netcfg)
+def _mock_v2_urls(httpretty):
+ def instance_callback(request, uri, response_headers):
+ assert request.headers.get("Authorization") == "Bearer Oracle"
+ return [200, response_headers, OPC_V2_METADATA]
+
+ def vnics_callback(request, uri, response_headers):
+ assert request.headers.get("Authorization") == "Bearer Oracle"
+ return [200, response_headers, OPC_BM_SECONDARY_VNIC_RESPONSE]
+
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ body=instance_callback
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/vnics/",
+ body=vnics_callback
+ )
+
+
+def _mock_no_v2_urls(httpretty):
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ status=404,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/instance/",
+ body=OPC_V1_METADATA
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/vnics/",
+ body=OPC_BM_SECONDARY_VNIC_RESPONSE
+ )
+
+
+class TestReadOpcMetadata:
+ # See https://docs.pytest.org/en/stable/example
+ # /parametrize.html#parametrizing-conditional-raising
+ does_not_raise = ExitStack
+
+ @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
+ @pytest.mark.parametrize(
+ 'version,setup_urls,instance_data,fetch_vnics,vnics_data', [
+ (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
+ (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), False, None),
+ (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
+ (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), False, None),
+ ]
+ )
+ def test_metadata_returned(
+ self, version, setup_urls, instance_data,
+ fetch_vnics, vnics_data, httpretty
+ ):
+ setup_urls(httpretty)
+ metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics)
+
+ assert version == metadata.version
+ assert instance_data == metadata.instance_data
+ assert vnics_data == metadata.vnics_data
+
+ # No need to actually wait between retries in the tests
+ @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
+ @pytest.mark.parametrize(
+ "v2_failure_count,v1_failure_count,expected_body,expectation",
+ [
+ (1, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
+ (2, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
+ (3, 0, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 1, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 2, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 3, None, pytest.raises(UrlError)),
+ ]
+ )
+ def test_retries(self, v2_failure_count, v1_failure_count,
+ expected_body, expectation, httpretty):
+ v2_responses = [httpretty.Response("", status=404)] * v2_failure_count
+ v2_responses.append(httpretty.Response(OPC_V2_METADATA))
+ v1_responses = [httpretty.Response("", status=404)] * v1_failure_count
+ v1_responses.append(httpretty.Response(OPC_V1_METADATA))
+
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/instance/",
+ responses=v1_responses,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ responses=v2_responses,
+ )
+ with expectation:
+ assert expected_body == oracle.read_opc_metadata().instance_data
+
+
+class TestCommon_GetDataBehaviour:
+ """This test class tests behaviour common to iSCSI and non-iSCSI root.
+
+ It defines a fixture, parameterized_oracle_ds, which is used in all the
+ tests herein to test that the commonly expected behaviour is the same with
+ iSCSI root and without.
+
+ (As non-iSCSI root behaviour is a superset of iSCSI root behaviour this
+ class is implicitly also testing all iSCSI root behaviour so there is no
+ separate class for that case.)
+ """
+
+ @pytest.yield_fixture(params=[True, False])
+ def parameterized_oracle_ds(self, request, oracle_ds):
+ """oracle_ds parameterized for iSCSI and non-iSCSI root respectively"""
+ is_iscsi_root = request.param
+ with ExitStack() as stack:
+ stack.enter_context(
+ mock.patch(
+ DS_PATH + "._is_iscsi_root", return_value=is_iscsi_root
+ )
+ )
+ if not is_iscsi_root:
+ stack.enter_context(
+ mock.patch(DS_PATH + ".net.find_fallback_nic")
+ )
+ stack.enter_context(
+ mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
+ )
+ yield oracle_ds
+
+ @mock.patch(
+ DS_PATH + "._is_platform_viable", mock.Mock(return_value=False)
+ )
+ def test_false_if_platform_not_viable(
+ self, parameterized_oracle_ds,
+ ):
+ assert not parameterized_oracle_ds._get_data()
+
+ @pytest.mark.parametrize(
+ "keyname,expected_value",
+ (
+ ("availability-zone", "phx-ad-3"),
+ ("launch-index", 0),
+ ("local-hostname", "instance-20200320-1400"),
+ (
+ "instance-id",
+ "ocid1.instance.oc1.phx"
+ ".anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED",
+ ),
+ ("name", "instance-20200320-1400"),
+ (
+ "public_keys",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated",
+ ),
+ ),
+ )
+ def test_metadata_keys_set_correctly(
+ self, keyname, expected_value, parameterized_oracle_ds,
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert expected_value == parameterized_oracle_ds.metadata[keyname]
+
+ @pytest.mark.parametrize(
+ "attribute_name,expected_value",
+ [
+ ("_crawled_metadata", json.loads(OPC_V2_METADATA)),
+ (
+ "userdata_raw",
+ base64.b64decode(b"IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"),
+ ),
+ ("system_uuid", "my-test-uuid"),
+ ],
+ )
+ @mock.patch(
+ DS_PATH + "._read_system_uuid", mock.Mock(return_value="my-test-uuid")
+ )
+ def test_attributes_set_correctly(
+ self, attribute_name, expected_value, parameterized_oracle_ds,
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert expected_value == getattr(
+ parameterized_oracle_ds, attribute_name
+ )
+
+ @pytest.mark.parametrize(
+ "ssh_keys,expected_value",
+ [
+ # No SSH keys in metadata => no keys detected
+ (None, []),
+ # Empty SSH keys in metadata => no keys detected
+ ("", []),
+ # Single SSH key in metadata => single key detected
+ ("ssh-rsa ... test@test", ["ssh-rsa ... test@test"]),
+ # Multiple SSH keys in metadata => multiple keys detected
+ (
+ "ssh-rsa ... test@test\nssh-rsa ... test2@test2",
+ ["ssh-rsa ... test@test", "ssh-rsa ... test2@test2"],
+ ),
+ ],
+ )
+ def test_public_keys_handled_correctly(
+ self, ssh_keys, expected_value, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ if ssh_keys is None:
+ del instance_data["metadata"]["ssh_authorized_keys"]
+ else:
+ instance_data["metadata"]["ssh_authorized_keys"] = ssh_keys
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert (
+ expected_value == parameterized_oracle_ds.get_public_ssh_keys()
+ )
+
+ def test_missing_user_data_handled_gracefully(
+ self, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ del instance_data["metadata"]["user_data"]
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+
+ assert parameterized_oracle_ds.userdata_raw is None
+
+ def test_missing_metadata_handled_gracefully(
+ self, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ del instance_data["metadata"]
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+
+ assert parameterized_oracle_ds.userdata_raw is None
+ assert [] == parameterized_oracle_ds.get_public_ssh_keys()
+
+
+@mock.patch(DS_PATH + "._is_iscsi_root", lambda: False)
+class TestNonIscsiRoot_GetDataBehaviour:
+ @mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
+ @mock.patch(DS_PATH + ".net.find_fallback_nic")
+ def test_read_opc_metadata_called_with_ephemeral_dhcp(
+ self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds
+ ):
+ in_context_manager = False
+
+ def enter_context_manager():
+ nonlocal in_context_manager
+ in_context_manager = True
+
+ def exit_context_manager(*args):
+ nonlocal in_context_manager
+ in_context_manager = False
+
+ m_EphemeralDHCPv4.return_value.__enter__.side_effect = (
+ enter_context_manager
+ )
+ m_EphemeralDHCPv4.return_value.__exit__.side_effect = (
+ exit_context_manager
+ )
+
+ def assert_in_context_manager(**kwargs):
+ assert in_context_manager
+ return mock.MagicMock()
+
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(side_effect=assert_in_context_manager),
+ ):
+ assert oracle_ds._get_data()
+
+ assert [
+ mock.call(m_find_fallback_nic.return_value)
+ ] == m_EphemeralDHCPv4.call_args_list
+
+
+@mock.patch(DS_PATH + ".get_interfaces_by_mac", lambda: {})
+@mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
+class TestNetworkConfig:
+ def test_network_config_cached(self, m_read_initramfs_config, oracle_ds):
+ """.network_config should be cached"""
+ assert 0 == m_read_initramfs_config.call_count
+ oracle_ds.network_config # pylint: disable=pointless-statement
+ assert 1 == m_read_initramfs_config.call_count
+ oracle_ds.network_config # pylint: disable=pointless-statement
+ assert 1 == m_read_initramfs_config.call_count
+
+ def test_network_cmdline(self, m_read_initramfs_config, oracle_ds):
+ """network_config should prefer initramfs config over fallback"""
+ ncfg = {"version": 1, "config": [{"a": "b"}]}
+ m_read_initramfs_config.return_value = copy.deepcopy(ncfg)
+
+ assert ncfg == oracle_ds.network_config
+ assert 0 == oracle_ds.distro.generate_fallback_config.call_count
+
+ def test_network_fallback(self, m_read_initramfs_config, oracle_ds):
+ """network_config should prefer initramfs config over fallback"""
+ ncfg = {"version": 1, "config": [{"a": "b"}]}
+
+ m_read_initramfs_config.return_value = None
+ oracle_ds.distro.generate_fallback_config.return_value = copy.deepcopy(
+ ncfg
+ )
+
+ assert ncfg == oracle_ds.network_config
+
+ @pytest.mark.parametrize(
+ "configure_secondary_nics,expect_secondary_nics",
+ [(True, True), (False, False), (None, False)],
+ )
+ def test_secondary_nic_addition(
+ self,
+ m_read_initramfs_config,
+ configure_secondary_nics,
+ expect_secondary_nics,
+ oracle_ds,
+ ):
+ """Test that _add_network_config_from_opc_imds is called as expected
+
+ (configure_secondary_nics=None is used to test the default behaviour.)
+ """
+ m_read_initramfs_config.return_value = {"version": 1, "config": []}
+
+ if configure_secondary_nics is not None:
+ oracle_ds.ds_cfg[
+ "configure_secondary_nics"
+ ] = configure_secondary_nics
+
+ def side_effect(self):
+ self._network_config["secondary_added"] = mock.sentinel.needle
+
+ oracle_ds._vnics_data = 'DummyData'
+ with mock.patch.object(
+ oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
+ new=side_effect,
+ ):
+ was_secondary_added = "secondary_added" in oracle_ds.network_config
+ assert expect_secondary_nics == was_secondary_added
+
+ def test_secondary_nic_failure_isnt_blocking(
+ self,
+ m_read_initramfs_config,
+ caplog,
+ oracle_ds,
+ ):
+ oracle_ds.ds_cfg["configure_secondary_nics"] = True
+ oracle_ds._vnics_data = "DummyData"
+
+ with mock.patch.object(
+ oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
+ side_effect=Exception()
+ ):
+ network_config = oracle_ds.network_config
+ assert network_config == m_read_initramfs_config.return_value
+ assert "Failed to parse secondary network configuration" in caplog.text
+
+ def test_ds_network_cfg_preferred_over_initramfs(self, _m):
+ """Ensure that DS net config is preferred over initramfs config"""
+ config_sources = oracle.DataSourceOracle.network_config_sources
+ ds_idx = config_sources.index(NetworkConfigSource.ds)
+ initramfs_idx = config_sources.index(NetworkConfigSource.initramfs)
+ assert ds_idx < initramfs_idx
+
+
# vi: ts=4 expandtab
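
For orientation when reading the Oracle tests above: OpcMetadata is a namedtuple of (version, instance_data, vnics_data), and read_opc_metadata() tries the v2 IMDS endpoint (with the 'Authorization: Bearer Oracle' header) before falling back to v1, which is exactly what the TestReadOpcMetadata retry matrix exercises. A consumer sketch (meaningful only on an OCI instance where the IMDS is reachable):

from cloudinit.sources import DataSourceOracle as oracle

# Tries v2 first, falls back to v1 after repeated failures.
md = oracle.read_opc_metadata(fetch_vnics_data=False)
print(md.version)                    # 1 or 2, depending on which answered
print(md.instance_data["hostname"])  # parsed /opc/v*/instance/ JSON body
print(md.vnics_data)                 # None when fetch_vnics_data=False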