Diffstat (limited to 'cloudinit/sources')
-rwxr-xr-x [-rw-r--r--]  cloudinit/sources/DataSourceAzure.py                          | 474
-rw-r--r--               cloudinit/sources/DataSourceCloudSigma.py                     |   8
-rw-r--r--               cloudinit/sources/DataSourceCloudStack.py                     |  26
-rw-r--r--               cloudinit/sources/DataSourceConfigDrive.py                    |  15
-rw-r--r--               cloudinit/sources/DataSourceEc2.py                            | 240
-rw-r--r--               cloudinit/sources/DataSourceExoscale.py                       | 268
-rw-r--r--               cloudinit/sources/DataSourceGCE.py                            |  30
-rw-r--r--               cloudinit/sources/DataSourceHetzner.py                        |   3
-rw-r--r--               cloudinit/sources/DataSourceIBMCloud.py                       |   2
-rw-r--r--               cloudinit/sources/DataSourceMAAS.py                           |   2
-rw-r--r--               cloudinit/sources/DataSourceNoCloud.py                        |  40
-rw-r--r--               cloudinit/sources/DataSourceOVF.py                            |  41
-rw-r--r--               cloudinit/sources/DataSourceOpenNebula.py                     |   4
-rw-r--r--               cloudinit/sources/DataSourceOpenStack.py                      |   2
-rw-r--r--               cloudinit/sources/DataSourceOracle.py                         | 170
-rw-r--r--               cloudinit/sources/DataSourceRbxCloud.py                       | 251
-rw-r--r--               cloudinit/sources/DataSourceScaleway.py                       |  20
-rw-r--r--               cloudinit/sources/DataSourceSmartOS.py                        |   8
-rw-r--r--               cloudinit/sources/__init__.py                                 |  50
-rwxr-xr-x [-rw-r--r--]  cloudinit/sources/helpers/azure.py                            | 345
-rw-r--r--               cloudinit/sources/helpers/openstack.py                        |  40
-rw-r--r--               cloudinit/sources/helpers/vmware/imc/config_custom_script.py  | 143
-rw-r--r--               cloudinit/sources/helpers/vmware/imc/guestcust_error.py       |   1
-rw-r--r--               cloudinit/sources/helpers/vmware/imc/guestcust_util.py        |  37
-rw-r--r--               cloudinit/sources/tests/test_init.py                          |  35
-rw-r--r--               cloudinit/sources/tests/test_oracle.py                        | 424
26 files changed, 2217 insertions, 462 deletions
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index a06e6e1f..61ec522a 100644..100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -13,7 +13,6 @@ import os
import os.path
import re
from time import time
-from subprocess import call
from xml.dom import minidom
import xml.etree.ElementTree as ET
@@ -22,10 +21,20 @@ from cloudinit import net
from cloudinit.event import EventType
from cloudinit.net.dhcp import EphemeralDHCPv4
from cloudinit import sources
-from cloudinit.sources.helpers.azure import get_metadata_from_fabric
from cloudinit.sources.helpers import netlink
from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
from cloudinit import util
+from cloudinit.reporting import events
+
+from cloudinit.sources.helpers.azure import (
+ azure_ds_reporter,
+ azure_ds_telemetry_reporter,
+ get_metadata_from_fabric,
+ get_boot_telemetry,
+ get_system_info,
+ report_diagnostic_event,
+ EphemeralDHCPv4WithReporting,
+ is_byte_swapped)
LOG = logging.getLogger(__name__)
@@ -54,8 +63,14 @@ AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
AGENT_SEED_DIR = '/var/lib/waagent'
+
+# In the event that the IMDS primary server is not
+# available, it takes 1s to fall back to the secondary one
+IMDS_TIMEOUT_IN_SECONDS = 2
IMDS_URL = "http://169.254.169.254/metadata/"
+PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0"
+
# List of static scripts and network config artifacts created by
# stock ubuntu supported images.
UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
@@ -196,6 +211,8 @@ if util.is_FreeBSD():
RESOURCE_DISK_PATH = "/dev/" + res_disk
else:
LOG.debug("resource disk is None")
+ # TODO Find where platform entropy data is surfaced
+ PLATFORM_ENTROPY_SOURCE = None
BUILTIN_DS_CONFIG = {
'agent_command': AGENT_START_BUILTIN,
@@ -242,6 +259,7 @@ def set_hostname(hostname, hostname_command='hostname'):
util.subp([hostname_command, hostname])
+@azure_ds_telemetry_reporter
@contextlib.contextmanager
def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
"""
@@ -269,11 +287,6 @@ class DataSourceAzure(sources.DataSource):
dsname = 'Azure'
_negotiated = False
_metadata_imds = sources.UNSET
- process_name = 'dhclient'
-
- tmpps = os.popen("ps -Af").read()
- if process_name not in tmpps[:]:
- call(['/sbin/dhclient', DEFAULT_PRIMARY_NIC])
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -293,6 +306,7 @@ class DataSourceAzure(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
+ @azure_ds_telemetry_reporter
def bounce_network_with_azure_hostname(self):
# When using cloud-init to provision, we have to set the hostname from
# the metadata and "bounce" the network to force DDNS to update via
@@ -318,6 +332,7 @@ class DataSourceAzure(sources.DataSource):
util.logexc(LOG, "handling set_hostname failed")
return False
+ @azure_ds_telemetry_reporter
def get_metadata_from_agent(self):
temp_hostname = self.metadata.get('local-hostname')
agent_cmd = self.ds_cfg['agent_command']
@@ -340,22 +355,25 @@ class DataSourceAzure(sources.DataSource):
for pk in self.cfg.get('_pubkeys', []):
if pk.get('value', None):
key_value = pk['value']
- LOG.debug("ssh authentication: using value from fabric")
+ LOG.debug("SSH authentication: using value from fabric")
else:
bname = str(pk['fingerprint'] + ".crt")
fp_files += [os.path.join(ddir, bname)]
- LOG.debug("ssh authentication: "
- "using fingerprint from fabirc")
-
- # wait very long for public SSH keys to arrive
- # https://bugs.launchpad.net/cloud-init/+bug/1717611
- missing = util.log_time(logfunc=LOG.debug,
- msg="waiting for SSH public key files",
- func=util.wait_for_files,
- args=(fp_files, 900))
-
- if len(missing):
- LOG.warning("Did not find files, but going on: %s", missing)
+ LOG.debug("SSH authentication: "
+ "using fingerprint from fabric")
+
+ with events.ReportEventStack(
+ name="waiting-for-ssh-public-key",
+ description="wait for agents to retrieve SSH keys",
+ parent=azure_ds_reporter):
+ # wait very long for public SSH keys to arrive
+ # https://bugs.launchpad.net/cloud-init/+bug/1717611
+ missing = util.log_time(logfunc=LOG.debug,
+ msg="waiting for SSH public key files",
+ func=util.wait_for_files,
+ args=(fp_files, 900))
+ if len(missing):
+ LOG.warning("Did not find files, but going on: %s", missing)
metadata = {}
metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
@@ -369,6 +387,7 @@ class DataSourceAzure(sources.DataSource):
subplatform_type = 'seed-dir'
return '%s (%s)' % (subplatform_type, self.seed)
+ @azure_ds_telemetry_reporter
def crawl_metadata(self):
"""Walk all instance metadata sources returning a dict on success.
@@ -399,19 +418,24 @@ class DataSourceAzure(sources.DataSource):
elif cdev.startswith("/dev/"):
if util.is_FreeBSD():
ret = util.mount_cb(cdev, load_azure_ds_dir,
- mtype="udf", sync=False)
+ mtype="udf")
else:
ret = util.mount_cb(cdev, load_azure_ds_dir)
else:
ret = load_azure_ds_dir(cdev)
except NonAzureDataSource:
+ report_diagnostic_event(
+ "Did not find Azure data source in %s" % cdev)
continue
except BrokenAzureDataSource as exc:
msg = 'BrokenAzureDataSource: %s' % exc
+ report_diagnostic_event(msg)
raise sources.InvalidMetaDataException(msg)
except util.MountFailedError:
- LOG.warning("%s was not mountable", cdev)
+ msg = '%s was not mountable' % cdev
+ report_diagnostic_event(msg)
+ LOG.warning(msg)
continue
perform_reprovision = reprovision or self._should_reprovision(ret)
@@ -419,10 +443,11 @@ class DataSourceAzure(sources.DataSource):
if util.is_FreeBSD():
msg = "Free BSD is not supported for PPS VMs"
LOG.error(msg)
+ report_diagnostic_event(msg)
raise sources.InvalidMetaDataException(msg)
ret = self._reprovision()
imds_md = get_metadata_from_imds(
- self.fallback_interface, retries=3)
+ self.fallback_interface, retries=10)
(md, userdata_raw, cfg, files) = ret
self.seed = cdev
crawled_data.update({
@@ -437,7 +462,9 @@ class DataSourceAzure(sources.DataSource):
break
if not found:
- raise sources.InvalidMetaDataException('No Azure metadata found')
+ msg = 'No Azure metadata found'
+ report_diagnostic_event(msg)
+ raise sources.InvalidMetaDataException(msg)
if found == ddir:
LOG.debug("using files cached in %s", ddir)
@@ -445,8 +472,7 @@ class DataSourceAzure(sources.DataSource):
seed = _get_random_seed()
if seed:
crawled_data['metadata']['random_seed'] = seed
- crawled_data['metadata']['instance-id'] = util.read_dmi_data(
- 'system-uuid')
+ crawled_data['metadata']['instance-id'] = self._iid()
if perform_reprovision:
LOG.info("Reporting ready to Azure after getting ReprovisionData")
@@ -456,9 +482,14 @@ class DataSourceAzure(sources.DataSource):
self._report_ready(lease=self._ephemeral_dhcp_ctx.lease)
self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral
else:
- with EphemeralDHCPv4() as lease:
- self._report_ready(lease=lease)
-
+ try:
+ with EphemeralDHCPv4WithReporting(
+ azure_ds_reporter) as lease:
+ self._report_ready(lease=lease)
+ except Exception as e:
+ report_diagnostic_event(
+ "exception while reporting ready: %s" % e)
+ raise
return crawled_data
def _is_platform_viable(self):
@@ -470,6 +501,7 @@ class DataSourceAzure(sources.DataSource):
super(DataSourceAzure, self).clear_cached_attrs(attr_defaults)
self._metadata_imds = sources.UNSET
+ @azure_ds_telemetry_reporter
def _get_data(self):
"""Crawl and process datasource metadata caching metadata as attrs.
@@ -479,6 +511,16 @@ class DataSourceAzure(sources.DataSource):
if not self._is_platform_viable():
return False
try:
+ get_boot_telemetry()
+ except Exception as e:
+ LOG.warning("Failed to get boot telemetry: %s", e)
+
+ try:
+ get_system_info()
+ except Exception as e:
+ LOG.warning("Failed to get system information: %s", e)
+
+ try:
crawled_data = util.log_time(
logfunc=LOG.debug, msg='Crawl of metadata service',
func=self.crawl_metadata)
@@ -516,6 +558,17 @@ class DataSourceAzure(sources.DataSource):
# quickly (local check only) if self.instance_id is still valid
return sources.instance_id_matches_system_uuid(self.get_instance_id())
+ def _iid(self, previous=None):
+ prev_iid_path = os.path.join(
+ self.paths.get_cpath('data'), 'instance-id')
+ iid = util.read_dmi_data('system-uuid')
+ if os.path.exists(prev_iid_path):
+ previous = util.load_file(prev_iid_path).strip()
+ if is_byte_swapped(previous, iid):
+ return previous
+ return iid
+
+ @azure_ds_telemetry_reporter
def setup(self, is_new_instance):
if self._negotiated is False:
LOG.debug("negotiating for %s (new_instance=%s)",
@@ -536,27 +589,55 @@ class DataSourceAzure(sources.DataSource):
headers = {"Metadata": "true"}
nl_sock = None
report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
+ self.imds_logging_threshold = 1
+ self.imds_poll_counter = 1
+ dhcp_attempts = 0
+ vnet_switched = False
+ return_val = None
def exc_cb(msg, exception):
if isinstance(exception, UrlError) and exception.code == 404:
+ if self.imds_poll_counter == self.imds_logging_threshold:
+ # Reducing the logging frequency as we are polling IMDS
+ self.imds_logging_threshold *= 2
+ LOG.debug("Call to IMDS with arguments %s failed "
+ "with status code %s after %s retries",
+ msg, exception.code, self.imds_poll_counter)
+ LOG.debug("Backing off logging threshold for the same "
+ "exception to %d", self.imds_logging_threshold)
+ self.imds_poll_counter += 1
return True
+
# If we get an exception while trying to call IMDS, we
# call DHCP and setup the ephemeral network to acquire the new IP.
+ LOG.debug("Call to IMDS with arguments %s failed with "
+ "status code %s", msg, exception.code)
+ report_diagnostic_event("polling IMDS failed with exception %s"
+ % exception.code)
return False
LOG.debug("Wait for vnetswitch to happen")
while True:
try:
- # Save our EphemeralDHCPv4 context so we avoid repeated dhcp
- self._ephemeral_dhcp_ctx = EphemeralDHCPv4()
- lease = self._ephemeral_dhcp_ctx.obtain_lease()
+ # Save our EphemeralDHCPv4 context to avoid repeated dhcp
+ with events.ReportEventStack(
+ name="obtain-dhcp-lease",
+ description="obtain dhcp lease",
+ parent=azure_ds_reporter):
+ self._ephemeral_dhcp_ctx = EphemeralDHCPv4()
+ lease = self._ephemeral_dhcp_ctx.obtain_lease()
+
+ if vnet_switched:
+ dhcp_attempts += 1
if report_ready:
try:
nl_sock = netlink.create_bound_netlink_socket()
except netlink.NetlinkCreateSocketError as e:
+ report_diagnostic_event(e)
LOG.warning(e)
self._ephemeral_dhcp_ctx.clean_network()
- return
+ break
+
path = REPORTED_READY_MARKER_FILE
LOG.info(
"Creating a marker file to report ready: %s", path)
@@ -564,17 +645,33 @@ class DataSourceAzure(sources.DataSource):
pid=os.getpid(), time=time()))
self._report_ready(lease=lease)
report_ready = False
- try:
- netlink.wait_for_media_disconnect_connect(
- nl_sock, lease['interface'])
- except AssertionError as error:
- LOG.error(error)
- return
+
+ with events.ReportEventStack(
+ name="wait-for-media-disconnect-connect",
+ description="wait for vnet switch",
+ parent=azure_ds_reporter):
+ try:
+ netlink.wait_for_media_disconnect_connect(
+ nl_sock, lease['interface'])
+ except AssertionError as error:
+ report_diagnostic_event(error)
+ LOG.error(error)
+ break
+
+ vnet_switched = True
self._ephemeral_dhcp_ctx.clean_network()
else:
- return readurl(url, timeout=1, headers=headers,
- exception_cb=exc_cb, infinite=True,
- log_req_resp=False).contents
+ with events.ReportEventStack(
+ name="get-reprovision-data-from-imds",
+ description="get reprovision data from imds",
+ parent=azure_ds_reporter):
+ return_val = readurl(url,
+ timeout=IMDS_TIMEOUT_IN_SECONDS,
+ headers=headers,
+ exception_cb=exc_cb,
+ infinite=True,
+ log_req_resp=False).contents
+ break
except UrlError:
# Teardown our EphemeralDHCPv4 context on failure as we retry
self._ephemeral_dhcp_ctx.clean_network()
@@ -583,6 +680,15 @@ class DataSourceAzure(sources.DataSource):
if nl_sock:
nl_sock.close()
+ if vnet_switched:
+ report_diagnostic_event("attempted dhcp %d times after reuse" %
+ dhcp_attempts)
+ report_diagnostic_event("polled imds %d times after reuse" %
+ self.imds_poll_counter)
+
+ return return_val
+
+ @azure_ds_telemetry_reporter
def _report_ready(self, lease):
"""Tells the fabric provisioning has completed """
try:
@@ -620,9 +726,14 @@ class DataSourceAzure(sources.DataSource):
def _reprovision(self):
"""Initiate the reprovisioning workflow."""
contents = self._poll_imds()
- md, ud, cfg = read_azure_ovf(contents)
- return (md, ud, cfg, {'ovf-env.xml': contents})
-
+ with events.ReportEventStack(
+ name="reprovisioning-read-azure-ovf",
+ description="read azure ovf during reprovisioning",
+ parent=azure_ds_reporter):
+ md, ud, cfg = read_azure_ovf(contents)
+ return (md, ud, cfg, {'ovf-env.xml': contents})
+
+ @azure_ds_telemetry_reporter
def _negotiate(self):
"""Negotiate with fabric and return data from it.
@@ -633,9 +744,11 @@ class DataSourceAzure(sources.DataSource):
if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
self.bounce_network_with_azure_hostname()
+ pubkey_info = self.cfg.get('_pubkeys', None)
metadata_func = partial(get_metadata_from_fabric,
fallback_lease_file=self.
- dhclient_lease_file)
+ dhclient_lease_file,
+ pubkey_info=pubkey_info)
else:
metadata_func = self.get_metadata_from_agent
@@ -643,15 +756,20 @@ class DataSourceAzure(sources.DataSource):
self.ds_cfg['agent_command'])
try:
fabric_data = metadata_func()
- except Exception:
+ except Exception as e:
+ report_diagnostic_event(
+ "Error communicating with Azure fabric; You may experience "
+ "connectivity issues: %s" % e)
LOG.warning(
- "Error communicating with Azure fabric; You may experience."
+ "Error communicating with Azure fabric; You may experience "
"connectivity issues.", exc_info=True)
return False
+
util.del_file(REPORTED_READY_MARKER_FILE)
util.del_file(REPROVISION_MARKER_FILE)
return fabric_data
+ @azure_ds_telemetry_reporter
def activate(self, cfg, is_new_instance):
address_ephemeral_resize(is_new_instance=is_new_instance,
preserve_ntfs=self.ds_cfg.get(
@@ -659,6 +777,11 @@ class DataSourceAzure(sources.DataSource):
return
@property
+ def availability_zone(self):
+ return self.metadata.get(
+ 'imds', {}).get('compute', {}).get('platformFaultDomain')
+
+ @property
def network_config(self):
"""Generate a network config like net.generate_fallback_network() with
the following exceptions.
@@ -668,7 +791,7 @@ class DataSourceAzure(sources.DataSource):
2. Generate a fallback network config that does not include any of
the blacklisted devices.
"""
- if not self._network_config:
+ if not self._network_config or self._network_config == sources.UNSET:
if self.ds_cfg.get('apply_network_config'):
nc_src = self._metadata_imds
else:
@@ -676,6 +799,10 @@ class DataSourceAzure(sources.DataSource):
self._network_config = parse_network_config(nc_src)
return self._network_config
+ @property
+ def region(self):
+ return self.metadata.get('imds', {}).get('compute', {}).get('location')
+
def _partitions_on_device(devpath, maxnum=16):
# return a list of tuples (ptnum, path) for each part on devpath
@@ -690,12 +817,14 @@ def _partitions_on_device(devpath, maxnum=16):
return []
+@azure_ds_telemetry_reporter
def _has_ntfs_filesystem(devpath):
ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
LOG.debug('ntfs_devices found = %s', ntfs_devices)
return os.path.realpath(devpath) in ntfs_devices
+@azure_ds_telemetry_reporter
def can_dev_be_reformatted(devpath, preserve_ntfs):
"""Determine if the ephemeral drive at devpath should be reformatted.
@@ -744,43 +873,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
(cand_part, cand_path, devpath))
return False, msg
+ @azure_ds_telemetry_reporter
def count_files(mp):
ignored = set(['dataloss_warning_readme.txt'])
return len([f for f in os.listdir(mp) if f.lower() not in ignored])
bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
(cand_part, cand_path, devpath))
- try:
- file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
- update_env_for_mount={'LANG': 'C'})
- except util.MountFailedError as e:
- if "unknown filesystem type 'ntfs'" in str(e):
- return True, (bmsg + ' but this system cannot mount NTFS,'
- ' assuming there are no important files.'
- ' Formatting allowed.')
- return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
-
- if file_count != 0:
- LOG.warning("it looks like you're using NTFS on the ephemeral disk, "
- 'to ensure that filesystem does not get wiped, set '
- '%s.%s in config', '.'.join(DS_CFG_PATH),
- DS_CFG_KEY_PRESERVE_NTFS)
- return False, bmsg + ' but had %d files on it.' % file_count
+
+ with events.ReportEventStack(
+ name="mount-ntfs-and-count",
+ description="mount-ntfs-and-count",
+ parent=azure_ds_reporter) as evt:
+ try:
+ file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
+ update_env_for_mount={'LANG': 'C'})
+ except util.MountFailedError as e:
+ evt.description = "cannot mount ntfs"
+ if "unknown filesystem type 'ntfs'" in str(e):
+ return True, (bmsg + ' but this system cannot mount NTFS,'
+ ' assuming there are no important files.'
+ ' Formatting allowed.')
+ return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
+
+ if file_count != 0:
+ evt.description = "mounted and counted %d files" % file_count
+ LOG.warning("it looks like you're using NTFS on the ephemeral"
+ " disk, to ensure that filesystem does not get wiped,"
+ " set %s.%s in config", '.'.join(DS_CFG_PATH),
+ DS_CFG_KEY_PRESERVE_NTFS)
+ return False, bmsg + ' but had %d files on it.' % file_count
return True, bmsg + ' and had no important files. Safe for reformatting.'
+@azure_ds_telemetry_reporter
def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
is_new_instance=False, preserve_ntfs=False):
# wait for ephemeral disk to come up
naplen = .2
- missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen,
- log_pre="Azure ephemeral disk: ")
-
- if missing:
- LOG.warning("ephemeral device '%s' did not appear after %d seconds.",
- devpath, maxwait)
- return
+ with events.ReportEventStack(
+ name="wait-for-ephemeral-disk",
+ description="wait for ephemeral disk",
+ parent=azure_ds_reporter):
+ missing = util.wait_for_files([devpath],
+ maxwait=maxwait,
+ naplen=naplen,
+ log_pre="Azure ephemeral disk: ")
+
+ if missing:
+ LOG.warning("ephemeral device '%s' did"
+ " not appear after %d seconds.",
+ devpath, maxwait)
+ return
result = False
msg = None
@@ -808,6 +953,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
return
+@azure_ds_telemetry_reporter
def perform_hostname_bounce(hostname, cfg, prev_hostname):
# set the hostname to 'hostname' if it is not already set to that.
# then, if policy is not off, bounce the interface using command
@@ -843,6 +989,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
return True
+@azure_ds_telemetry_reporter
def crtfile_to_pubkey(fname, data=None):
pipeline = ('openssl x509 -noout -pubkey < "$0" |'
'ssh-keygen -i -m PKCS8 -f /dev/stdin')
@@ -851,6 +998,7 @@ def crtfile_to_pubkey(fname, data=None):
return out.rstrip()
+@azure_ds_telemetry_reporter
def pubkeys_from_crt_files(flist):
pubkeys = []
errors = []
@@ -866,6 +1014,7 @@ def pubkeys_from_crt_files(flist):
return pubkeys
+@azure_ds_telemetry_reporter
def write_files(datadir, files, dirmode=None):
def _redact_password(cnt, fname):
@@ -893,6 +1042,7 @@ def write_files(datadir, files, dirmode=None):
util.write_file(filename=fname, content=content, mode=0o600)
+@azure_ds_telemetry_reporter
def invoke_agent(cmd):
# this is a function itself to simplify patching it for test
if cmd:
@@ -912,16 +1062,19 @@ def find_child(node, filter_func):
return ret
+@azure_ds_telemetry_reporter
def load_azure_ovf_pubkeys(sshnode):
# This parses a 'SSH' node formatted like below, and returns
# an array of dicts.
- # [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
- # 'path': 'where/to/go'}]
+ # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
+ # 'path': '/where/to/go'}]
#
# <SSH><PublicKeys>
- # <PublicKey><Fingerprint>ABC</FingerPrint><Path>/ABC</Path>
+ # <PublicKey><Fingerprint>ABC</FingerPrint><Path>/x/y/z</Path>
# ...
# </PublicKeys></SSH>
+ # Under some circumstances, there may be a <Value> element along with the
+ # Fingerprint and Path. Pass those along if they appear.
results = find_child(sshnode, lambda n: n.localName == "PublicKeys")
if len(results) == 0:
return []
@@ -962,11 +1115,14 @@ def load_azure_ovf_pubkeys(sshnode):
return found
+@azure_ds_telemetry_reporter
def read_azure_ovf(contents):
try:
dom = minidom.parseString(contents)
except Exception as e:
- raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e)
+ error_str = "Invalid ovf-env.xml: %s" % e
+ report_diagnostic_event(error_str)
+ raise BrokenAzureDataSource(error_str)
results = find_child(dom.documentElement,
lambda n: n.localName == "ProvisioningSection")
@@ -986,8 +1142,8 @@ def read_azure_ovf(contents):
raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
if len(lpcs_nodes) > 1:
raise BrokenAzureDataSource("found '%d' %ss" %
- ("LinuxProvisioningConfigurationSet",
- len(lpcs_nodes)))
+ (len(lpcs_nodes),
+ "LinuxProvisioningConfigurationSet"))
lpcs = lpcs_nodes[0]
if not lpcs.hasChildNodes():
@@ -1047,9 +1203,10 @@ def read_azure_ovf(contents):
defuser = {}
if username:
defuser['name'] = username
- if password and DEF_PASSWD_REDACTION != password:
- defuser['passwd'] = encrypt_pass(password)
+ if password:
defuser['lock_passwd'] = False
+ if DEF_PASSWD_REDACTION != password:
+ defuser['passwd'] = encrypt_pass(password)
if defuser:
cfg['system_info'] = {'default_user': defuser}
@@ -1062,6 +1219,7 @@ def read_azure_ovf(contents):
return (md, ud, cfg)
+@azure_ds_telemetry_reporter
def _extract_preprovisioned_vm_setting(dom):
"""Read the preprovision flag from the ovf. It should not
exist unless true."""
@@ -1090,6 +1248,7 @@ def encrypt_pass(password, salt_id="$6$"):
return crypt.crypt(password, salt_id + util.rand_str(strlen=16))
+@azure_ds_telemetry_reporter
def _check_freebsd_cdrom(cdrom_dev):
"""Return boolean indicating path to cdrom device has content."""
try:
@@ -1101,18 +1260,31 @@ def _check_freebsd_cdrom(cdrom_dev):
return False
-def _get_random_seed():
+@azure_ds_telemetry_reporter
+def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
"""Return content random seed file if available, otherwise,
return None."""
# azure / hyper-v provides random data here
- # TODO. find the seed on FreeBSD platform
# now update ds_cfg to reflect contents pass in config
- if util.is_FreeBSD():
+ if source is None:
return None
- return util.load_file("/sys/firmware/acpi/tables/OEM0",
- quiet=True, decode=False)
+ seed = util.load_file(source, quiet=True, decode=False)
+
+ # The seed generally contains non-Unicode characters. load_file puts
+ # them into a str (in python 2) or bytes (in python 3). In python 2,
+ # bad octets in a str cause util.json_dumps() to throw an exception. In
+ # python 3, bytes is a non-serializable type, and the handler load_file
+ # uses applies b64 encoding *again* to handle it. The simplest solution
+ # is to just b64encode the data and then decode it to a serializable
+ # string. Same number of bits of entropy, just with 25% more zeroes.
+ # There's no need to undo this base64-encoding when the random seed is
+ # actually used in cc_seed_random.py.
+ seed = base64.b64encode(seed).decode()
+ return seed
+
+@azure_ds_telemetry_reporter
def list_possible_azure_ds_devs():
devlist = []
if util.is_FreeBSD():
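The comment in `_get_random_seed()` above is worth demonstrating: the raw bytes read from the ACPI OEM0 table are not JSON-serializable, so they are base64-encoded once before being stored in metadata. A self-contained example (the byte values are invented):

```python
import base64
import json

raw = b'\x8f\x00\xfe\x17entropy'      # invented non-UTF-8 bytes
try:
    json.dumps({'random_seed': raw})  # TypeError: bytes not serializable
except TypeError:
    pass
seed = base64.b64encode(raw).decode()  # same entropy, plain str
print(json.dumps({'random_seed': seed}))  # serializes cleanly
```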
@@ -1127,6 +1299,7 @@ def list_possible_azure_ds_devs():
return devlist
+@azure_ds_telemetry_reporter
def load_azure_ds_dir(source_dir):
ovf_file = os.path.join(source_dir, "ovf-env.xml")
@@ -1149,47 +1322,62 @@ def parse_network_config(imds_metadata):
@param: imds_metadata: Dict of content read from IMDS network service.
@return: Dictionary containing network version 2 standard configuration.
"""
- if imds_metadata != sources.UNSET and imds_metadata:
- netconfig = {'version': 2, 'ethernets': {}}
- LOG.debug('Azure: generating network configuration from IMDS')
- network_metadata = imds_metadata['network']
- for idx, intf in enumerate(network_metadata['interface']):
- nicname = 'eth{idx}'.format(idx=idx)
- dev_config = {}
- for addr4 in intf['ipv4']['ipAddress']:
- privateIpv4 = addr4['privateIpAddress']
- if privateIpv4:
- if dev_config.get('dhcp4', False):
- # Append static address config for nic > 1
- netPrefix = intf['ipv4']['subnet'][0].get(
- 'prefix', '24')
+ with events.ReportEventStack(
+ name="parse_network_config",
+ description="",
+ parent=azure_ds_reporter) as evt:
+ if imds_metadata != sources.UNSET and imds_metadata:
+ netconfig = {'version': 2, 'ethernets': {}}
+ LOG.debug('Azure: generating network configuration from IMDS')
+ network_metadata = imds_metadata['network']
+ for idx, intf in enumerate(network_metadata['interface']):
+ # First IPv4 and/or IPv6 address will be obtained via DHCP.
+ # Any additional IPs of each type will be set as static
+ # addresses.
+ nicname = 'eth{idx}'.format(idx=idx)
+ dhcp_override = {'route-metric': (idx + 1) * 100}
+ dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
+ 'dhcp6': False}
+ for addr_type in ('ipv4', 'ipv6'):
+ addresses = intf.get(addr_type, {}).get('ipAddress', [])
+ if addr_type == 'ipv4':
+ default_prefix = '24'
+ else:
+ default_prefix = '128'
+ if addresses:
+ dev_config['dhcp6'] = True
+ # non-primary interfaces should have a higher
+ # route-metric (cost) so default routes prefer
+ # primary nic due to lower route-metric value
+ dev_config['dhcp6-overrides'] = dhcp_override
+ for addr in addresses[1:]:
+ # Append static address config for ip > 1
+ netPrefix = intf[addr_type]['subnet'][0].get(
+ 'prefix', default_prefix)
+ privateIp = addr['privateIpAddress']
if not dev_config.get('addresses'):
dev_config['addresses'] = []
dev_config['addresses'].append(
'{ip}/{prefix}'.format(
- ip=privateIpv4, prefix=netPrefix))
- else:
- dev_config['dhcp4'] = True
- for addr6 in intf['ipv6']['ipAddress']:
- privateIpv6 = addr6['privateIpAddress']
- if privateIpv6:
- dev_config['dhcp6'] = True
- break
- if dev_config:
- mac = ':'.join(re.findall(r'..', intf['macAddress']))
- dev_config.update(
- {'match': {'macaddress': mac.lower()},
- 'set-name': nicname})
- netconfig['ethernets'][nicname] = dev_config
- else:
- blacklist = ['mlx4_core']
- LOG.debug('Azure: generating fallback configuration')
- # generate a network config, blacklist picking mlx4_core devs
- netconfig = net.generate_fallback_config(
- blacklist_drivers=blacklist, config_driver=True)
- return netconfig
+ ip=privateIp, prefix=netPrefix))
+ if dev_config:
+ mac = ':'.join(re.findall(r'..', intf['macAddress']))
+ dev_config.update(
+ {'match': {'macaddress': mac.lower()},
+ 'set-name': nicname})
+ netconfig['ethernets'][nicname] = dev_config
+ evt.description = "network config from imds"
+ else:
+ blacklist = ['mlx4_core']
+ LOG.debug('Azure: generating fallback configuration')
+ # generate a network config, blacklist picking mlx4_core devs
+ netconfig = net.generate_fallback_config(
+ blacklist_drivers=blacklist, config_driver=True)
+ evt.description = "network config from fallback"
+ return netconfig
+@azure_ds_telemetry_reporter
def get_metadata_from_imds(fallback_nic, retries):
"""Query Azure's network metadata service, returning a dictionary.
@@ -1210,29 +1398,39 @@ def get_metadata_from_imds(fallback_nic, retries):
if net.is_up(fallback_nic):
return util.log_time(**kwargs)
else:
- with EphemeralDHCPv4(fallback_nic):
- return util.log_time(**kwargs)
+ try:
+ with EphemeralDHCPv4WithReporting(
+ azure_ds_reporter, fallback_nic):
+ return util.log_time(**kwargs)
+ except Exception as e:
+ report_diagnostic_event("exception while getting metadata: %s" % e)
+ raise
+@azure_ds_telemetry_reporter
def _get_metadata_from_imds(retries):
url = IMDS_URL + "instance?api-version=2017-12-01"
headers = {"Metadata": "true"}
try:
response = readurl(
- url, timeout=1, headers=headers, retries=retries,
- exception_cb=retry_on_url_exc)
+ url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
+ retries=retries, exception_cb=retry_on_url_exc)
except Exception as e:
- LOG.debug('Ignoring IMDS instance metadata: %s', e)
+ msg = 'Ignoring IMDS instance metadata: %s' % e
+ report_diagnostic_event(msg)
+ LOG.debug(msg)
return {}
try:
return util.load_json(str(response))
- except json.decoder.JSONDecodeError:
+ except json.decoder.JSONDecodeError as e:
+ report_diagnostic_event('non-json imds response: %s' % e)
LOG.warning(
'Ignoring non-json IMDS instance metadata: %s', str(response))
return {}
+@azure_ds_telemetry_reporter
def maybe_remove_ubuntu_network_config_scripts(paths=None):
"""Remove Azure-specific ubuntu network config for non-primary nics.
@@ -1270,14 +1468,22 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
def _is_platform_viable(seed_dir):
- """Check platform environment to report if this datasource may run."""
- asset_tag = util.read_dmi_data('chassis-asset-tag')
- if asset_tag == AZURE_CHASSIS_ASSET_TAG:
- return True
- LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
- if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
- return True
- return False
+ with events.ReportEventStack(
+ name="check-platform-viability",
+ description="found azure asset tag",
+ parent=azure_ds_reporter) as evt:
+
+ """Check platform environment to report if this datasource may run."""
+ asset_tag = util.read_dmi_data('chassis-asset-tag')
+ if asset_tag == AZURE_CHASSIS_ASSET_TAG:
+ return True
+ msg = "Non-Azure DMI asset tag '%s' discovered." % asset_tag
+ LOG.debug(msg)
+ evt.description = msg
+ report_diagnostic_event(msg)
+ if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
+ return True
+ return False
class BrokenAzureDataSource(Exception):
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 2955d3f0..df88f677 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -42,12 +42,8 @@ class DataSourceCloudSigma(sources.DataSource):
if not sys_product_name:
LOG.debug("system-product-name not available in dmi data")
return False
- else:
- LOG.debug("detected hypervisor as %s", sys_product_name)
- return 'cloudsigma' in sys_product_name.lower()
-
- LOG.warning("failed to query dmi data for system product name")
- return False
+ LOG.debug("detected hypervisor as %s", sys_product_name)
+ return 'cloudsigma' in sys_product_name.lower()
def _get_data(self):
"""
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index d4b758f2..2013bed7 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -13,7 +13,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-from socket import inet_ntoa
+from socket import inet_ntoa, getaddrinfo, gaierror
from struct import pack
import time
@@ -93,9 +93,9 @@ class DataSourceCloudStack(sources.DataSource):
urls = [uhelp.combine_url(self.metadata_address,
'latest/meta-data/instance-id')]
start_time = time.time()
- url = uhelp.wait_for_url(
+ url, _response = uhelp.wait_for_url(
urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warn)
+ timeout=url_params.timeout_seconds, status_cb=LOG.warning)
if url:
LOG.debug("Using metadata source: '%s'", url)
@@ -156,6 +156,17 @@ class DataSourceCloudStack(sources.DataSource):
return self.metadata['availability-zone']
+def get_data_server():
+ # Returns the metadataserver from dns
+ try:
+ addrinfo = getaddrinfo("data-server.", 80)
+ except gaierror:
+ LOG.debug("DNS Entry data-server not found")
+ return None
+ else:
+ return addrinfo[0][4][0] # return IP
+
+
def get_default_gateway():
# Returns the default gateway ip address in the dotted format.
lines = util.load_file("/proc/net/route").splitlines()
@@ -218,7 +229,14 @@ def get_vr_address():
# If no virtual router is detected, fallback on default gateway.
# See http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/4.8/virtual_machines/user-data.html # noqa
- # Try networkd first...
+ # Try data-server DNS entry first
+ latest_address = get_data_server()
+ if latest_address:
+ LOG.debug("Found metadata server '%s' via data-server DNS entry",
+ latest_address)
+ return latest_address
+
+ # Try networkd second...
latest_address = dhcp.networkd_get_option_from_leases('SERVER_ADDRESS')
if latest_address:
LOG.debug("Found SERVER_ADDRESS '%s' via networkd_leases",
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 564e3eb3..f77923c2 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -72,15 +72,12 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
dslist = self.sys_cfg.get('datasource_list')
for dev in find_candidate_devs(dslist=dslist):
try:
- # Set mtype if freebsd and turn off sync
- if dev.startswith("/dev/cd"):
+ if util.is_FreeBSD() and dev.startswith("/dev/cd"):
mtype = "cd9660"
- sync = False
else:
mtype = None
- sync = True
results = util.mount_cb(dev, read_config_drive,
- mtype=mtype, sync=sync)
+ mtype=mtype)
found = dev
except openstack.NonReadable:
pass
@@ -166,10 +163,10 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- if self.seed_dir in self.source:
- subplatform_type = 'seed-dir'
- elif self.source.startswith('/dev'):
+ if self.source.startswith('/dev'):
subplatform_type = 'config-disk'
+ else:
+ subplatform_type = 'seed-dir'
return '%s (%s)' % (subplatform_type, self.source)
@@ -237,7 +234,7 @@ def find_candidate_devs(probe_optical=True, dslist=None):
config drive v2:
Disk should be:
- * either vfat or iso9660 formated
+ * either vfat or iso9660 formatted
* labeled with 'config-2' or 'CONFIG-2'
"""
if dslist is None:
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index b49a08db..0f2bfef4 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -10,7 +10,6 @@
import os
import time
-from subprocess import call
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
@@ -20,6 +19,7 @@ from cloudinit import sources
from cloudinit import url_helper as uhelp
from cloudinit import util
from cloudinit import warnings
+from cloudinit.event import EventType
LOG = logging.getLogger(__name__)
@@ -27,13 +27,21 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
-DEFAULT_PRIMARY_NIC = 'eth0'
+
+API_TOKEN_ROUTE = 'latest/api/token'
+API_TOKEN_DISABLED = '_ec2_disable_api_token'
+AWS_TOKEN_TTL_SECONDS = '21600'
+AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token'
+AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds'
+AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER]
class CloudNames(object):
ALIYUN = "aliyun"
AWS = "aws"
BRIGHTBOX = "brightbox"
+ ZSTACK = "zstack"
+ E24CLOUD = "e24cloud"
# UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false',
# then an attempt at the Ec2 Metadata service will be made.
UNKNOWN = "unknown"
@@ -45,12 +53,6 @@ class CloudNames(object):
class DataSourceEc2(sources.DataSource):
dsname = 'Ec2'
- process_name = 'dhclient'
-
- tmpps = os.popen("ps -Af").read()
- if process_name not in tmpps[:]:
- call(['/sbin/dhclient', DEFAULT_PRIMARY_NIC])
-
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
@@ -67,6 +69,7 @@ class DataSourceEc2(sources.DataSource):
url_max_wait = 120
url_timeout = 50
+ _api_token = None # API token for accessing the metadata service
_network_config = sources.UNSET # Used to cache calculated network cfg v1
# Whether we want to get network configuration from the metadata service.
@@ -115,6 +118,19 @@ class DataSourceEc2(sources.DataSource):
'dynamic', {}).get('instance-identity', {}).get('document', {})
return True
+ def is_classic_instance(self):
+ """Report if this instance type is Ec2 Classic (non-vpc)."""
+ if not self.metadata:
+ # Can return False on inconclusive as we are also called in
+ # network_config where metadata will be present.
+ # Secondary call site is in packaging postinst script.
+ return False
+ ifaces_md = self.metadata.get('network', {}).get('interfaces', {})
+ for _mac, mac_data in ifaces_md.get('macs', {}).items():
+ if 'vpc-id' in mac_data:
+ return False
+ return True
+
@property
def launch_index(self):
if not self.metadata:
@@ -140,11 +156,13 @@ class DataSourceEc2(sources.DataSource):
min_metadata_version.
"""
# Assumes metadata service is already up
+ url_tmpl = '{0}/{1}/meta-data/instance-id'
+ headers = self._get_headers()
for api_ver in self.extended_metadata_versions:
- url = '{0}/{1}/meta-data/instance-id'.format(
- self.metadata_address, api_ver)
+ url = url_tmpl.format(self.metadata_address, api_ver)
try:
- resp = uhelp.readurl(url=url)
+ resp = uhelp.readurl(url=url, headers=headers,
+ headers_redact=AWS_TOKEN_REDACT)
except uhelp.UrlError as e:
LOG.debug('url %s raised exception %s', url, e)
else:
@@ -164,12 +182,41 @@ class DataSourceEc2(sources.DataSource):
# setup self.identity. So we need to do that now.
api_version = self.get_metadata_api_version()
self.identity = ec2.get_instance_identity(
- api_version, self.metadata_address).get('document', {})
+ api_version, self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=AWS_TOKEN_REDACT,
+ exception_cb=self._refresh_stale_aws_token_cb).get(
+ 'document', {})
return self.identity.get(
'instanceId', self.metadata['instance-id'])
else:
return self.metadata['instance-id']
+ def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None):
+ if self.cloud_name != CloudNames.AWS:
+ return
+
+ urls = []
+ url2base = {}
+ url_path = API_TOKEN_ROUTE
+ request_method = 'PUT'
+ for url in mdurls:
+ cur = '{0}/{1}'.format(url, url_path)
+ urls.append(cur)
+ url2base[cur] = url
+
+ # use the self._status_cb to check for Read errors, which means
+ # we can't reach the API token URL, so we should disable IMDSv2
+ LOG.debug('Fetching Ec2 IMDSv2 API Token')
+ url, response = uhelp.wait_for_url(
+ urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb,
+ headers_cb=self._get_headers, request_method=request_method,
+ headers_redact=AWS_TOKEN_REDACT)
+
+ if url and response:
+ self._api_token = response
+ return url2base[url]
+
def wait_for_metadata_service(self):
mcfg = self.ds_cfg
@@ -191,27 +238,40 @@ class DataSourceEc2(sources.DataSource):
LOG.warning("Empty metadata url list! using default list")
mdurls = self.metadata_urls
- urls = []
- url2base = {}
- for url in mdurls:
- cur = '{0}/{1}/meta-data/instance-id'.format(
- url, self.min_metadata_version)
- urls.append(cur)
- url2base[cur] = url
-
- start_time = time.time()
- url = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warn)
-
- if url:
- self.metadata_address = url2base[url]
+ # try the api token path first
+ metadata_address = self._maybe_fetch_api_token(mdurls)
+ if not metadata_address:
+ if self._api_token == API_TOKEN_DISABLED:
+ LOG.warning('Retrying with IMDSv1')
+ # if we can't get a token, use instance-id path
+ urls = []
+ url2base = {}
+ url_path = '{ver}/meta-data/instance-id'.format(
+ ver=self.min_metadata_version)
+ request_method = 'GET'
+ for url in mdurls:
+ cur = '{0}/{1}'.format(url, url_path)
+ urls.append(cur)
+ url2base[cur] = url
+
+ start_time = time.time()
+ url, _ = uhelp.wait_for_url(
+ urls=urls, max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds, status_cb=LOG.warning,
+ headers_redact=AWS_TOKEN_REDACT, headers_cb=self._get_headers,
+ request_method=request_method)
+
+ if url:
+ metadata_address = url2base[url]
+
+ if metadata_address:
+ self.metadata_address = metadata_address
LOG.debug("Using metadata source: '%s'", self.metadata_address)
else:
LOG.critical("Giving up on md from %s after %s seconds",
urls, int(time.time() - start_time))
- return bool(url)
+ return bool(metadata_address)
def device_name_to_device(self, name):
# Consult metadata service, that has
@@ -328,6 +388,17 @@ class DataSourceEc2(sources.DataSource):
if isinstance(net_md, dict):
result = convert_ec2_metadata_network_config(
net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
+
+ # RELEASE_BLOCKER: xenial should drop the below if statement,
+ # because the issue being addressed doesn't exist pre-netplan.
+ # (This datasource doesn't implement check_instance_id() so the
+ # datasource object is recreated every boot; this means we don't
+ # need to modify update_events on cloud-init upgrade.)
+
+ # Non-VPC (aka Classic) Ec2 instances need to rewrite the
+ # network config file every boot due to MAC address change.
+ if self.is_classic_instance():
+ self.update_events['network'].add(EventType.BOOT)
else:
LOG.warning("Metadata 'network' key not valid: %s.", net_md)
self._network_config = result
@@ -356,15 +427,27 @@ class DataSourceEc2(sources.DataSource):
if not self.wait_for_metadata_service():
return {}
api_version = self.get_metadata_api_version()
+ redact = AWS_TOKEN_REDACT
crawled_metadata = {}
+ if self.cloud_name == CloudNames.AWS:
+ exc_cb = self._refresh_stale_aws_token_cb
+ exc_cb_ud = self._skip_or_refresh_stale_aws_token_cb
+ else:
+ exc_cb = exc_cb_ud = None
try:
crawled_metadata['user-data'] = ec2.get_instance_userdata(
- api_version, self.metadata_address)
+ api_version, self.metadata_address,
+ headers_cb=self._get_headers, headers_redact=redact,
+ exception_cb=exc_cb_ud)
crawled_metadata['meta-data'] = ec2.get_instance_metadata(
- api_version, self.metadata_address)
+ api_version, self.metadata_address,
+ headers_cb=self._get_headers, headers_redact=redact,
+ exception_cb=exc_cb)
if self.cloud_name == CloudNames.AWS:
identity = ec2.get_instance_identity(
- api_version, self.metadata_address)
+ api_version, self.metadata_address,
+ headers_cb=self._get_headers, headers_redact=redact,
+ exception_cb=exc_cb)
crawled_metadata['dynamic'] = {'instance-identity': identity}
except Exception:
util.logexc(
@@ -374,6 +457,73 @@ class DataSourceEc2(sources.DataSource):
crawled_metadata['_metadata_api_version'] = api_version
return crawled_metadata
+ def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS):
+ """Request new metadata API token.
+ @param seconds: The lifetime of the token in seconds
+
+ @return: The API token or None if unavailable.
+ """
+ if self.cloud_name != CloudNames.AWS:
+ return None
+ LOG.debug("Refreshing Ec2 metadata API token")
+ request_header = {AWS_TOKEN_REQ_HEADER: seconds}
+ token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE)
+ try:
+ response = uhelp.readurl(token_url, headers=request_header,
+ headers_redact=AWS_TOKEN_REDACT,
+ request_method="PUT")
+ except uhelp.UrlError as e:
+ LOG.warning(
+ 'Unable to get API token: %s raised exception %s',
+ token_url, e)
+ return None
+ return response.contents
+
+ def _skip_or_refresh_stale_aws_token_cb(self, msg, exception):
+ """Callback will not retry on SKIP_USERDATA_CODES or if no token
+ is available."""
+ retry = ec2.skip_retry_on_codes(
+ ec2.SKIP_USERDATA_CODES, msg, exception)
+ if not retry:
+ return False # False raises exception
+ return self._refresh_stale_aws_token_cb(msg, exception)
+
+ def _refresh_stale_aws_token_cb(self, msg, exception):
+ """Exception handler for Ec2 to refresh token if token is stale."""
+ if isinstance(exception, uhelp.UrlError) and exception.code == 401:
+ # With _api_token as None, _get_headers will _refresh_api_token.
+ LOG.debug("Clearing cached Ec2 API token due to expiry")
+ self._api_token = None
+ return True # always retry
+
+ def _status_cb(self, msg, exc=None):
+ LOG.warning(msg)
+ if 'Read timed out' in msg:
+ LOG.warning('Cannot use Ec2 IMDSv2 API tokens, using IMDSv1')
+ self._api_token = API_TOKEN_DISABLED
+
+ def _get_headers(self, url=''):
+ """Return a dict of headers for accessing a url.
+
+ If _api_token is unset on AWS, attempt to refresh the token via a PUT
+ and then return the updated token header.
+ """
+ if self.cloud_name != CloudNames.AWS or (self._api_token ==
+ API_TOKEN_DISABLED):
+ return {}
+ # Request a 6 hour token if URL is API_TOKEN_ROUTE
+ request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS}
+ if API_TOKEN_ROUTE in url:
+ return request_token_header
+ if not self._api_token:
+ # If we don't yet have an API token, get one via a PUT against
+ # API_TOKEN_ROUTE. This _api_token may get unset by a 403 due
+ # to an invalid or expired token
+ self._api_token = self._refresh_api_token()
+ if not self._api_token:
+ return {}
+ return {AWS_TOKEN_PUT_HEADER: self._api_token}
+
class DataSourceEc2Local(DataSourceEc2):
"""Datasource run at init-local which sets up network to query metadata.
@@ -450,20 +600,31 @@ def identify_aws(data):
if (data['uuid'].startswith('ec2') and
(data['uuid_source'] == 'hypervisor' or
data['uuid'] == data['serial'])):
- return CloudNames.AWS
+ return CloudNames.AWS
return None
def identify_brightbox(data):
- if data['serial'].endswith('brightbox.com'):
+ if data['serial'].endswith('.brightbox.com'):
return CloudNames.BRIGHTBOX
+def identify_zstack(data):
+ if data['asset_tag'].endswith('.zstack.io'):
+ return CloudNames.ZSTACK
+
+
+def identify_e24cloud(data):
+ if data['vendor'] == 'e24cloud':
+ return CloudNames.E24CLOUD
+
+
def identify_platform():
# identify the platform and return an entry in CloudNames.
data = _collect_platform_data()
- checks = (identify_aws, identify_brightbox, lambda x: CloudNames.UNKNOWN)
+ checks = (identify_aws, identify_brightbox, identify_zstack,
+ identify_e24cloud, lambda x: CloudNames.UNKNOWN)
for checker in checks:
try:
result = checker(data)
@@ -481,6 +642,8 @@ def _collect_platform_data():
uuid: system-uuid from dmi or /sys/hypervisor
uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi'
serial: dmi 'system-serial-number' (/sys/.../product_serial)
+ asset_tag: 'dmidecode -s chassis-asset-tag'
+ vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor)
On Ec2 instances experimentation is that product_serial is upper case,
and product_uuid is lower case. This returns lower case values for both.
@@ -503,6 +666,15 @@ def _collect_platform_data():
data['serial'] = serial.lower()
+ asset_tag = util.read_dmi_data('chassis-asset-tag')
+ if asset_tag is None:
+ asset_tag = ''
+
+ data['asset_tag'] = asset_tag.lower()
+
+ vendor = util.read_dmi_data('system-manufacturer')
+ data['vendor'] = (vendor if vendor else '').lower()
+
return data
diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py
new file mode 100644
index 00000000..d59aefd1
--- /dev/null
+++ b/cloudinit/sources/DataSourceExoscale.py
@@ -0,0 +1,268 @@
+# Author: Mathieu Corbin <mathieu.corbin@exoscale.com>
+# Author: Christopher Glass <christopher.glass@exoscale.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import ec2_utils as ec2
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import helpers
+from cloudinit import url_helper
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+METADATA_URL = "http://169.254.169.254"
+API_VERSION = "1.0"
+PASSWORD_SERVER_PORT = 8080
+
+URL_TIMEOUT = 10
+URL_RETRIES = 6
+
+EXOSCALE_DMI_NAME = "Exoscale"
+
+
+class DataSourceExoscale(sources.DataSource):
+
+ dsname = 'Exoscale'
+
+ url_max_wait = 120
+
+ def __init__(self, sys_cfg, distro, paths):
+ super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths)
+ LOG.debug("Initializing the Exoscale datasource")
+
+ self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL)
+ self.api_version = self.ds_cfg.get('api_version', API_VERSION)
+ self.password_server_port = int(
+ self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT))
+ self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT)
+ self.url_retries = self.ds_cfg.get('retries', URL_RETRIES)
+ self.extra_config = {}
+
+ def activate(self, cfg, is_new_instance):
+ """Adjust set-passwords module to run 'always' during each boot"""
+ # We run the set password config module on every boot in order to
+ # enable resetting the instance's password via the exoscale console
+ # (and a subsequent instance reboot).
+ # Exoscale password server only provides set-passwords user-data if
+ # a user has triggered a password reset. So calling that password
+ # service generally results in no additional cloud-config.
+ # TODO(Create util functions for overriding merged sys_cfg module freq)
+ mod = 'set_passwords'
+ sem_path = self.paths.get_ipath_cur('sem')
+ sem_helper = helpers.FileSemaphores(sem_path)
+ if sem_helper.clear('config_' + mod, None):
+ LOG.debug('Overriding module set-passwords with frequency always')
+
+ def wait_for_metadata_service(self):
+ """Wait for the metadata service to be reachable."""
+
+ metadata_url = "{}/{}/meta-data/instance-id".format(
+ self.metadata_url, self.api_version)
+
+ url, _response = url_helper.wait_for_url(
+ urls=[metadata_url],
+ max_wait=self.url_max_wait,
+ timeout=self.url_timeout,
+ status_cb=LOG.critical)
+
+ return bool(url)
+
+ def crawl_metadata(self):
+ """
+ Crawl the metadata service when available.
+
+ @returns: Dictionary of crawled metadata content.
+ """
+ metadata_ready = util.log_time(
+ logfunc=LOG.info,
+ msg='waiting for the metadata service',
+ func=self.wait_for_metadata_service)
+
+ if not metadata_ready:
+ return {}
+
+ return read_metadata(self.metadata_url, self.api_version,
+ self.password_server_port, self.url_timeout,
+ self.url_retries)
+
+ def _get_data(self):
+ """Fetch the user data, the metadata and the VM password
+ from the metadata service.
+
+ Please refer to the datasource documentation for details on how the
+ metadata server and password server are crawled.
+ """
+ if not self._is_platform_viable():
+ return False
+
+ data = util.log_time(
+ logfunc=LOG.debug,
+ msg='Crawl of metadata service',
+ func=self.crawl_metadata)
+
+ if not data:
+ return False
+
+ self.userdata_raw = data['user-data']
+ self.metadata = data['meta-data']
+ password = data.get('password')
+
+ password_config = {}
+ if password:
+ # Since we have a password, let's make sure we are allowed to use
+ # it by allowing ssh_pwauth.
+ # The password module's default behavior is to leave the
+ # configuration as-is in this regard, so that means it will either
+ # leave the password always disabled if no password is ever set, or
+ # leave the password login enabled if we set it once.
+ password_config = {
+ 'ssh_pwauth': True,
+ 'password': password,
+ 'chpasswd': {
+ 'expire': False,
+ },
+ }
+
+ # builtin extra_config overrides password_config
+ self.extra_config = util.mergemanydict(
+ [self.extra_config, password_config])
+
+ return True
+
+ def get_config_obj(self):
+ return self.extra_config
+
+ def _is_platform_viable(self):
+ return util.read_dmi_data('system-product-name').startswith(
+ EXOSCALE_DMI_NAME)
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceExoscale, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+def get_password(metadata_url=METADATA_URL,
+ api_version=API_VERSION,
+ password_server_port=PASSWORD_SERVER_PORT,
+ url_timeout=URL_TIMEOUT,
+ url_retries=URL_RETRIES):
+ """Obtain the VM's password if set.
+
+ Once fetched the password is marked saved. Future calls to this method may
+ return empty string or 'saved_password'."""
+ password_url = "{}:{}/{}/".format(metadata_url, password_server_port,
+ api_version)
+ response = url_helper.read_file_or_url(
+ password_url,
+ ssl_details=None,
+ headers={"DomU_Request": "send_my_password"},
+ timeout=url_timeout,
+ retries=url_retries)
+ password = response.contents.decode('utf-8')
+ # the password is empty or already saved
+ # Note: the original metadata server would answer an additional
+ # 'bad_request' status, but the Exoscale implementation does not.
+ if password in ['', 'saved_password']:
+ return None
+ # save the password
+ url_helper.read_file_or_url(
+ password_url,
+ ssl_details=None,
+ headers={"DomU_Request": "saved_password"},
+ timeout=url_timeout,
+ retries=url_retries)
+ return password
+
+
+def read_metadata(metadata_url=METADATA_URL,
+ api_version=API_VERSION,
+ password_server_port=PASSWORD_SERVER_PORT,
+ url_timeout=URL_TIMEOUT,
+ url_retries=URL_RETRIES):
+ """Query the metadata server and return the retrieved data."""
+ crawled_metadata = {}
+ crawled_metadata['_metadata_api_version'] = api_version
+ try:
+ crawled_metadata['user-data'] = ec2.get_instance_userdata(
+ api_version,
+ metadata_url,
+ timeout=url_timeout,
+ retries=url_retries)
+ crawled_metadata['meta-data'] = ec2.get_instance_metadata(
+ api_version,
+ metadata_url,
+ timeout=url_timeout,
+ retries=url_retries)
+ except Exception as e:
+ util.logexc(LOG, "failed reading from metadata url %s (%s)",
+ metadata_url, e)
+ return {}
+
+ try:
+ crawled_metadata['password'] = get_password(
+ api_version=api_version,
+ metadata_url=metadata_url,
+ password_server_port=password_server_port,
+ url_retries=url_retries,
+ url_timeout=url_timeout)
+ except Exception as e:
+ util.logexc(LOG, "failed to read from password server url %s:%s (%s)",
+ metadata_url, password_server_port, e)
+
+ return crawled_metadata
+
+
+if __name__ == "__main__":
+ import argparse
+
+ parser = argparse.ArgumentParser(description='Query Exoscale Metadata')
+ parser.add_argument(
+ "--endpoint",
+ metavar="URL",
+ help="The url of the metadata service.",
+ default=METADATA_URL)
+ parser.add_argument(
+ "--version",
+ metavar="VERSION",
+ help="The version of the metadata endpoint to query.",
+ default=API_VERSION)
+ parser.add_argument(
+ "--retries",
+ metavar="NUM",
+ type=int,
+ help="The number of retries querying the endpoint.",
+ default=URL_RETRIES)
+ parser.add_argument(
+ "--timeout",
+ metavar="NUM",
+ type=int,
+ help="The time in seconds to wait before timing out.",
+ default=URL_TIMEOUT)
+ parser.add_argument(
+ "--password-port",
+ metavar="PORT",
+ type=int,
+ help="The port on which the password endpoint listens",
+ default=PASSWORD_SERVER_PORT)
+
+ args = parser.parse_args()
+
+ data = read_metadata(
+ metadata_url=args.endpoint,
+ api_version=args.version,
+ password_server_port=args.password_port,
+ url_timeout=args.timeout,
+ url_retries=args.retries)
+
+ print(util.json_dumps(data))
+
+# vi: ts=4 expandtab
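The new Exoscale datasource's `get_password()` speaks the CloudStack-style password-server protocol on port 8080: one request with `DomU_Request: send_my_password` returns the cleartext password (or an empty string / `saved_password` sentinel), and a follow-up with `DomU_Request: saved_password` acknowledges it so the server stops serving it. A hedged standalone sketch using `requests`:

```python
import requests

url = 'http://169.254.169.254:8080/1.0/'
password = requests.get(
    url, headers={'DomU_Request': 'send_my_password'}, timeout=10).text
if password not in ('', 'saved_password'):
    # acknowledge so the cleartext password is no longer served
    requests.get(
        url, headers={'DomU_Request': 'saved_password'}, timeout=10)
```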
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index f72d9836..6cbfbbac 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -2,10 +2,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import os
import datetime
import json
-from subprocess import call
from base64 import b64decode
@@ -20,11 +18,13 @@ LOG = logging.getLogger(__name__)
MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/'
BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL}
REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
-DEFAULT_PRIMARY_NIC = 'eth0'
+GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
+ 'v1/instance/guest-attributes')
+HOSTKEY_NAMESPACE = 'hostkeys'
+HEADERS = {'Metadata-Flavor': 'Google'}
class GoogleMetadataFetcher(object):
- headers = {'Metadata-Flavor': 'Google'}
def __init__(self, metadata_address):
self.metadata_address = metadata_address
@@ -35,7 +35,7 @@ class GoogleMetadataFetcher(object):
url = self.metadata_address + path
if is_recursive:
url += '/?recursive=True'
- resp = url_helper.readurl(url=url, headers=self.headers)
+ resp = url_helper.readurl(url=url, headers=HEADERS)
except url_helper.UrlError as exc:
msg = "url %s raised exception %s"
LOG.debug(msg, path, exc)
@@ -53,11 +53,6 @@ class GoogleMetadataFetcher(object):
class DataSourceGCE(sources.DataSource):
dsname = 'GCE'
- process_name = 'dhclient'
-
- tmpps = os.popen("ps -Af").read()
- if process_name not in tmpps[:]:
- call(['/sbin/dhclient', DEFAULT_PRIMARY_NIC])
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -98,6 +93,10 @@ class DataSourceGCE(sources.DataSource):
public_keys_data = self.metadata['public-keys-data']
return _parse_public_keys(public_keys_data, self.default_user)
+ def publish_host_keys(self, hostkeys):
+ for key in hostkeys:
+ _write_host_key_to_guest_attributes(*key)
+
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
         # GCE has long FQDNs and has asked for short hostnames.
return self.metadata['local-hostname'].split('.')[0]
@@ -111,6 +110,17 @@ class DataSourceGCE(sources.DataSource):
return self.availability_zone.rsplit('-', 1)[0]
+def _write_host_key_to_guest_attributes(key_type, key_value):
+ url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type)
+ key_value = key_value.encode('utf-8')
+ resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS,
+ request_method='PUT', check_status=False)
+ if resp.ok():
+ LOG.debug('Wrote %s host key to guest attributes.', key_type)
+ else:
+ LOG.debug('Unable to write %s host key to guest attributes.', key_type)
+
+
def _has_expired(public_key):
# Check whether an SSH key is expired. Public key input is a single SSH
# public key in the GCE specific key format documented here:
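
publish_host_keys() above amounts to one PUT per key to the
guest-attributes endpoint under the 'hostkeys' namespace. A hedged
standard-library sketch of the same request; the key value is a
placeholder, and the call only succeeds from within a GCE instance with
guest attributes enabled:

    from urllib.request import Request, urlopen

    GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
                            'v1/instance/guest-attributes')

    def put_host_key(key_type, key_value):
        # Equivalent of _write_host_key_to_guest_attributes(), minus
        # cloud-init's url_helper plumbing.
        url = '%s/hostkeys/%s' % (GUEST_ATTRIBUTES_URL, key_type)
        req = Request(url, data=key_value.encode('utf-8'),
                      headers={'Metadata-Flavor': 'Google'}, method='PUT')
        with urlopen(req, timeout=10) as resp:
            return resp.status == 200

    # put_host_key('ssh-ed25519', 'AAAAC3Nza...')  # placeholder key value
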
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index 5c75b65b..50298330 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -28,6 +28,9 @@ MD_WAIT_RETRY = 2
class DataSourceHetzner(sources.DataSource):
+
+ dsname = 'Hetzner'
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.distro = distro
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index 21e6ae6b..e0c714e8 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -83,7 +83,7 @@ creates 6 boot scenarios.
There is no information available to identify this scenario.
- The user will be able to ssh in as as root with their public keys that
+    The user will be able to SSH in as root with their public keys that
have been installed into /root/ssh/.authorized_keys
during the provisioning stage.
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 61aa6d7e..517913aa 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -136,7 +136,7 @@ class DataSourceMAAS(sources.DataSource):
url = url[:-1]
check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
urls = [check_url]
- url = self.oauth_helper.wait_for_url(
+ url, _response = self.oauth_helper.wait_for_url(
urls=urls, max_wait=max_wait, timeout=timeout)
if url:
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 6860f0cc..ee748b41 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -35,6 +35,26 @@ class DataSourceNoCloud(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
+ def _get_devices(self, label):
+ if util.is_FreeBSD():
+ devlist = [
+ p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label]
+ if os.path.exists(p)]
+ else:
+ # Query optical drive to get it in blkid cache for 2.6 kernels
+ util.find_devs_with(path="/dev/sr0")
+ util.find_devs_with(path="/dev/sr1")
+
+ fslist = util.find_devs_with("TYPE=vfat")
+ fslist.extend(util.find_devs_with("TYPE=iso9660"))
+
+ label_list = util.find_devs_with("LABEL=%s" % label.upper())
+ label_list.extend(util.find_devs_with("LABEL=%s" % label.lower()))
+
+ devlist = list(set(fslist) & set(label_list))
+ devlist.sort(reverse=True)
+ return devlist
+
def _get_data(self):
defaults = {
"instance-id": "nocloud",
@@ -99,18 +119,7 @@ class DataSourceNoCloud(sources.DataSource):
label = self.ds_cfg.get('fs_label', "cidata")
if label is not None:
- # Query optical drive to get it in blkid cache for 2.6 kernels
- util.find_devs_with(path="/dev/sr0")
- util.find_devs_with(path="/dev/sr1")
-
- fslist = util.find_devs_with("TYPE=vfat")
- fslist.extend(util.find_devs_with("TYPE=iso9660"))
-
- label_list = util.find_devs_with("LABEL=%s" % label)
- devlist = list(set(fslist) & set(label_list))
- devlist.sort(reverse=True)
-
- for dev in devlist:
+ for dev in self._get_devices(label):
try:
LOG.debug("Attempting to use data from %s", dev)
@@ -118,9 +127,8 @@ class DataSourceNoCloud(sources.DataSource):
seeded = util.mount_cb(dev, _pp2d_callback,
pp2d_kwargs)
except ValueError:
- if dev in label_list:
- LOG.warning("device %s with label=%s not a"
- "valid seed.", dev, label)
+ LOG.warning("device %s with label=%s not a "
+ "valid seed.", dev, label)
continue
mydata = _merge_new_seed(mydata, seeded)
@@ -258,7 +266,7 @@ def load_cmdline_data(fill, cmdline=None):
("ds=nocloud-net", sources.DSMODE_NETWORK)]
for idstr, dsmode in pairs:
if parse_cmdline_data(idstr, fill, cmdline):
- # if dsmode was explicitly in the commanad line, then
+ # if dsmode was explicitly in the command line, then
# prefer it to the dsmode based on the command line id
if 'dsmode' not in fill:
fill['dsmode'] = dsmode
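
The refactored _get_devices() keeps the original selection rule on Linux:
a device qualifies only if it is both a vfat/iso9660 filesystem and
carries the seed label, now matched in upper and lower case. A toy
illustration of that intersection, with hypothetical device paths:

    fslist = ['/dev/sr0', '/dev/vdb', '/dev/vdc']   # TYPE=vfat or iso9660
    label_list = ['/dev/vdb', '/dev/vdd']           # LABEL=CIDATA or cidata
    devlist = sorted(set(fslist) & set(label_list), reverse=True)
    print(devlist)  # ['/dev/vdb'] - only devices matching both survive
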
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 2c40cf97..7f55b5f8 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -8,17 +8,15 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from xml.dom import minidom
-
import base64
import os
import re
import time
+from xml.dom import minidom
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
-
from cloudinit.sources.helpers.vmware.imc.config \
import Config
from cloudinit.sources.helpers.vmware.imc.config_custom_script \
@@ -38,11 +36,15 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_state \
from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
enable_nics,
get_nics_to_enable,
- set_customization_status
+ set_customization_status,
+ get_tools_config
)
LOG = logging.getLogger(__name__)
+CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg"
+GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts"
+
class DataSourceOVF(sources.DataSource):
@@ -103,8 +105,7 @@ class DataSourceOVF(sources.DataSource):
plugin = "libdeployPkgPlugin.so"
deployPkgPluginPath = None
for path in search_paths:
- # Ignore deployPkgPluginPath for now.
- #deployPkgPluginPath = search_file(path, plugin)
+ deployPkgPluginPath = search_file(path, plugin)
if deployPkgPluginPath:
LOG.debug("Found the customization plugin at %s",
deployPkgPluginPath)
@@ -147,6 +148,24 @@ class DataSourceOVF(sources.DataSource):
product_marker, os.path.join(self.paths.cloud_dir, 'data'))
special_customization = product_marker and not hasmarkerfile
customscript = self._vmware_cust_conf.custom_script_name
+ custScriptConfig = get_tools_config(
+ CONFGROUPNAME_GUESTCUSTOMIZATION,
+ GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
+ "false")
+ if custScriptConfig.lower() != "true":
+                # Update the customization status if a custom script
+                # was provided but is disabled by the VM administrator
+ if special_customization and customscript:
+ msg = "Custom script is disabled by VM Administrator"
+ LOG.debug(msg)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED)
+ raise RuntimeError(msg)
+
+ ccScriptsDir = os.path.join(
+ self.paths.get_cpath("scripts"),
+ "per-instance")
except Exception as e:
_raise_error_status(
"Error parsing the customization Config File",
@@ -200,7 +219,9 @@ class DataSourceOVF(sources.DataSource):
if customscript:
try:
- postcust = PostCustomScript(customscript, imcdirpath)
+ postcust = PostCustomScript(customscript,
+ imcdirpath,
+ ccScriptsDir)
postcust.execute()
except Exception as e:
_raise_error_status(
@@ -381,9 +402,7 @@ def read_vmware_imc(config):
if config.timezone:
cfg['timezone'] = config.timezone
- # Generate a unique instance-id so that re-customization will
- # happen in cloud-init
- md['instance-id'] = "iid-vmware-" + util.rand_str(strlen=8)
+ md['instance-id'] = "iid-vmware-imc"
return (md, ud, cfg)
@@ -436,7 +455,7 @@ def maybe_cdrom_device(devname):
"""
if not devname:
return False
- elif not isinstance(devname, util.string_types):
+ elif not isinstance(devname, str):
raise ValueError("Unexpected input for devname: %s" % devname)
# resolve '..' and multi '/' elements
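
The new gate in DataSourceOVF consults the VMTools configuration through
get_tools_config() (defined later in guestcust_util.py) and refuses to run
a user-supplied script unless the administrator has set
[deployPkg] enable-custom-scripts to "true". A minimal sketch of the gate,
with a stub standing in for the real helper:

    def get_tools_config(section, key, default):
        # Stub: pretend the administrator left the option unset.
        return default

    value = get_tools_config('deployPkg', 'enable-custom-scripts', 'false')
    if value.lower() != 'true':
        # Matches the behavior above: scripts are disabled by default.
        print('Custom script is disabled by VM Administrator')
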
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index e62e9729..02c9a7b8 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -337,7 +337,9 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
(output, _error) = util.subp(cmd, data=bcmd)
# exclude vars in bash that change on their own or that we used
- excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v")
+ excluded = (
+ "EPOCHREALTIME", "EPOCHSECONDS", "RANDOM", "LINENO", "SECONDS", "_",
+ "__v")
preset = {}
ret = {}
target = None
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 4a015240..7a5e71b6 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -76,7 +76,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
url_params = self.get_url_params()
start_time = time.time()
- avail_url = url_helper.wait_for_url(
+ avail_url, _response = url_helper.wait_for_url(
urls=md_urls, max_wait=url_params.max_wait_seconds,
timeout=url_params.timeout_seconds)
if avail_url:
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index 70b9c58a..eec87403 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -16,7 +16,7 @@ Notes:
"""
from cloudinit.url_helper import combine_url, readurl, UrlError
-from cloudinit.net import dhcp
+from cloudinit.net import dhcp, get_interfaces_by_mac, is_netfail_master
from cloudinit import net
from cloudinit import sources
from cloudinit import util
@@ -28,8 +28,134 @@ import re
LOG = logging.getLogger(__name__)
+BUILTIN_DS_CONFIG = {
+ # Don't use IMDS to configure secondary NICs by default
+ 'configure_secondary_nics': False,
+}
CHASSIS_ASSET_TAG = "OracleCloud.com"
METADATA_ENDPOINT = "http://169.254.169.254/openstack/"
+VNIC_METADATA_URL = 'http://169.254.169.254/opc/v1/vnics/'
+# https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview,
+# indicates that an MTU of 9000 is used within OCI
+MTU = 9000
+
+
+def _add_network_config_from_opc_imds(network_config):
+ """
+ Fetch data from Oracle's IMDS, generate secondary NIC config, merge it.
+
+ The primary NIC configuration should not be modified based on the IMDS
+ values, as it should continue to be configured for DHCP. As such, this
+ takes an existing network_config dict which is expected to have the primary
+ NIC configuration already present. It will mutate the given dict to
+ include the secondary VNICs.
+
+ :param network_config:
+ A v1 or v2 network config dict with the primary NIC already configured.
+ This dict will be mutated.
+
+ :raises:
+ Exceptions are not handled within this function. Likely exceptions are
+ those raised by url_helper.readurl (if communicating with the IMDS
+ fails), ValueError/JSONDecodeError (if the IMDS returns invalid JSON),
+ and KeyError/IndexError (if the IMDS returns valid JSON with unexpected
+ contents).
+ """
+ resp = readurl(VNIC_METADATA_URL)
+ vnics = json.loads(str(resp))
+
+ if 'nicIndex' in vnics[0]:
+ # TODO: Once configure_secondary_nics defaults to True, lower the level
+ # of this log message. (Currently, if we're running this code at all,
+ # someone has explicitly opted-in to secondary VNIC configuration, so
+ # we should warn them that it didn't happen. Once it's default, this
+ # would be emitted on every Bare Metal Machine launch, which means INFO
+ # or DEBUG would be more appropriate.)
+ LOG.warning(
+ 'VNIC metadata indicates this is a bare metal machine; skipping'
+ ' secondary VNIC configuration.'
+ )
+ return
+
+ interfaces_by_mac = get_interfaces_by_mac()
+
+ for vnic_dict in vnics[1:]:
+ # We skip the first entry in the response because the primary interface
+ # is already configured by iSCSI boot; applying configuration from the
+ # IMDS is not required.
+ mac_address = vnic_dict['macAddr'].lower()
+ if mac_address not in interfaces_by_mac:
+ LOG.debug('Interface with MAC %s not found; skipping', mac_address)
+ continue
+ name = interfaces_by_mac[mac_address]
+
+ if network_config['version'] == 1:
+ subnet = {
+ 'type': 'static',
+ 'address': vnic_dict['privateIp'],
+ }
+ network_config['config'].append({
+ 'name': name,
+ 'type': 'physical',
+ 'mac_address': mac_address,
+ 'mtu': MTU,
+ 'subnets': [subnet],
+ })
+ elif network_config['version'] == 2:
+ network_config['ethernets'][name] = {
+ 'addresses': [vnic_dict['privateIp']],
+ 'mtu': MTU, 'dhcp4': False, 'dhcp6': False,
+ 'match': {'macaddress': mac_address}}
+
+
+def _ensure_netfailover_safe(network_config):
+ """
+ Search network config physical interfaces to see if any of them are
+ a netfailover master. If found, we prevent matching by MAC as the other
+ failover devices have the same MAC but need to be ignored.
+
+ Note: we rely on cloudinit.net changes which prevent netfailover devices
+ from being present in the provided network config. For more details about
+ netfailover devices, refer to cloudinit.net module.
+
+    :param network_config:
+        A v1 or v2 network config dict with the primary NIC, and possibly
+        secondary NICs, configured. This dict will be mutated.
+
+ """
+ # ignore anything that's not an actual network-config
+ if 'version' not in network_config:
+ return
+
+ if network_config['version'] not in [1, 2]:
+ LOG.debug('Ignoring unknown network config version: %s',
+ network_config['version'])
+ return
+
+ mac_to_name = get_interfaces_by_mac()
+ if network_config['version'] == 1:
+ for cfg in [c for c in network_config['config'] if 'type' in c]:
+ if cfg['type'] == 'physical':
+ if 'mac_address' in cfg:
+ mac = cfg['mac_address']
+ cur_name = mac_to_name.get(mac)
+ if not cur_name:
+ continue
+ elif is_netfail_master(cur_name):
+ del cfg['mac_address']
+
+ elif network_config['version'] == 2:
+ for _, cfg in network_config.get('ethernets', {}).items():
+ if 'match' in cfg:
+ macaddr = cfg.get('match', {}).get('macaddress')
+ if macaddr:
+ cur_name = mac_to_name.get(macaddr)
+ if not cur_name:
+ continue
+ elif is_netfail_master(cur_name):
+ del cfg['match']['macaddress']
+ del cfg['set-name']
+ cfg['match']['name'] = cur_name
class DataSourceOracle(sources.DataSource):
@@ -37,8 +163,22 @@ class DataSourceOracle(sources.DataSource):
dsname = 'Oracle'
system_uuid = None
vendordata_pure = None
+ network_config_sources = (
+ sources.NetworkConfigSource.cmdline,
+ sources.NetworkConfigSource.ds,
+ sources.NetworkConfigSource.initramfs,
+ sources.NetworkConfigSource.system_cfg,
+ )
+
_network_config = sources.UNSET
+ def __init__(self, sys_cfg, *args, **kwargs):
+ super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs)
+
+ self.ds_cfg = util.mergemanydict([
+ util.get_cfg_by_path(sys_cfg, ['datasource', self.dsname], {}),
+ BUILTIN_DS_CONFIG])
+
def _is_platform_viable(self):
"""Check platform environment to report if this datasource may run."""
return _is_platform_viable()
@@ -48,7 +188,7 @@ class DataSourceOracle(sources.DataSource):
return False
# network may be configured if iscsi root. If that is the case
- # then read_kernel_cmdline_config will return non-None.
+ # then read_initramfs_config will return non-None.
if _is_iscsi_root():
data = self.crawl_metadata()
else:
@@ -118,11 +258,27 @@ class DataSourceOracle(sources.DataSource):
We nonetheless return cmdline provided config if present
and fallback to generate fallback."""
if self._network_config == sources.UNSET:
- cmdline_cfg = cmdline.read_kernel_cmdline_config()
- if cmdline_cfg:
- self._network_config = cmdline_cfg
- else:
+ # this is v1
+ self._network_config = cmdline.read_initramfs_config()
+
+ if not self._network_config:
+ # this is now v2
self._network_config = self.distro.generate_fallback_config()
+
+ if self.ds_cfg.get('configure_secondary_nics'):
+ try:
+ # Mutate self._network_config to include secondary VNICs
+ _add_network_config_from_opc_imds(self._network_config)
+ except Exception:
+ util.logexc(
+ LOG,
+ "Failed to fetch secondary network configuration!")
+
+ # we need to verify that the nic selected is not a netfail over
+ # device and, if it is a netfail master, then we need to avoid
+ # emitting any match by mac
+ _ensure_netfailover_safe(self._network_config)
+
return self._network_config
@@ -137,7 +293,7 @@ def _is_platform_viable():
def _is_iscsi_root():
- return bool(cmdline.read_kernel_cmdline_config())
+ return bool(cmdline.read_initramfs_config())
def _load_index(content):
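
For version-2 configs, _ensure_netfailover_safe() rewrites a MAC-based
match into a name-based one whenever the matched interface is a netfail
master, since the failover slave devices share the master's MAC address.
A before/after sketch, with a hypothetical interface name and MAC:

    # Before: matching by MAC would also catch the standby/primary slaves.
    network_config = {
        'version': 2,
        'ethernets': {
            'ens3': {'dhcp4': True,
                     'set-name': 'ens3',
                     'match': {'macaddress': '02:00:17:01:02:03'}}}}

    # After _ensure_netfailover_safe(network_config), assuming ens3 is a
    # netfail master, the entry would read:
    #     {'dhcp4': True, 'match': {'name': 'ens3'}}
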
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
new file mode 100644
index 00000000..c3cd5c79
--- /dev/null
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -0,0 +1,251 @@
+# Copyright (C) 2018 Warsaw Data Center
+#
+# Author: Malwina Leis <m.leis@rootbox.com>
+# Author: Grzegorz Brzeski <gregory@rootbox.io>
+# Author: Adam Dobrawy <a.dobrawy@hyperone.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+"""
+This file contains code used to gather the user data passed to an
+instance on rootbox / hyperone cloud platforms
+"""
+import errno
+import os
+import os.path
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit.event import EventType
+
+LOG = logging.getLogger(__name__)
+ETC_HOSTS = '/etc/hosts'
+
+
+def get_manage_etc_hosts():
+ hosts = util.load_file(ETC_HOSTS, quiet=True)
+ if hosts:
+ LOG.debug('/etc/hosts exists - setting manage_etc_hosts to False')
+ return False
+    LOG.debug('/etc/hosts does not exist - setting manage_etc_hosts to True')
+ return True
+
+
+def ip2int(addr):
+ parts = addr.split('.')
+ return (int(parts[0]) << 24) + (int(parts[1]) << 16) + \
+ (int(parts[2]) << 8) + int(parts[3])
+
+
+def int2ip(addr):
+ return '.'.join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]])
+
+
+def _sub_arp(cmd):
+ """
+    Uses the preferred cloud-init subprocess wrapper util.subp
+    to run arping. Broken out into a separate function so it can
+    be mocked in unit tests.
+ """
+ return util.subp(['arping'] + cmd)
+
+
+def gratuitous_arp(items, distro):
+ source_param = '-S'
+ if distro.name in ['fedora', 'centos', 'rhel']:
+ source_param = '-s'
+ for item in items:
+ _sub_arp([
+ '-c', '2',
+ source_param, item['source'],
+ item['destination']
+ ])
+
+
+def get_md():
+ rbx_data = None
+ devices = [
+ dev
+ for dev, bdata in util.blkid().items()
+ if bdata.get('LABEL', '').upper() == 'CLOUDMD'
+ ]
+ for device in devices:
+ try:
+ rbx_data = util.mount_cb(
+ device=device,
+ callback=read_user_data_callback,
+ mtype=['vfat', 'fat']
+ )
+ if rbx_data:
+ break
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ except util.MountFailedError:
+ util.logexc(LOG, "Failed to mount %s when looking for user "
+ "data", device)
+ if not rbx_data:
+ util.logexc(LOG, "Failed to load metadata and userdata")
+ return False
+ return rbx_data
+
+
+def generate_network_config(netadps):
+ """Generate network configuration
+
+ @param netadps: A list of network adapter settings
+
+ @returns: A dict containing network config
+ """
+ return {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'physical',
+ 'name': 'eth{}'.format(str(i)),
+ 'mac_address': netadp['macaddress'].lower(),
+ 'subnets': [
+ {
+ 'type': 'static',
+ 'address': ip['address'],
+ 'netmask': netadp['network']['netmask'],
+ 'control': 'auto',
+ 'gateway': netadp['network']['gateway'],
+ 'dns_nameservers': netadp['network']['dns'][
+ 'nameservers']
+ } for ip in netadp['ip']
+ ],
+ } for i, netadp in enumerate(netadps)
+ ]
+ }
+
+
+def read_user_data_callback(mount_dir):
+ """This callback will be applied by util.mount_cb() on the mounted
+ drive.
+
+ @param mount_dir: String representing path of directory where mounted drive
+ is available
+
+ @returns: A dict containing userdata, metadata and cfg based on metadata.
+ """
+ meta_data = util.load_json(
+ text=util.load_file(
+ fname=os.path.join(mount_dir, 'cloud.json'),
+ decode=False
+ )
+ )
+ user_data = util.load_file(
+ fname=os.path.join(mount_dir, 'user.data'),
+ quiet=True
+ )
+ if 'vm' not in meta_data or 'netadp' not in meta_data:
+ util.logexc(LOG, "Failed to load metadata. Invalid format.")
+ return None
+ username = meta_data.get('additionalMetadata', {}).get('username')
+ ssh_keys = meta_data.get('additionalMetadata', {}).get('sshKeys', [])
+
+    password_hash = None  # avoid shadowing the hash() builtin
+    if meta_data.get('additionalMetadata', {}).get('password'):
+        password_hash = (
+            meta_data['additionalMetadata']['password']['sha512'])
+
+ network = generate_network_config(meta_data['netadp'])
+
+ data = {
+ 'userdata': user_data,
+ 'metadata': {
+ 'instance-id': meta_data['vm']['_id'],
+ 'local-hostname': meta_data['vm']['name'],
+ 'public-keys': []
+ },
+ 'gratuitous_arp': [
+ {
+ "source": ip["address"],
+ "destination": target
+ }
+ for netadp in meta_data['netadp']
+ for ip in netadp['ip']
+ for target in [
+ netadp['network']["gateway"],
+ int2ip(ip2int(netadp['network']["gateway"]) + 2),
+ int2ip(ip2int(netadp['network']["gateway"]) + 3)
+ ]
+ ],
+ 'cfg': {
+ 'ssh_pwauth': True,
+ 'disable_root': True,
+ 'system_info': {
+ 'default_user': {
+ 'name': username,
+ 'gecos': username,
+ 'sudo': ['ALL=(ALL) NOPASSWD:ALL'],
+                    'passwd': password_hash,
+ 'lock_passwd': False,
+ 'ssh_authorized_keys': ssh_keys,
+ 'shell': '/bin/bash'
+ }
+ },
+ 'network_config': network,
+ 'manage_etc_hosts': get_manage_etc_hosts(),
+ },
+ }
+
+ LOG.debug('returning DATA object:')
+ LOG.debug(data)
+
+ return data
+
+
+class DataSourceRbxCloud(sources.DataSource):
+ dsname = "RbxCloud"
+ update_events = {'network': [
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT
+ ]}
+
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.seed = None
+
+ def __str__(self):
+ root = sources.DataSource.__str__(self)
+ return "%s [seed=%s]" % (root, self.seed)
+
+ def _get_data(self):
+ """
+ Metadata is passed to the launching instance which
+ is used to perform instance configuration.
+ """
+ rbx_data = get_md()
+ self.userdata_raw = rbx_data['userdata']
+ self.metadata = rbx_data['metadata']
+ self.gratuitous_arp = rbx_data['gratuitous_arp']
+ self.cfg = rbx_data['cfg']
+ return True
+
+ @property
+ def network_config(self):
+ return self.cfg['network_config']
+
+ def get_public_ssh_keys(self):
+ return self.metadata['public-keys']
+
+ def get_userdata_raw(self):
+ return self.userdata_raw
+
+ def get_config_obj(self):
+ return self.cfg
+
+ def activate(self, cfg, is_new_instance):
+ gratuitous_arp(self.gratuitous_arp, self.distro)
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceRbxCloud, (sources.DEP_FILESYSTEM,)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
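
The gratuitous_arp entries built in read_user_data_callback() announce
each address to the gateway itself and to the gateway address offset by
two and three, using the ip2int()/int2ip() helpers above. The arithmetic,
with a made-up gateway:

    def ip2int(addr):
        parts = addr.split('.')
        return (int(parts[0]) << 24) + (int(parts[1]) << 16) + \
            (int(parts[2]) << 8) + int(parts[3])

    def int2ip(addr):
        return '.'.join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]])

    gateway = '10.0.0.1'
    targets = [gateway,
               int2ip(ip2int(gateway) + 2),   # '10.0.0.3'
               int2ip(ip2int(gateway) + 3)]   # '10.0.0.4'
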
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 9dc4ab23..83c2bf65 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -188,7 +188,7 @@ class DataSourceScaleway(sources.DataSource):
self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
self._fallback_interface = None
- self._network_config = None
+ self._network_config = sources.UNSET
def _crawl_metadata(self):
resp = url_helper.readurl(self.metadata_address,
@@ -227,7 +227,12 @@ class DataSourceScaleway(sources.DataSource):
Configure networking according to data received from the
metadata API.
"""
- if self._network_config:
+ if self._network_config is None:
+ LOG.warning('Found None as cached _network_config. '
+ 'Resetting to %s', sources.UNSET)
+ self._network_config = sources.UNSET
+
+ if self._network_config != sources.UNSET:
return self._network_config
if self._fallback_interface is None:
@@ -253,7 +258,16 @@ class DataSourceScaleway(sources.DataSource):
return self.metadata['id']
def get_public_ssh_keys(self):
- return [key['key'] for key in self.metadata['ssh_public_keys']]
+ ssh_keys = [key['key'] for key in self.metadata['ssh_public_keys']]
+
+ akeypre = "AUTHORIZED_KEY="
+ plen = len(akeypre)
+ for tag in self.metadata.get('tags', []):
+ if not tag.startswith(akeypre):
+ continue
+            ssh_keys.append(tag[plen:].replace("_", " "))
+
+ return ssh_keys
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
return self.metadata['hostname']
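
With the slice corrected to tag[plen:], the tag handling strips the
AUTHORIZED_KEY= prefix and restores spaces from underscores (presumably
because tag values cannot contain spaces). A quick check against a
fabricated tag:

    akeypre = "AUTHORIZED_KEY="
    plen = len(akeypre)
    tags = ["AUTHORIZED_KEY=ssh-ed25519_AAAAC3Nza..._user@host", "foo=1"]
    ssh_keys = [tag[plen:].replace("_", " ")
                for tag in tags if tag.startswith(akeypre)]
    print(ssh_keys)  # ['ssh-ed25519 AAAAC3Nza... user@host']
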
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 32b57cdd..cf676504 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -1,5 +1,5 @@
# Copyright (C) 2013 Canonical Ltd.
-# Copyright (c) 2018, Joyent, Inc.
+# Copyright 2019 Joyent, Inc.
#
# Author: Ben Howard <ben.howard@canonical.com>
#
@@ -34,6 +34,7 @@ from cloudinit import log as logging
from cloudinit import serial
from cloudinit import sources
from cloudinit import util
+from cloudinit.event import EventType
LOG = logging.getLogger(__name__)
@@ -178,6 +179,7 @@ class DataSourceSmartOS(sources.DataSource):
self.metadata = {}
self.network_data = None
self._network_config = None
+ self.update_events['network'].add(EventType.BOOT)
self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
@@ -319,6 +321,10 @@ class DataSourceSmartOS(sources.DataSource):
@property
def network_config(self):
+ # sources.clear_cached_data() may set _network_config to '_unset'.
+ if self._network_config == sources.UNSET:
+ self._network_config = None
+
if self._network_config is None:
if self.network_data is not None:
self._network_config = (
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index e6966b31..dd93cfd8 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -9,21 +9,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
import abc
-from collections import namedtuple
import copy
import json
import os
-import six
+from collections import namedtuple
-from cloudinit.atomic_helper import write_json
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import net
-from cloudinit.event import EventType
from cloudinit import type_utils
from cloudinit import user_data as ud
from cloudinit import util
-
+from cloudinit.atomic_helper import write_json
+from cloudinit.event import EventType
from cloudinit.filters import launch_index
from cloudinit.reporting import events
@@ -66,6 +64,13 @@ CLOUD_ID_REGION_PREFIX_MAP = {
'china': ('azure-china', lambda c: c == 'azure'), # only change azure
}
+# NetworkConfigSource represents the canonical list of network config sources
+# that cloud-init knows about. (Python 2.7 lacks PEP 435, so use a singleton
+# namedtuple as an enum; see https://stackoverflow.com/a/6971002)
+_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback', 'initramfs')
+NetworkConfigSource = namedtuple('NetworkConfigSource',
+ _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES)
+
class DataSourceNotFoundException(Exception):
pass
@@ -129,8 +134,7 @@ URLParams = namedtuple(
'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
-@six.add_metaclass(abc.ABCMeta)
-class DataSource(object):
+class DataSource(metaclass=abc.ABCMeta):
dsmode = DSMODE_NETWORK
default_locale = 'en_US.UTF-8'
@@ -153,6 +157,16 @@ class DataSource(object):
# Track the discovered fallback nic for use in configuration generation.
_fallback_interface = None
+ # The network configuration sources that should be considered for this data
+ # source. (The first source in this list that provides network
+ # configuration will be used without considering any that follow.) This
+ # should always be a subset of the members of NetworkConfigSource with no
+ # duplicate entries.
+ network_config_sources = (NetworkConfigSource.cmdline,
+ NetworkConfigSource.initramfs,
+ NetworkConfigSource.system_cfg,
+ NetworkConfigSource.ds)
+
# read_url_params
url_max_wait = -1 # max_wait < 0 means do not wait
url_timeout = 10 # timeout for each metadata url read attempt
@@ -419,7 +433,7 @@ class DataSource(object):
return self._cloud_name
if self.metadata and self.metadata.get(METADATA_CLOUD_NAME_KEY):
cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY)
- if isinstance(cloud_name, six.string_types):
+ if isinstance(cloud_name, str):
self._cloud_name = cloud_name.lower()
else:
self._cloud_name = self._get_cloud_name().lower()
@@ -474,6 +488,16 @@ class DataSource(object):
def get_public_ssh_keys(self):
return normalize_pubkey_data(self.metadata.get('public-keys'))
+ def publish_host_keys(self, hostkeys):
+ """Publish the public SSH host keys (found in /etc/ssh/*.pub).
+
+ @param hostkeys: List of host key tuples (key_type, key_value),
+ where key_type is the first field in the public key file
+ (e.g. 'ssh-rsa') and key_value is the key itself
+ (e.g. 'AAAAB3NzaC1y...').
+ """
+ pass
+
def _remap_device(self, short_name):
# LP: #611137
# the metadata service may believe that devices are named 'sda'
@@ -541,7 +565,7 @@ class DataSource(object):
defhost = "localhost"
domain = defdomain
- if not self.metadata or 'local-hostname' not in self.metadata:
+ if not self.metadata or not self.metadata.get('local-hostname'):
if metadata_only:
return None
# this is somewhat questionable really.
@@ -691,8 +715,8 @@ def normalize_pubkey_data(pubkey_data):
if not pubkey_data:
return keys
- if isinstance(pubkey_data, six.string_types):
- return str(pubkey_data).splitlines()
+ if isinstance(pubkey_data, str):
+ return pubkey_data.splitlines()
if isinstance(pubkey_data, (list, set)):
return list(pubkey_data)
@@ -702,7 +726,7 @@ def normalize_pubkey_data(pubkey_data):
# lp:506332 uec metadata service responds with
# data that makes boto populate a string for 'klist' rather
# than a list.
- if isinstance(klist, six.string_types):
+ if isinstance(klist, str):
klist = [klist]
if isinstance(klist, (list, set)):
for pkey in klist:
@@ -810,7 +834,7 @@ def convert_vendordata(data, recurse=True):
"""
if not data:
return None
- if isinstance(data, six.string_types):
+ if isinstance(data, str):
return data
if isinstance(data, list):
return copy.deepcopy(data)
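
The singleton-namedtuple pattern used for NetworkConfigSource gives
enum-like attribute access plus iteration and membership tests without
PEP 435, which Python 2.7 lacks. A quick demonstration:

    from collections import namedtuple

    _NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback', 'initramfs')
    NetworkConfigSource = namedtuple('NetworkConfigSource', _NAMES)(*_NAMES)

    print(NetworkConfigSource.cmdline)   # 'cmdline'
    print('ds' in NetworkConfigSource)   # True - membership checks work
    print(list(NetworkConfigSource))     # all five names, in order
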
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index e5696b1f..fc760581 100644..100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -7,6 +7,7 @@ import re
import socket
import struct
import time
+import textwrap
from cloudinit.net import dhcp
from cloudinit import stages
@@ -16,9 +17,162 @@ from xml.etree import ElementTree
from cloudinit import url_helper
from cloudinit import util
+from cloudinit import version
+from cloudinit import distros
+from cloudinit.reporting import events
+from cloudinit.net.dhcp import EphemeralDHCPv4
+from datetime import datetime
LOG = logging.getLogger(__name__)
+# This endpoint matches the format as found in dhcp lease files, since this
+# value is applied if the endpoint can't be found within a lease file
+DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
+
+BOOT_EVENT_TYPE = 'boot-telemetry'
+SYSTEMINFO_EVENT_TYPE = 'system-info'
+DIAGNOSTIC_EVENT_TYPE = 'diagnostic'
+
+azure_ds_reporter = events.ReportEventStack(
+ name="azure-ds",
+ description="initialize reporter for azure ds",
+ reporting_enabled=True)
+
+
+def azure_ds_telemetry_reporter(func):
+ def impl(*args, **kwargs):
+ with events.ReportEventStack(
+ name=func.__name__,
+ description=func.__name__,
+ parent=azure_ds_reporter):
+ return func(*args, **kwargs)
+ return impl
+
+
+def is_byte_swapped(previous_id, current_id):
+ """
+    Azure stores the instance ID with an incorrect byte ordering for the
+    first three parts. This corrects the byte order such that it is
+    consistent with that returned by the metadata service.
+ """
+ if previous_id == current_id:
+ return False
+
+ def swap_bytestring(s, width=2):
+        dd = textwrap.wrap(s, width)
+ dd.reverse()
+ return ''.join(dd)
+
+ parts = current_id.split('-')
+ swapped_id = '-'.join([
+ swap_bytestring(parts[0]),
+ swap_bytestring(parts[1]),
+ swap_bytestring(parts[2]),
+ parts[3],
+ parts[4]
+ ])
+
+ return previous_id == swapped_id
+
+
+@azure_ds_telemetry_reporter
+def get_boot_telemetry():
+ """Report timestamps related to kernel initialization and systemd
+ activation of cloud-init"""
+ if not distros.uses_systemd():
+ raise RuntimeError(
+ "distro not using systemd, skipping boot telemetry")
+
+ LOG.debug("Collecting boot telemetry")
+ try:
+ kernel_start = float(time.time()) - float(util.uptime())
+ except ValueError:
+ raise RuntimeError("Failed to determine kernel start timestamp")
+
+ try:
+ out, _ = util.subp(['/bin/systemctl',
+ 'show', '-p',
+ 'UserspaceTimestampMonotonic'],
+ capture=True)
+ tsm = None
+ if out and '=' in out:
+ tsm = out.split("=")[1]
+
+ if not tsm:
+ raise RuntimeError("Failed to parse "
+ "UserspaceTimestampMonotonic from systemd")
+
+ user_start = kernel_start + (float(tsm) / 1000000)
+ except util.ProcessExecutionError as e:
+ raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s"
+ % e)
+ except ValueError as e:
+ raise RuntimeError("Failed to parse "
+ "UserspaceTimestampMonotonic from systemd: %s"
+ % e)
+
+ try:
+ out, _ = util.subp(['/bin/systemctl', 'show',
+ 'cloud-init-local', '-p',
+ 'InactiveExitTimestampMonotonic'],
+ capture=True)
+ tsm = None
+ if out and '=' in out:
+ tsm = out.split("=")[1]
+ if not tsm:
+ raise RuntimeError("Failed to parse "
+ "InactiveExitTimestampMonotonic from systemd")
+
+ cloudinit_activation = kernel_start + (float(tsm) / 1000000)
+ except util.ProcessExecutionError as e:
+ raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s"
+ % e)
+ except ValueError as e:
+ raise RuntimeError("Failed to parse "
+ "InactiveExitTimestampMonotonic from systemd: %s"
+ % e)
+
+ evt = events.ReportingEvent(
+ BOOT_EVENT_TYPE, 'boot-telemetry',
+ "kernel_start=%s user_start=%s cloudinit_activation=%s" %
+ (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z',
+ datetime.utcfromtimestamp(user_start).isoformat() + 'Z',
+ datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'),
+ events.DEFAULT_EVENT_ORIGIN)
+ events.report_event(evt)
+
+ # return the event for unit testing purpose
+ return evt
+
+
+@azure_ds_telemetry_reporter
+def get_system_info():
+ """Collect and report system information"""
+ info = util.system_info()
+ evt = events.ReportingEvent(
+ SYSTEMINFO_EVENT_TYPE, 'system information',
+ "cloudinit_version=%s, kernel_version=%s, variant=%s, "
+ "distro_name=%s, distro_version=%s, flavor=%s, "
+ "python_version=%s" %
+ (version.version_string(), info['release'], info['variant'],
+ info['dist'][0], info['dist'][1], info['dist'][2],
+ info['python']), events.DEFAULT_EVENT_ORIGIN)
+ events.report_event(evt)
+
+ # return the event for unit testing purpose
+ return evt
+
+
+def report_diagnostic_event(msg):
+    """Report a diagnostic event"""
+    evt = events.ReportingEvent(
+        DIAGNOSTIC_EVENT_TYPE, 'diagnostic message',
+        msg, events.DEFAULT_EVENT_ORIGIN)
+ events.report_event(evt)
+
+ # return the event for unit testing purpose
+ return evt
+
@contextmanager
def cd(newdir):
@@ -56,14 +210,16 @@ class AzureEndpointHttpClient(object):
if secure:
headers = self.headers.copy()
headers.update(self.extra_secure_headers)
- return url_helper.read_file_or_url(url, headers=headers)
+ return url_helper.read_file_or_url(url, headers=headers, timeout=5,
+ retries=10)
def post(self, url, data=None, extra_headers=None):
headers = self.headers
if extra_headers is not None:
headers = self.headers.copy()
headers.update(extra_headers)
- return url_helper.read_file_or_url(url, data=data, headers=headers)
+ return url_helper.read_file_or_url(url, data=data, headers=headers,
+ timeout=5, retries=10)
class GoalState(object):
@@ -119,6 +275,7 @@ class OpenSSLManager(object):
def clean_up(self):
util.del_dir(self.tmpdir)
+ @azure_ds_telemetry_reporter
def generate_certificate(self):
LOG.debug('Generating certificate for communication with fabric...')
if self.certificate is not None:
@@ -138,9 +295,40 @@ class OpenSSLManager(object):
self.certificate = certificate
LOG.debug('New certificate generated.')
- def parse_certificates(self, certificates_xml):
- tag = ElementTree.fromstring(certificates_xml).find(
- './/Data')
+ @staticmethod
+ @azure_ds_telemetry_reporter
+ def _run_x509_action(action, cert):
+ cmd = ['openssl', 'x509', '-noout', action]
+ result, _ = util.subp(cmd, data=cert)
+ return result
+
+ @azure_ds_telemetry_reporter
+ def _get_ssh_key_from_cert(self, certificate):
+ pub_key = self._run_x509_action('-pubkey', certificate)
+ keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
+ ssh_key, _ = util.subp(keygen_cmd, data=pub_key)
+ return ssh_key
+
+ @azure_ds_telemetry_reporter
+ def _get_fingerprint_from_cert(self, certificate):
+ """openssl x509 formats fingerprints as so:
+ 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\
+ B6:A8:BF:27:D4:73\n'
+
+ Azure control plane passes that fingerprint as so:
+ '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473'
+ """
+ raw_fp = self._run_x509_action('-fingerprint', certificate)
+ eq = raw_fp.find('=')
+ octets = raw_fp[eq+1:-1].split(':')
+ return ''.join(octets)
+
+ @azure_ds_telemetry_reporter
+ def _decrypt_certs_from_xml(self, certificates_xml):
+ """Decrypt the certificates XML document using the our private key;
+ return the list of certs and private keys contained in the doc.
+ """
+ tag = ElementTree.fromstring(certificates_xml).find('.//Data')
certificates_content = tag.text
lines = [
b'MIME-Version: 1.0',
@@ -151,32 +339,31 @@ class OpenSSLManager(object):
certificates_content.encode('utf-8'),
]
with cd(self.tmpdir):
- with open('Certificates.p7m', 'wb') as f:
- f.write(b'\n'.join(lines))
out, _ = util.subp(
- 'openssl cms -decrypt -in Certificates.p7m -inkey'
+ 'openssl cms -decrypt -in /dev/stdin -inkey'
' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
' -password pass:'.format(**self.certificate_names),
- shell=True)
- private_keys, certificates = [], []
+ shell=True, data=b'\n'.join(lines))
+ return out
+
+ @azure_ds_telemetry_reporter
+ def parse_certificates(self, certificates_xml):
+ """Given the Certificates XML document, return a dictionary of
+ fingerprints and associated SSH keys derived from the certs."""
+ out = self._decrypt_certs_from_xml(certificates_xml)
current = []
+ keys = {}
for line in out.splitlines():
current.append(line)
if re.match(r'[-]+END .*?KEY[-]+$', line):
- private_keys.append('\n'.join(current))
+ # ignore private_keys
current = []
elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
- certificates.append('\n'.join(current))
+ certificate = '\n'.join(current)
+ ssh_key = self._get_ssh_key_from_cert(certificate)
+ fingerprint = self._get_fingerprint_from_cert(certificate)
+ keys[fingerprint] = ssh_key
current = []
- keys = []
- for certificate in certificates:
- with cd(self.tmpdir):
- public_key, _ = util.subp(
- 'openssl x509 -noout -pubkey |'
- 'ssh-keygen -i -m PKCS8 -f /dev/stdin',
- data=certificate,
- shell=True)
- keys.append(public_key)
return keys
@@ -206,7 +393,6 @@ class WALinuxAgentShim(object):
self.dhcpoptions = dhcp_options
self._endpoint = None
self.openssl_manager = None
- self.values = {}
self.lease_file = fallback_lease_file
def clean_up(self):
@@ -241,14 +427,21 @@ class WALinuxAgentShim(object):
return socket.inet_ntoa(packed_bytes)
@staticmethod
+ @azure_ds_telemetry_reporter
def _networkd_get_value_from_leases(leases_d=None):
return dhcp.networkd_get_option_from_leases(
'OPTION_245', leases_d=leases_d)
@staticmethod
+ @azure_ds_telemetry_reporter
def _get_value_from_leases_file(fallback_lease_file):
leases = []
- content = util.load_file(fallback_lease_file)
+ try:
+ content = util.load_file(fallback_lease_file)
+ except IOError as ex:
+ LOG.error("Failed to read %s: %s", fallback_lease_file, ex)
+ return None
+
LOG.debug("content is %s", content)
option_name = _get_dhcp_endpoint_option_name()
for line in content.splitlines():
@@ -263,6 +456,7 @@ class WALinuxAgentShim(object):
return leases[-1]
@staticmethod
+ @azure_ds_telemetry_reporter
def _load_dhclient_json():
dhcp_options = {}
hooks_dir = WALinuxAgentShim._get_hooks_dir()
@@ -281,6 +475,7 @@ class WALinuxAgentShim(object):
return dhcp_options
@staticmethod
+ @azure_ds_telemetry_reporter
def _get_value_from_dhcpoptions(dhcp_options):
if dhcp_options is None:
return None
@@ -294,22 +489,26 @@ class WALinuxAgentShim(object):
return _value
@staticmethod
+ @azure_ds_telemetry_reporter
def find_endpoint(fallback_lease_file=None, dhcp245=None):
value = None
if dhcp245 is not None:
value = dhcp245
LOG.debug("Using Azure Endpoint from dhcp options")
if value is None:
+ report_diagnostic_event("No Azure endpoint from dhcp options")
LOG.debug('Finding Azure endpoint from networkd...')
value = WALinuxAgentShim._networkd_get_value_from_leases()
if value is None:
# Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
# a dhclient exit hook that calls cloud-init-dhclient-hook
+ report_diagnostic_event("No Azure endpoint from networkd")
LOG.debug('Finding Azure endpoint from hook json...')
dhcp_options = WALinuxAgentShim._load_dhclient_json()
value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
if value is None:
# Fallback and check the leases file if unsuccessful
+ report_diagnostic_event("No Azure endpoint from dhclient logs")
LOG.debug("Unable to find endpoint in dhclient logs. "
" Falling back to check lease files")
if fallback_lease_file is None:
@@ -320,16 +519,22 @@ class WALinuxAgentShim(object):
fallback_lease_file)
value = WALinuxAgentShim._get_value_from_leases_file(
fallback_lease_file)
-
if value is None:
- raise ValueError('No endpoint found.')
+ msg = "No lease found; using default endpoint"
+ report_diagnostic_event(msg)
+ LOG.warning(msg)
+ value = DEFAULT_WIRESERVER_ENDPOINT
endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
- LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
+ msg = 'Azure endpoint found at %s' % endpoint_ip_address
+ report_diagnostic_event(msg)
+ LOG.debug(msg)
return endpoint_ip_address
- def register_with_azure_and_fetch_data(self):
- self.openssl_manager = OpenSSLManager()
+ @azure_ds_telemetry_reporter
+ def register_with_azure_and_fetch_data(self, pubkey_info=None):
+ if self.openssl_manager is None:
+ self.openssl_manager = OpenSSLManager()
http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
LOG.info('Registering with Azure...')
attempts = 0
@@ -337,27 +542,52 @@ class WALinuxAgentShim(object):
try:
response = http_client.get(
'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
- except Exception:
+ except Exception as e:
if attempts < 10:
time.sleep(attempts + 1)
else:
+ report_diagnostic_event(
+ "failed to register with Azure: %s" % e)
raise
else:
break
attempts += 1
LOG.debug('Successfully fetched GoalState XML.')
goal_state = GoalState(response.contents, http_client)
- public_keys = []
- if goal_state.certificates_xml is not None:
+ report_diagnostic_event("container_id %s" % goal_state.container_id)
+ ssh_keys = []
+ if goal_state.certificates_xml is not None and pubkey_info is not None:
LOG.debug('Certificate XML found; parsing out public keys.')
- public_keys = self.openssl_manager.parse_certificates(
+ keys_by_fingerprint = self.openssl_manager.parse_certificates(
goal_state.certificates_xml)
- data = {
- 'public-keys': public_keys,
- }
+ ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info)
self._report_ready(goal_state, http_client)
- return data
+ return {'public-keys': ssh_keys}
+
+ def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info):
+ """cloud-init expects a straightforward array of keys to be dropped
+ into the user's authorized_keys file. Azure control plane exposes
+ multiple public keys to the VM via wireserver. Select just the
+ user's key(s) and return them, ignoring any other certs.
+ """
+ keys = []
+ for pubkey in pubkey_info:
+ if 'value' in pubkey and pubkey['value']:
+ keys.append(pubkey['value'])
+ elif 'fingerprint' in pubkey and pubkey['fingerprint']:
+ fingerprint = pubkey['fingerprint']
+ if fingerprint in keys_by_fingerprint:
+ keys.append(keys_by_fingerprint[fingerprint])
+ else:
+ LOG.warning("ovf-env.xml specified PublicKey fingerprint "
+ "%s not found in goalstate XML", fingerprint)
+ else:
+ LOG.warning("ovf-env.xml specified PublicKey with neither "
+ "value nor fingerprint: %s", pubkey)
+ return keys
+
+ @azure_ds_telemetry_reporter
def _report_ready(self, goal_state, http_client):
LOG.debug('Reporting ready to Azure fabric.')
document = self.REPORT_READY_XML_TEMPLATE.format(
@@ -365,20 +595,49 @@ class WALinuxAgentShim(object):
container_id=goal_state.container_id,
instance_id=goal_state.instance_id,
)
- http_client.post(
- "http://{0}/machine?comp=health".format(self.endpoint),
- data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
- )
+ # Host will collect kvps when cloud-init reports ready.
+        # Some kvps might still be in the queue. We yield the scheduler
+        # to make sure we process all kvps up to this point.
+ time.sleep(0)
+ try:
+ http_client.post(
+ "http://{0}/machine?comp=health".format(self.endpoint),
+ data=document,
+ extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
+ )
+ except Exception as e:
+ report_diagnostic_event("exception while reporting ready: %s" % e)
+ raise
+
LOG.info('Reported ready to Azure fabric.')
-def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None):
+@azure_ds_telemetry_reporter
+def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
+ pubkey_info=None):
shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
dhcp_options=dhcp_opts)
try:
- return shim.register_with_azure_and_fetch_data()
+ return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info)
finally:
shim.clean_up()
+
+class EphemeralDHCPv4WithReporting(object):
+ def __init__(self, reporter, nic=None):
+ self.reporter = reporter
+ self.ephemeralDHCPv4 = EphemeralDHCPv4(iface=nic)
+
+ def __enter__(self):
+ with events.ReportEventStack(
+ name="obtain-dhcp-lease",
+ description="obtain dhcp lease",
+ parent=self.reporter):
+ return self.ephemeralDHCPv4.__enter__()
+
+ def __exit__(self, excp_type, excp_value, excp_traceback):
+ self.ephemeralDHCPv4.__exit__(
+ excp_type, excp_value, excp_traceback)
+
+
# vi: ts=4 expandtab
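
is_byte_swapped() compensates for the SMBIOS product UUID reporting its
first three dash-separated parts in a byte-swapped order on Azure, while
the metadata service reports the canonical form. A worked example of the
helper's swapping logic; both UUIDs are fabricated:

    import textwrap

    def swap_bytestring(s, width=2):
        dd = textwrap.wrap(s, width)
        dd.reverse()
        return ''.join(dd)

    current = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8'
    parts = current.split('-')
    swapped = '-'.join([swap_bytestring(parts[0]), swap_bytestring(parts[1]),
                        swap_bytestring(parts[2]), parts[3], parts[4]])
    print(swapped)  # 544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8
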
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 9c29ceac..441db506 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -12,15 +12,12 @@ import copy
import functools
import os
-import six
-
from cloudinit import ec2_utils
from cloudinit import log as logging
from cloudinit import net
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
-
from cloudinit.sources import BrokenMetadata
# See https://docs.openstack.org/user-guide/cli-config-drive.html
@@ -67,7 +64,7 @@ OS_VERSIONS = (
OS_ROCKY,
)
-PHYSICAL_TYPES = (
+KNOWN_PHYSICAL_TYPES = (
None,
'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
'bridge',
@@ -163,8 +160,7 @@ class SourceMixin(object):
return device
-@six.add_metaclass(abc.ABCMeta)
-class BaseReader(object):
+class BaseReader(metaclass=abc.ABCMeta):
def __init__(self, base_path):
self.base_path = base_path
@@ -227,7 +223,7 @@ class BaseReader(object):
"""
load_json_anytype = functools.partial(
- util.load_json, root_types=(dict, list) + six.string_types)
+ util.load_json, root_types=(dict, list, str))
def datafiles(version):
files = {}
@@ -584,25 +580,31 @@ def convert_net_json(network_json=None, known_macs=None):
if n['link'] == link['id']]:
subnet = dict((k, v) for k, v in network.items()
if k in valid_keys['subnet'])
- if 'dhcp' in network['type']:
- t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
- subnet.update({
- 'type': t,
- })
- else:
+
+ if network['type'] == 'ipv4_dhcp':
+ subnet.update({'type': 'dhcp4'})
+ elif network['type'] == 'ipv6_dhcp':
+ subnet.update({'type': 'dhcp6'})
+ elif network['type'] in ['ipv6_slaac', 'ipv6_dhcpv6-stateless',
+ 'ipv6_dhcpv6-stateful']:
+ subnet.update({'type': network['type']})
+ elif network['type'] in ['ipv4', 'ipv6']:
subnet.update({
'type': 'static',
'address': network.get('ip_address'),
})
+
+ # Enable accept_ra for stateful and legacy ipv6_dhcp types
+ if network['type'] in ['ipv6_dhcpv6-stateful', 'ipv6_dhcp']:
+ cfg.update({'accept-ra': True})
+
if network['type'] == 'ipv4':
subnet['ipv4'] = True
if network['type'] == 'ipv6':
subnet['ipv6'] = True
subnets.append(subnet)
cfg.update({'subnets': subnets})
- if link['type'] in PHYSICAL_TYPES:
- cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
- elif link['type'] in ['bond']:
+ if link['type'] in ['bond']:
params = {}
if link_mac_addr:
params['mac_address'] = link_mac_addr
@@ -641,8 +643,10 @@ def convert_net_json(network_json=None, known_macs=None):
curinfo.update({'mac': link['vlan_mac_address'],
'name': name})
else:
- raise ValueError(
- 'Unknown network_data link type: %s' % link['type'])
+ if link['type'] not in KNOWN_PHYSICAL_TYPES:
+ LOG.warning('Unknown network_data link type (%s); treating as'
+ ' physical', link['type'])
+ cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
config.append(cfg)
link_id_info[curinfo['id']] = curinfo
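
The rewritten dispatch in convert_net_json() maps each network_data type
onto a subnet type explicitly rather than substring-matching on 'dhcp'.
Summarized as a dict - a restatement of the branches above, not code from
the module itself:

    SUBNET_TYPE = {
        'ipv4_dhcp': 'dhcp4',
        'ipv6_dhcp': 'dhcp6',
        'ipv6_slaac': 'ipv6_slaac',                        # passed through
        'ipv6_dhcpv6-stateless': 'ipv6_dhcpv6-stateless',  # passed through
        'ipv6_dhcpv6-stateful': 'ipv6_dhcpv6-stateful',    # passed through
        'ipv4': 'static',   # also carries the ip_address
        'ipv6': 'static',
    }
    # 'ipv6_dhcpv6-stateful' and legacy 'ipv6_dhcp' additionally set
    # accept-ra: True on the link config.
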
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
index a7d4ad91..9f14770e 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -1,5 +1,5 @@
# Copyright (C) 2017 Canonical Ltd.
-# Copyright (C) 2017 VMware Inc.
+# Copyright (C) 2017-2019 VMware Inc.
#
# Author: Maitreyee Saikia <msaikia@vmware.com>
#
@@ -8,7 +8,6 @@
import logging
import os
import stat
-from textwrap import dedent
from cloudinit import util
@@ -20,12 +19,15 @@ class CustomScriptNotFound(Exception):
class CustomScriptConstant(object):
- RC_LOCAL = "/etc/rc.local"
- POST_CUST_TMP_DIR = "/root/.customization"
- POST_CUST_RUN_SCRIPT_NAME = "post-customize-guest.sh"
- POST_CUST_RUN_SCRIPT = os.path.join(POST_CUST_TMP_DIR,
- POST_CUST_RUN_SCRIPT_NAME)
- POST_REBOOT_PENDING_MARKER = "/.guest-customization-post-reboot-pending"
+ CUSTOM_TMP_DIR = "/root/.customization"
+
+ # The user defined custom script
+ CUSTOM_SCRIPT_NAME = "customize.sh"
+ CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR,
+ CUSTOM_SCRIPT_NAME)
+ POST_CUSTOM_PENDING_MARKER = "/.guest-customization-post-reboot-pending"
+ # The cc_scripts_per_instance script to launch custom script
+ POST_CUSTOM_SCRIPT_NAME = "post-customize-guest.sh"
class RunCustomScript(object):
@@ -39,10 +41,19 @@ class RunCustomScript(object):
raise CustomScriptNotFound("Script %s not found!! "
"Cannot execute custom script!"
% self.scriptpath)
+
+ util.ensure_dir(CustomScriptConstant.CUSTOM_TMP_DIR)
+
+ LOG.debug("Copying custom script to %s",
+ CustomScriptConstant.CUSTOM_SCRIPT)
+ util.copy(self.scriptpath, CustomScriptConstant.CUSTOM_SCRIPT)
+
# Strip any CR characters from the decoded script
- util.load_file(self.scriptpath).replace("\r", "")
- st = os.stat(self.scriptpath)
- os.chmod(self.scriptpath, st.st_mode | stat.S_IEXEC)
+ content = util.load_file(
+ CustomScriptConstant.CUSTOM_SCRIPT).replace("\r", "")
+ util.write_file(CustomScriptConstant.CUSTOM_SCRIPT,
+ content,
+ mode=0o544)
class PreCustomScript(RunCustomScript):
@@ -50,104 +61,34 @@ class PreCustomScript(RunCustomScript):
"""Executing custom script with precustomization argument."""
LOG.debug("Executing pre-customization script")
self.prepare_script()
- util.subp(["/bin/sh", self.scriptpath, "precustomization"])
+ util.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"])
class PostCustomScript(RunCustomScript):
- def __init__(self, scriptname, directory):
+ def __init__(self, scriptname, directory, ccScriptsDir):
super(PostCustomScript, self).__init__(scriptname, directory)
- # Determine when to run custom script. When postreboot is True,
- # the user uploaded script will run as part of rc.local after
- # the machine reboots. This is determined by presence of rclocal.
- # When postreboot is False, script will run as part of cloud-init.
- self.postreboot = False
-
- def _install_post_reboot_agent(self, rclocal):
- """
- Install post-reboot agent for running custom script after reboot.
- As part of this process, we are editing the rclocal file to run a
- VMware script, which in turn is resposible for handling the user
- script.
- @param: path to rc local.
- """
- LOG.debug("Installing post-reboot customization from %s to %s",
- self.directory, rclocal)
- if not self.has_previous_agent(rclocal):
- LOG.info("Adding post-reboot customization agent to rc.local")
- new_content = dedent("""
- # Run post-reboot guest customization
- /bin/sh %s
- exit 0
- """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT
- existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '')
- st = os.stat(rclocal)
- # "x" flag should be set
- mode = st.st_mode | stat.S_IEXEC
- util.write_file(rclocal, existing_rclocal + new_content, mode)
-
- else:
- # We don't need to update rclocal file everytime a customization
- # is requested. It just needs to be done for the first time.
- LOG.info("Post-reboot guest customization agent is already "
- "registered in rc.local")
- LOG.debug("Installing post-reboot customization agent finished: %s",
- self.postreboot)
-
- def has_previous_agent(self, rclocal):
- searchstring = "# Run post-reboot guest customization"
- if searchstring in open(rclocal).read():
- return True
- return False
-
- def find_rc_local(self):
- """
- Determine if rc local is present.
- """
- rclocal = ""
- if os.path.exists(CustomScriptConstant.RC_LOCAL):
- LOG.debug("rc.local detected.")
- # resolving in case of symlink
- rclocal = os.path.realpath(CustomScriptConstant.RC_LOCAL)
- LOG.debug("rc.local resolved to %s", rclocal)
- else:
- LOG.warning("Can't find rc.local, post-customization "
- "will be run before reboot")
- return rclocal
-
- def install_agent(self):
- rclocal = self.find_rc_local()
- if rclocal:
- self._install_post_reboot_agent(rclocal)
- self.postreboot = True
+ self.ccScriptsDir = ccScriptsDir
+ self.ccScriptPath = os.path.join(
+ ccScriptsDir,
+ CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME)
def execute(self):
"""
- This method executes post-customization script before or after reboot
- based on the presence of rc local.
+        This method copies the post-customization run script to the
+        cc_scripts_per_instance directory, which then runs the
+        post-customization script.
"""
self.prepare_script()
- self.install_agent()
- if not self.postreboot:
- LOG.warning("Executing post-customization script inline")
- util.subp(["/bin/sh", self.scriptpath, "postcustomization"])
- else:
- LOG.debug("Scheduling custom script to run post reboot")
- if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR):
- os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR)
- # Script "post-customize-guest.sh" and user uploaded script are
- # are present in the same directory and needs to copied to a temp
- # directory to be executed post reboot. User uploaded script is
- # saved as customize.sh in the temp directory.
- # post-customize-guest.sh excutes customize.sh after reboot.
- LOG.debug("Copying post-customization script")
- util.copy(self.scriptpath,
- CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh")
- LOG.debug("Copying script to run post-customization script")
- util.copy(
- os.path.join(self.directory,
- CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME),
- CustomScriptConstant.POST_CUST_RUN_SCRIPT)
- LOG.info("Creating post-reboot pending marker")
- util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER)
+
+ LOG.debug("Copying post customize run script to %s",
+ self.ccScriptPath)
+ util.copy(
+ os.path.join(self.directory,
+ CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME),
+ self.ccScriptPath)
+ st = os.stat(self.ccScriptPath)
+ os.chmod(self.ccScriptPath, st.st_mode | stat.S_IEXEC)
+ LOG.info("Creating post customization pending marker")
+ util.ensure_file(CustomScriptConstant.POST_CUSTOM_PENDING_MARKER)
# vi: ts=4 expandtab
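
Editor's note: a hedged usage sketch of the reworked flow above. The uploaded
script is now staged into cloud-init's per-instance scripts directory so the
stock scripts-per-instance module executes it, replacing the old rc.local
post-reboot agent. The paths below are illustrative assumptions, not values
from this patch.

    from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
        PostCustomScript)

    # Directory paths are assumptions for illustration only.
    postcust = PostCustomScript(
        "customize.sh",                          # user-uploaded script name
        "/var/run/vmware-imc",                   # IMC staging directory
        "/var/lib/cloud/scripts/per-instance")   # ccScriptsDir
    # Copies the run script, sets the exec bit, drops the pending marker.
    postcust.execute()
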
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
index db5a00dc..65ae7390 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
@@ -10,5 +10,6 @@ class GuestCustErrorEnum(object):
"""Specifies different errors of Guest Customization engine"""
GUESTCUST_ERROR_SUCCESS = 0
+ GUESTCUST_ERROR_SCRIPT_DISABLED = 6
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index a590f323..3d369d04 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -7,6 +7,7 @@
import logging
import os
+import re
import time
from cloudinit import util
@@ -117,4 +118,40 @@ def enable_nics(nics):
logger.warning("Can't connect network interfaces after %d attempts",
enableNicsWaitRetries)
+
+def get_tools_config(section, key, defaultVal):
+ """ Return the value of [section] key from VMTools configuration.
+
+ @param section: String name of the section to read from VMTools config
+ @param key: String name of the key to read within [section]
+ @param defaultVal: Value to return when the lookup fails
+ @returns: String value of key in [section], or defaultVal if the key
+ or [section] is not present, or vmware-toolbox-cmd is
+ not installed.
+ """
+
+ if not util.which('vmware-toolbox-cmd'):
+ logger.debug(
+ 'vmware-toolbox-cmd not installed, returning default value')
+ return defaultVal
+
+ retValue = defaultVal
+ cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key]
+
+ try:
+ (outText, _) = util.subp(cmd)
+ m = re.match(r'([^=]+)=(.*)', outText)
+ if m:
+ retValue = m.group(2).strip()
+ logger.debug("Get tools config: [%s] %s = %s",
+ section, key, retValue)
+ else:
+ logger.debug(
+ "Tools config: [%s] %s is not found, return default value: %s",
+ section, key, retValue)
+ except util.ProcessExecutionError as e:
+ logger.error("Failed running %s[%s]", cmd, e.exit_code)
+ logger.exception(e)
+
+ return retValue
+
+
# vi: ts=4 expandtab
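
Editor's note: a hedged sketch tying the two helpers above together — a
caller can consult the VMTools configuration before running an uploaded
script and surface the new GUESTCUST_ERROR_SCRIPT_DISABLED code when scripts
are switched off. The [deployPkg] enable-custom-scripts section/key here is
an assumption for illustration, not taken from this patch.

    from cloudinit.sources.helpers.vmware.imc.guestcust_error import (
        GuestCustErrorEnum)
    from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
        get_tools_config)

    # Section and key names are illustrative assumptions.
    enabled = get_tools_config("deployPkg", "enable-custom-scripts", "true")
    if enabled.lower() != "true":
        # Report the new error code instead of executing the script.
        status = GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED
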
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index 6378e98b..f73b37ed 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -3,7 +3,6 @@
import copy
import inspect
import os
-import six
import stat
from cloudinit.event import EventType
@@ -13,7 +12,7 @@ from cloudinit.sources import (
EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE,
METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource,
canonical_cloud_id, redact_sensitive_keys)
-from cloudinit.tests.helpers import CiTestCase, skipIf, mock
+from cloudinit.tests.helpers import CiTestCase, mock
from cloudinit.user_data import UserDataProcessor
from cloudinit import util
@@ -422,7 +421,6 @@ class TestDataSource(CiTestCase):
{'network_json': 'is good'},
instance_data['ds']['network_json'])
- @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes")
def test_get_data_base64encodes_unserializable_bytes(self):
"""On py3, get_data base64encodes any unserializable content."""
tmp = self.tmp_dir()
@@ -440,37 +438,6 @@ class TestDataSource(CiTestCase):
{'key1': 'val1', 'key2': {'key2.1': 'EjM='}},
instance_json['ds']['meta_data'])
- @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes")
- def test_get_data_handles_bytes_values(self):
- """On py2 get_data handles bytes values without having to b64encode."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
- self.assertTrue(datasource.get_data())
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- content = util.load_file(json_file)
- instance_json = util.load_json(content)
- self.assertEqual([], instance_json['base64_encoded_keys'])
- self.assertEqual(
- {'key1': 'val1', 'key2': {'key2.1': '\x123'}},
- instance_json['ds']['meta_data'])
-
- @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8")
- def test_non_utf8_encoding_logs_warning(self):
- """When non-utf-8 values exist in py2 instance-data is not written."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
- self.assertTrue(datasource.get_data())
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- self.assertFalse(os.path.exists(json_file))
- self.assertIn(
- "WARNING: Error persisting instance-data.json: 'utf8' codec can't"
- " decode byte 0xaa in position 2: invalid start byte",
- self.logs.getvalue())
-
def test_get_hostname_subclass_support(self):
"""Validate get_hostname signature on all subclasses of DataSource."""
# Use inspect.getfullargspec when we drop py2.6 and py2.7
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
index 97d62947..abf3d359 100644
--- a/cloudinit/sources/tests/test_oracle.py
+++ b/cloudinit/sources/tests/test_oracle.py
@@ -1,27 +1,69 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.sources import DataSourceOracle as oracle
-from cloudinit.sources import BrokenMetadata
+from cloudinit.sources import BrokenMetadata, NetworkConfigSource
from cloudinit import helpers
from cloudinit.tests import helpers as test_helpers
from textwrap import dedent
import argparse
+import copy
import httpretty
import json
-import mock
import os
-import six
import uuid
+from unittest import mock
DS_PATH = "cloudinit.sources.DataSourceOracle"
MD_VER = "2013-10-17"
+# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Bare Metal
+# machine
+# with a secondary VNIC attached (vnicId truncated for Python line length)
+OPC_BM_SECONDARY_VNIC_RESPONSE = """\
+[ {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtyvcucqkhdqmgjszebxe4hrb!!TRUNCATED||",
+ "privateIp" : "10.0.0.8",
+ "vlanTag" : 0,
+ "macAddr" : "90:e2:ba:d4:f1:68",
+ "virtualRouterIp" : "10.0.0.1",
+ "subnetCidrBlock" : "10.0.0.0/24",
+ "nicIndex" : 0
+}, {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtfmkxjdy2sqidndiwrsg63zf!!TRUNCATED||",
+ "privateIp" : "10.0.4.5",
+ "vlanTag" : 1,
+ "macAddr" : "02:00:17:05:CF:51",
+ "virtualRouterIp" : "10.0.4.1",
+ "subnetCidrBlock" : "10.0.4.0/24",
+ "nicIndex" : 0
+} ]"""
+
+# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Virtual Machine
+# with a secondary VNIC attached
+OPC_VM_SECONDARY_VNIC_RESPONSE = """\
+[ {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtch72z5pd76cc2636qeqh7z_truncated",
+ "privateIp" : "10.0.0.230",
+ "vlanTag" : 1039,
+ "macAddr" : "02:00:17:05:D1:DB",
+ "virtualRouterIp" : "10.0.0.1",
+ "subnetCidrBlock" : "10.0.0.0/24"
+}, {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljt4iew3gwmvrwrhhf3bp5drj_truncated",
+ "privateIp" : "10.0.0.231",
+ "vlanTag" : 1041,
+ "macAddr" : "00:00:17:02:2B:B1",
+ "virtualRouterIp" : "10.0.0.1",
+ "subnetCidrBlock" : "10.0.0.0/24"
+} ]"""
+
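
Editor's note: a minimal sketch of how one secondary VNIC entry above maps
onto a v1 network config item, matching what the tests below assert
(type physical, lower-cased MAC, MTU 9000, static address). The interface
name and the subnet type 'static' are assumptions; in the datasource the
name comes from get_interfaces_by_mac.

    import json

    vnic = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)[1]  # secondary VNIC
    nic_name = 'ens3'  # assumed; resolved via get_interfaces_by_mac
    secondary_nic_cfg = {
        'name': nic_name,
        'type': 'physical',
        'mac_address': vnic['macAddr'].lower(),
        'mtu': 9000,
        # 'static' subnet type assumed here; tests assert only the address.
        'subnets': [{'type': 'static', 'address': vnic['privateIp']}],
    }
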
class TestDataSourceOracle(test_helpers.CiTestCase):
"""Test datasource DataSourceOracle."""
+ with_logs = True
+
ds_class = oracle.DataSourceOracle
my_uuid = str(uuid.uuid4())
@@ -79,6 +121,16 @@ class TestDataSourceOracle(test_helpers.CiTestCase):
self.assertEqual(
'metadata (http://169.254.169.254/openstack/)', ds.subplatform)
+ def test_sys_cfg_can_enable_configure_secondary_nics(self):
+ # Confirm that behaviour is toggled by sys_cfg
+ ds, _mocks = self._get_ds()
+ self.assertFalse(ds.ds_cfg['configure_secondary_nics'])
+
+ sys_cfg = {
+ 'datasource': {'Oracle': {'configure_secondary_nics': True}}}
+ ds, _mocks = self._get_ds(sys_cfg=sys_cfg)
+ self.assertTrue(ds.ds_cfg['configure_secondary_nics'])
+
@mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
def test_without_userdata(self, m_is_iscsi_root):
"""If no user-data is provided, it should not be in return dict."""
@@ -133,9 +185,12 @@ class TestDataSourceOracle(test_helpers.CiTestCase):
self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
self.assertEqual(my_userdata, ds.userdata_raw)
- @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config")
+ @mock.patch(DS_PATH + "._add_network_config_from_opc_imds",
+ side_effect=lambda network_config: network_config)
+ @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
@mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_network_cmdline(self, m_is_iscsi_root, m_cmdline_config):
+ def test_network_cmdline(self, m_is_iscsi_root, m_initramfs_config,
+ _m_add_network_config_from_opc_imds):
"""network_config should read kernel cmdline."""
distro = mock.MagicMock()
ds, _ = self._get_ds(distro=distro, patches={
@@ -145,15 +200,18 @@ class TestDataSourceOracle(test_helpers.CiTestCase):
MD_VER: {'system_uuid': self.my_uuid,
'meta_data': self.my_md}}}})
ncfg = {'version': 1, 'config': [{'a': 'b'}]}
- m_cmdline_config.return_value = ncfg
+ m_initramfs_config.return_value = ncfg
self.assertTrue(ds._get_data())
self.assertEqual(ncfg, ds.network_config)
- m_cmdline_config.assert_called_once_with()
+ self.assertEqual([mock.call()], m_initramfs_config.call_args_list)
self.assertFalse(distro.generate_fallback_config.called)
- @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config")
+ @mock.patch(DS_PATH + "._add_network_config_from_opc_imds",
+ side_effect=lambda network_config: network_config)
+ @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
@mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_network_fallback(self, m_is_iscsi_root, m_cmdline_config):
+ def test_network_fallback(self, m_is_iscsi_root, m_initramfs_config,
+ _m_add_network_config_from_opc_imds):
"""test that fallback network is generated if no kernel cmdline."""
distro = mock.MagicMock()
ds, _ = self._get_ds(distro=distro, patches={
@@ -163,18 +221,95 @@ class TestDataSourceOracle(test_helpers.CiTestCase):
MD_VER: {'system_uuid': self.my_uuid,
'meta_data': self.my_md}}}})
ncfg = {'version': 1, 'config': [{'a': 'b'}]}
- m_cmdline_config.return_value = None
+ m_initramfs_config.return_value = None
self.assertTrue(ds._get_data())
ncfg = {'version': 1, 'config': [{'distro1': 'value'}]}
distro.generate_fallback_config.return_value = ncfg
self.assertEqual(ncfg, ds.network_config)
- m_cmdline_config.assert_called_once_with()
+ self.assertEqual([mock.call()], m_initramfs_config.call_args_list)
distro.generate_fallback_config.assert_called_once_with()
- self.assertEqual(1, m_cmdline_config.call_count)
# test that the result got cached, and the methods not re-called.
self.assertEqual(ncfg, ds.network_config)
- self.assertEqual(1, m_cmdline_config.call_count)
+ self.assertEqual(1, m_initramfs_config.call_count)
+
+ @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
+ @mock.patch(DS_PATH + ".cmdline.read_initramfs_config",
+ return_value={'some': 'config'})
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_secondary_nics_added_to_network_config_if_enabled(
+ self, _m_is_iscsi_root, _m_initramfs_config,
+ m_add_network_config_from_opc_imds):
+
+ needle = object()
+
+ def network_config_side_effect(network_config):
+ network_config['secondary_added'] = needle
+
+ m_add_network_config_from_opc_imds.side_effect = (
+ network_config_side_effect)
+
+ distro = mock.MagicMock()
+ ds, _ = self._get_ds(distro=distro, patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md}}}})
+ ds.ds_cfg['configure_secondary_nics'] = True
+ self.assertEqual(needle, ds.network_config['secondary_added'])
+
+ @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
+ @mock.patch(DS_PATH + ".cmdline.read_initramfs_config",
+ return_value={'some': 'config'})
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_secondary_nics_not_added_to_network_config_by_default(
+ self, _m_is_iscsi_root, _m_initramfs_config,
+ m_add_network_config_from_opc_imds):
+
+ def network_config_side_effect(network_config):
+ network_config['secondary_added'] = True
+
+ m_add_network_config_from_opc_imds.side_effect = (
+ network_config_side_effect)
+
+ distro = mock.MagicMock()
+ ds, _ = self._get_ds(distro=distro, patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md}}}})
+ self.assertNotIn('secondary_added', ds.network_config)
+
+ @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
+ @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_secondary_nic_failure_isnt_blocking(
+ self, _m_is_iscsi_root, m_initramfs_config,
+ m_add_network_config_from_opc_imds):
+
+ m_add_network_config_from_opc_imds.side_effect = Exception()
+
+ distro = mock.MagicMock()
+ ds, _ = self._get_ds(distro=distro, patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md}}}})
+ ds.ds_cfg['configure_secondary_nics'] = True
+ self.assertEqual(ds.network_config, m_initramfs_config.return_value)
+ self.assertIn('Failed to fetch secondary network configuration',
+ self.logs.getvalue())
+
+ def test_ds_network_cfg_preferred_over_initramfs(self):
+ """Ensure that DS net config is preferred over initramfs config"""
+ network_config_sources = oracle.DataSourceOracle.network_config_sources
+ self.assertLess(
+ network_config_sources.index(NetworkConfigSource.ds),
+ network_config_sources.index(NetworkConfigSource.initramfs)
+ )
@mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4()))
@@ -198,7 +333,7 @@ class TestReadMetaData(test_helpers.HttprettyTestCase):
for k, v in data.items():
httpretty.register_uri(
httpretty.GET, self.mdurl + MD_VER + "/" + k,
- v if not isinstance(v, six.text_type) else v.encode('utf-8'))
+ v if not isinstance(v, str) else v.encode('utf-8'))
def test_broken_no_sys_uuid(self, m_read_system_uuid):
"""Datasource requires ability to read system_uuid and true return."""
@@ -336,4 +471,265 @@ class TestLoadIndex(test_helpers.CiTestCase):
oracle._load_index("\n".join(["meta_data.json", "user_data"])))
+class TestNetworkConfigFromOpcImds(test_helpers.CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestNetworkConfigFromOpcImds, self).setUp()
+ self.add_patch(DS_PATH + '.readurl', 'm_readurl')
+ self.add_patch(DS_PATH + '.get_interfaces_by_mac',
+ 'm_get_interfaces_by_mac')
+
+ def test_failure_to_readurl(self):
+ # readurl failures should just bubble out to the caller
+ self.m_readurl.side_effect = Exception('oh no')
+ with self.assertRaises(Exception) as excinfo:
+ oracle._add_network_config_from_opc_imds({})
+ self.assertEqual(str(excinfo.exception), 'oh no')
+
+ def test_empty_response(self):
+ # empty response error should just bubble out to the caller
+ self.m_readurl.return_value = ''
+ with self.assertRaises(Exception):
+ oracle._add_network_config_from_opc_imds([])
+
+ def test_invalid_json(self):
+ # invalid JSON error should just bubble out to the caller
+ self.m_readurl.return_value = '{'
+ with self.assertRaises(Exception):
+ oracle._add_network_config_from_opc_imds([])
+
+ def test_no_secondary_nics_does_not_mutate_input(self):
+ self.m_readurl.return_value = json.dumps([{}])
+ # We test this by passing in a non-dict to ensure that no dict
+ # operations are used; failure would be seen as exceptions
+ oracle._add_network_config_from_opc_imds(object())
+
+ def test_bare_metal_machine_skipped(self):
+ # nicIndex in the first entry indicates a bare metal machine
+ self.m_readurl.return_value = OPC_BM_SECONDARY_VNIC_RESPONSE
+ # We test this by passing in a non-dict to ensure that no dict
+ # operations are used
+ self.assertFalse(oracle._add_network_config_from_opc_imds(object()))
+ self.assertIn('bare metal machine', self.logs.getvalue())
+
+ def test_missing_mac_skipped(self):
+ self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
+ self.m_get_interfaces_by_mac.return_value = {}
+
+ network_config = {'version': 1, 'config': [{'primary': 'nic'}]}
+ oracle._add_network_config_from_opc_imds(network_config)
+
+ self.assertEqual(1, len(network_config['config']))
+ self.assertIn(
+ 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping',
+ self.logs.getvalue())
+
+ def test_missing_mac_skipped_v2(self):
+ self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
+ self.m_get_interfaces_by_mac.return_value = {}
+
+ network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}}
+ oracle._add_network_config_from_opc_imds(network_config)
+
+ self.assertEqual(1, len(network_config['ethernets']))
+ self.assertIn(
+ 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping',
+ self.logs.getvalue())
+
+ def test_secondary_nic(self):
+ self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+
+ network_config = {'version': 1, 'config': [{'primary': 'nic'}]}
+ oracle._add_network_config_from_opc_imds(network_config)
+
+ # The input is mutated
+ self.assertEqual(2, len(network_config['config']))
+
+ secondary_nic_cfg = network_config['config'][1]
+ self.assertEqual(nic_name, secondary_nic_cfg['name'])
+ self.assertEqual('physical', secondary_nic_cfg['type'])
+ self.assertEqual(mac_addr, secondary_nic_cfg['mac_address'])
+ self.assertEqual(9000, secondary_nic_cfg['mtu'])
+
+ self.assertEqual(1, len(secondary_nic_cfg['subnets']))
+ subnet_cfg = secondary_nic_cfg['subnets'][0]
+ # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
+ self.assertEqual('10.0.0.231', subnet_cfg['address'])
+
+ def test_secondary_nic_v2(self):
+ self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+
+ network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}}
+ oracle._add_network_config_from_opc_imds(network_config)
+
+ # The input is mutated
+ self.assertEqual(2, len(network_config['ethernets']))
+
+ secondary_nic_cfg = network_config['ethernets']['ens3']
+ self.assertFalse(secondary_nic_cfg['dhcp4'])
+ self.assertFalse(secondary_nic_cfg['dhcp6'])
+ self.assertEqual(mac_addr, secondary_nic_cfg['match']['macaddress'])
+ self.assertEqual(9000, secondary_nic_cfg['mtu'])
+
+ self.assertEqual(1, len(secondary_nic_cfg['addresses']))
+ # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
+ self.assertEqual('10.0.0.231', secondary_nic_cfg['addresses'][0])
+
+
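
Editor's note: the next class exercises _ensure_netfailover_safe. A minimal
sketch of the v1 behaviour it asserts, assuming is_netfail_master(name)
identifies the failover master device — the master keeps its entry but loses
its pinned MAC:

    def strip_netfail_master_macs_v1(netcfg, is_netfail_master):
        # Drop 'mac_address' from physical entries whose device is a
        # netfailover master; other entry types are left untouched.
        for entry in netcfg.get('config', []):
            if entry.get('type') != 'physical':
                continue
            if is_netfail_master(entry['name']):
                entry.pop('mac_address', None)
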
+class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestNetworkConfigFiltersNetFailover, self).setUp()
+ self.add_patch(DS_PATH + '.get_interfaces_by_mac',
+ 'm_get_interfaces_by_mac')
+ self.add_patch(DS_PATH + '.is_netfail_master', 'm_netfail_master')
+
+ def test_ignore_bogus_network_config(self):
+ netcfg = {'something': 'here'}
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+
+ def test_ignore_network_config_unknown_versions(self):
+ netcfg = {'something': 'here', 'version': 3}
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+
+ def test_checks_v1_type_physical_interfaces(self):
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {'version': 1, 'config': [
+ {'type': 'physical', 'name': nic_name, 'mac_address': mac_addr,
+ 'subnets': [{'type': 'dhcp4'}]}]}
+ passed_netcfg = copy.copy(netcfg)
+ self.m_netfail_master.return_value = False
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual([mock.call(nic_name)],
+ self.m_netfail_master.call_args_list)
+
+ def test_checks_v1_skips_non_phys_interfaces(self):
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'bond0'
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {'version': 1, 'config': [
+ {'type': 'bond', 'name': nic_name, 'mac_address': mac_addr,
+ 'subnets': [{'type': 'dhcp4'}]}]}
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual(0, self.m_netfail_master.call_count)
+
+ def test_removes_master_mac_property_v1(self):
+ nic_master, mac_master = 'ens3', self.random_string()
+ nic_other, mac_other = 'ens7', self.random_string()
+ nic_extra, mac_extra = 'enp0s1f2', self.random_string()
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_master: nic_master,
+ mac_other: nic_other,
+ mac_extra: nic_extra,
+ }
+ netcfg = {'version': 1, 'config': [
+ {'type': 'physical', 'name': nic_master,
+ 'mac_address': mac_master},
+ {'type': 'physical', 'name': nic_other, 'mac_address': mac_other},
+ {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra},
+ ]}
+
+ def _is_netfail_master(iface):
+ if iface == 'ens3':
+ return True
+ return False
+ self.m_netfail_master.side_effect = _is_netfail_master
+ expected_cfg = {'version': 1, 'config': [
+ {'type': 'physical', 'name': nic_master},
+ {'type': 'physical', 'name': nic_other, 'mac_address': mac_other},
+ {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra},
+ ]}
+ oracle._ensure_netfailover_safe(netcfg)
+ self.assertEqual(expected_cfg, netcfg)
+
+ def test_checks_v2_type_ethernet_interfaces(self):
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {'version': 2, 'ethernets': {
+ nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name,
+ 'match': {'macaddress': mac_addr}}}}
+ passed_netcfg = copy.copy(netcfg)
+ self.m_netfail_master.return_value = False
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual([mock.call(nic_name)],
+ self.m_netfail_master.call_args_list)
+
+ def test_skips_v2_non_ethernet_interfaces(self):
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'wlps0'
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {'version': 2, 'wifis': {
+ nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name,
+ 'match': {'macaddress': mac_addr}}}}
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual(0, self.m_netfail_master.call_count)
+
+ def test_removes_master_mac_property_v2(self):
+ nic_master, mac_master = 'ens3', self.random_string()
+ nic_other, mac_other = 'ens7', self.random_string()
+ nic_extra, mac_extra = 'enp0s1f2', self.random_string()
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_master: nic_master,
+ mac_other: nic_other,
+ mac_extra: nic_extra,
+ }
+ netcfg = {'version': 2, 'ethernets': {
+ nic_extra: {'dhcp4': True, 'set-name': nic_extra,
+ 'match': {'macaddress': mac_extra}},
+ nic_other: {'dhcp4': True, 'set-name': nic_other,
+ 'match': {'macaddress': mac_other}},
+ nic_master: {'dhcp4': True, 'set-name': nic_master,
+ 'match': {'macaddress': mac_master}},
+ }}
+
+ def _is_netfail_master(iface):
+ if iface == 'ens3':
+ return True
+ return False
+ self.m_netfail_master.side_effect = _is_netfail_master
+
+ expected_cfg = {'version': 2, 'ethernets': {
+ nic_master: {'dhcp4': True, 'match': {'name': nic_master}},
+ nic_extra: {'dhcp4': True, 'set-name': nic_extra,
+ 'match': {'macaddress': mac_extra}},
+ nic_other: {'dhcp4': True, 'set-name': nic_other,
+ 'match': {'macaddress': mac_other}},
+ }}
+ oracle._ensure_netfailover_safe(netcfg)
+ self.assertEqual(expected_cfg, netcfg)
+
+
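
Editor's note: a matching sketch of the v2 branch asserted above, under the
same is_netfail_master assumption — the master's ethernet stanza is
re-matched by interface name so the shared failover MAC is not pinned, and
its set-name is dropped:

    def strip_netfail_master_macs_v2(netcfg, is_netfail_master):
        # Re-match netfail masters by name; non-master NICs keep their
        # MAC-based match and set-name.
        for name, cfg in netcfg.get('ethernets', {}).items():
            if is_netfail_master(name):
                cfg.pop('set-name', None)
                cfg['match'] = {'name': name}
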
# vi: ts=4 expandtab