Diffstat (limited to 'cloudinit/sources')
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py              24
-rw-r--r--  cloudinit/sources/DataSourceAzure.py                256
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py            2
-rw-r--r--  cloudinit/sources/DataSourceIBMCloud.py              13
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py             2
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py              9
-rw-r--r--  cloudinit/sources/DataSourceOracle.py               233
-rw-r--r--  cloudinit/sources/DataSourceScaleway.py              54
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py               31
-rw-r--r--  cloudinit/sources/__init__.py                       134
-rw-r--r--  cloudinit/sources/helpers/openstack.py               38
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py    2
-rw-r--r--  cloudinit/sources/tests/test_init.py                192
-rw-r--r--  cloudinit/sources/tests/test_oracle.py              331
14 files changed, 1183 insertions(+), 138 deletions(-)
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 24fd65ff..8cd312d0 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -181,27 +181,18 @@ class DataSourceAltCloud(sources.DataSource):
# modprobe floppy
try:
- cmd = CMD_PROBE_FLOPPY
- (cmd_out, _err) = util.subp(cmd)
- LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
+ modprobe_floppy()
except ProcessExecutionError as e:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
- return False
- except OSError as e:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
+ util.logexc(LOG, 'Failed modprobe: %s', e)
return False
floppy_dev = '/dev/fd0'
# udevadm settle for floppy device
try:
- (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5)
- LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
- except ProcessExecutionError as e:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
- return False
- except OSError as e:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
+ util.udevadm_settle(exists=floppy_dev, timeout=5)
+ except (ProcessExecutionError, OSError) as e:
+            util.logexc(LOG, 'Failed udevadm_settle: %s', e)
return False
try:
@@ -258,6 +249,11 @@ class DataSourceAltCloud(sources.DataSource):
return False
+def modprobe_floppy():
+ out, _err = util.subp(CMD_PROBE_FLOPPY)
+    LOG.debug('Command: %s\nOutput: %s', ' '.join(CMD_PROBE_FLOPPY), out)
+
+
# Used to match classes to dependencies
# Source DataSourceAltCloud does not really depend on networking.
# In the future 'dsmode' like behavior can be added to offer user
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 7007d9ea..783445e1 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -8,6 +8,7 @@ import base64
import contextlib
import crypt
from functools import partial
+import json
import os
import os.path
import re
@@ -17,6 +18,7 @@ import xml.etree.ElementTree as ET
from cloudinit import log as logging
from cloudinit import net
+from cloudinit.event import EventType
from cloudinit.net.dhcp import EphemeralDHCPv4
from cloudinit import sources
from cloudinit.sources.helpers.azure import get_metadata_from_fabric
@@ -49,7 +51,17 @@ DEFAULT_FS = 'ext4'
AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
-IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"
+AGENT_SEED_DIR = '/var/lib/waagent'
+IMDS_URL = "http://169.254.169.254/metadata/"
+
+# List of static scripts and network config artifacts created by
+# stock ubuntu supported images.
+UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
+ '/etc/netplan/90-azure-hotplug.yaml',
+ '/usr/local/sbin/ephemeral_eth.sh',
+ '/etc/udev/rules.d/10-net-device-added.rules',
+ '/run/network/interfaces.ephemeral.d',
+]
def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
@@ -185,7 +197,7 @@ if util.is_FreeBSD():
BUILTIN_DS_CONFIG = {
'agent_command': AGENT_START_BUILTIN,
- 'data_dir': "/var/lib/waagent",
+ 'data_dir': AGENT_SEED_DIR,
'set_hostname': True,
'hostname_bounce': {
'interface': DEFAULT_PRIMARY_NIC,
@@ -252,6 +264,7 @@ class DataSourceAzure(sources.DataSource):
dsname = 'Azure'
_negotiated = False
+ _metadata_imds = sources.UNSET
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -263,6 +276,8 @@ class DataSourceAzure(sources.DataSource):
BUILTIN_DS_CONFIG])
self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
self._network_config = None
+        # Regenerate network config on new_instance boot and on every boot
+ self.update_events['network'].add(EventType.BOOT)
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -336,15 +351,17 @@ class DataSourceAzure(sources.DataSource):
metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
return metadata
- def _get_data(self):
+ def crawl_metadata(self):
+ """Walk all instance metadata sources returning a dict on success.
+
+ @return: A dictionary of any metadata content for this instance.
+ @raise: InvalidMetaDataException when the expected metadata service is
+ unavailable, broken or disabled.
+ """
+ crawled_data = {}
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
# need to look in the datadir and consider that valid
- asset_tag = util.read_dmi_data('chassis-asset-tag')
- if asset_tag != AZURE_CHASSIS_ASSET_TAG:
- LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
- return False
-
ddir = self.ds_cfg['data_dir']
candidates = [self.seed_dir]
@@ -373,41 +390,84 @@ class DataSourceAzure(sources.DataSource):
except NonAzureDataSource:
continue
except BrokenAzureDataSource as exc:
- raise exc
+ msg = 'BrokenAzureDataSource: %s' % exc
+ raise sources.InvalidMetaDataException(msg)
except util.MountFailedError:
LOG.warning("%s was not mountable", cdev)
continue
if reprovision or self._should_reprovision(ret):
ret = self._reprovision()
- (md, self.userdata_raw, cfg, files) = ret
+ imds_md = get_metadata_from_imds(
+ self.fallback_interface, retries=3)
+ (md, userdata_raw, cfg, files) = ret
self.seed = cdev
- self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
- self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
+ crawled_data.update({
+ 'cfg': cfg,
+ 'files': files,
+ 'metadata': util.mergemanydict(
+ [md, {'imds': imds_md}]),
+ 'userdata_raw': userdata_raw})
found = cdev
LOG.debug("found datasource in %s", cdev)
break
if not found:
- return False
+ raise sources.InvalidMetaDataException('No Azure metadata found')
if found == ddir:
LOG.debug("using files cached in %s", ddir)
seed = _get_random_seed()
if seed:
- self.metadata['random_seed'] = seed
+ crawled_data['metadata']['random_seed'] = seed
+ crawled_data['metadata']['instance-id'] = util.read_dmi_data(
+ 'system-uuid')
+ return crawled_data
+
+ def _is_platform_viable(self):
+ """Check platform environment to report if this datasource may run."""
+ return _is_platform_viable(self.seed_dir)
+
+ def clear_cached_attrs(self, attr_defaults=()):
+ """Reset any cached class attributes to defaults."""
+ super(DataSourceAzure, self).clear_cached_attrs(attr_defaults)
+ self._metadata_imds = sources.UNSET
+
+ def _get_data(self):
+        """Crawl and process datasource metadata, caching it as attributes.
+
+ @return: True on success, False on error, invalid or disabled
+ datasource.
+ """
+ if not self._is_platform_viable():
+ return False
+ try:
+ crawled_data = util.log_time(
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self.crawl_metadata)
+ except sources.InvalidMetaDataException as e:
+ LOG.warning('Could not crawl Azure metadata: %s', e)
+ return False
+ if self.distro and self.distro.name == 'ubuntu':
+ maybe_remove_ubuntu_network_config_scripts()
+
+ # Process crawled data and augment with various config defaults
+ self.cfg = util.mergemanydict(
+ [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG])
+ self._metadata_imds = crawled_data['metadata']['imds']
+ self.metadata = util.mergemanydict(
+ [crawled_data['metadata'], DEFAULT_METADATA])
+ self.userdata_raw = crawled_data['userdata_raw']
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
# walinux agent writes files world readable, but expects
# the directory to be protected.
- write_files(ddir, files, dirmode=0o700)
-
- self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
-
+ write_files(
+ self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700)
return True
def device_name_to_device(self, name):
@@ -436,7 +496,7 @@ class DataSourceAzure(sources.DataSource):
def _poll_imds(self):
"""Poll IMDS for the new provisioning data until we get a valid
response. Then return the returned JSON object."""
- url = IMDS_URL + "?api-version=2017-04-02"
+ url = IMDS_URL + "reprovisiondata?api-version=2017-04-02"
headers = {"Metadata": "true"}
report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
LOG.debug("Start polling IMDS")
@@ -487,7 +547,7 @@ class DataSourceAzure(sources.DataSource):
jump back into the polling loop in order to retrieve the ovf_env."""
if not ret:
return False
- (_md, self.userdata_raw, cfg, _files) = ret
+ (_md, _userdata_raw, cfg, _files) = ret
path = REPROVISION_MARKER_FILE
if (cfg.get('PreprovisionedVm') is True or
os.path.isfile(path)):
@@ -543,22 +603,15 @@ class DataSourceAzure(sources.DataSource):
@property
def network_config(self):
"""Generate a network config like net.generate_fallback_network() with
- the following execptions.
+ the following exceptions.
1. Probe the drivers of the net-devices present and inject them in
the network configuration under params: driver: <driver> value
2. Generate a fallback network config that does not include any of
the blacklisted devices.
"""
- blacklist = ['mlx4_core']
if not self._network_config:
- LOG.debug('Azure: generating fallback configuration')
- # generate a network config, blacklist picking any mlx4_core devs
- netconfig = net.generate_fallback_config(
- blacklist_drivers=blacklist, config_driver=True)
-
- self._network_config = netconfig
-
+ self._network_config = parse_network_config(self._metadata_imds)
return self._network_config
@@ -1025,6 +1078,151 @@ def load_azure_ds_dir(source_dir):
return (md, ud, cfg, {'ovf-env.xml': contents})
+def parse_network_config(imds_metadata):
+ """Convert imds_metadata dictionary to network v2 configuration.
+
+    Parses network configuration from imds metadata if present or generates
+    a fallback network config excluding mlx4_core devices.
+
+ @param: imds_metadata: Dict of content read from IMDS network service.
+ @return: Dictionary containing network version 2 standard configuration.
+ """
+ if imds_metadata != sources.UNSET and imds_metadata:
+ netconfig = {'version': 2, 'ethernets': {}}
+ LOG.debug('Azure: generating network configuration from IMDS')
+ network_metadata = imds_metadata['network']
+ for idx, intf in enumerate(network_metadata['interface']):
+ nicname = 'eth{idx}'.format(idx=idx)
+ dev_config = {}
+ for addr4 in intf['ipv4']['ipAddress']:
+ privateIpv4 = addr4['privateIpAddress']
+ if privateIpv4:
+ if dev_config.get('dhcp4', False):
+ # Append static address config for nic > 1
+ netPrefix = intf['ipv4']['subnet'][0].get(
+ 'prefix', '24')
+ if not dev_config.get('addresses'):
+ dev_config['addresses'] = []
+ dev_config['addresses'].append(
+ '{ip}/{prefix}'.format(
+ ip=privateIpv4, prefix=netPrefix))
+ else:
+ dev_config['dhcp4'] = True
+ for addr6 in intf['ipv6']['ipAddress']:
+ privateIpv6 = addr6['privateIpAddress']
+ if privateIpv6:
+ dev_config['dhcp6'] = True
+ break
+ if dev_config:
+ mac = ':'.join(re.findall(r'..', intf['macAddress']))
+ dev_config.update(
+ {'match': {'macaddress': mac.lower()},
+ 'set-name': nicname})
+ netconfig['ethernets'][nicname] = dev_config
+ else:
+ blacklist = ['mlx4_core']
+ LOG.debug('Azure: generating fallback configuration')
+ # generate a network config, blacklist picking mlx4_core devs
+ netconfig = net.generate_fallback_config(
+ blacklist_drivers=blacklist, config_driver=True)
+ return netconfig
+
+
+def get_metadata_from_imds(fallback_nic, retries):
+ """Query Azure's network metadata service, returning a dictionary.
+
+ If network is not up, setup ephemeral dhcp on fallback_nic to talk to the
+ IMDS. For more info on IMDS:
+ https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
+
+ @param fallback_nic: String. The name of the nic which requires active
+ network in order to query IMDS.
+ @param retries: The number of retries of the IMDS_URL.
+
+ @return: A dict of instance metadata containing compute and network
+ info.
+ """
+ kwargs = {'logfunc': LOG.debug,
+ 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
+ 'func': _get_metadata_from_imds, 'args': (retries,)}
+ if net.is_up(fallback_nic):
+ return util.log_time(**kwargs)
+ else:
+ with EphemeralDHCPv4(fallback_nic):
+ return util.log_time(**kwargs)
+
+
+def _get_metadata_from_imds(retries):
+
+ def retry_on_url_error(msg, exception):
+ if isinstance(exception, UrlError) and exception.code == 404:
+ return True # Continue retries
+ return False # Stop retries on all other exceptions
+
+ url = IMDS_URL + "instance?api-version=2017-12-01"
+ headers = {"Metadata": "true"}
+ try:
+ response = readurl(
+ url, timeout=1, headers=headers, retries=retries,
+ exception_cb=retry_on_url_error)
+ except Exception as e:
+ LOG.debug('Ignoring IMDS instance metadata: %s', e)
+ return {}
+ try:
+ return util.load_json(str(response))
+ except json.decoder.JSONDecodeError:
+ LOG.warning(
+ 'Ignoring non-json IMDS instance metadata: %s', str(response))
+ return {}
+
+
+def maybe_remove_ubuntu_network_config_scripts(paths=None):
+ """Remove Azure-specific ubuntu network config for non-primary nics.
+
+ @param paths: List of networking scripts or directories to remove when
+ present.
+
+ In certain supported ubuntu images, static udev rules or netplan yaml
+ config is delivered in the base ubuntu image to support dhcp on any
+ additional interfaces which get attached by a customer at some point
+ after initial boot. Since the Azure datasource can now regenerate
+ network configuration as metadata reports these new devices, we no longer
+ want the udev rules or netplan's 90-azure-hotplug.yaml to configure
+ networking on eth1 or greater as it might collide with cloud-init's
+ configuration.
+
+    Remove any existing extended network scripts if the datasource is
+ enabled to write network per-boot.
+ """
+ if not paths:
+ paths = UBUNTU_EXTENDED_NETWORK_SCRIPTS
+ logged = False
+ for path in paths:
+ if os.path.exists(path):
+ if not logged:
+ LOG.info(
+ 'Removing Ubuntu extended network scripts because'
+ ' cloud-init updates Azure network configuration on the'
+ ' following event: %s.',
+ EventType.BOOT)
+ logged = True
+ if os.path.isdir(path):
+ util.del_dir(path)
+ else:
+ util.del_file(path)
+
+
+def _is_platform_viable(seed_dir):
+ """Check platform environment to report if this datasource may run."""
+ asset_tag = util.read_dmi_data('chassis-asset-tag')
+ if asset_tag == AZURE_CHASSIS_ASSET_TAG:
+ return True
+ LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
+ if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
+ return True
+ return False
+
+
class BrokenAzureDataSource(Exception):
pass
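
A quick illustration of the IMDS-driven network generation added above (not
part of the commit; a minimal sketch assuming cloud-init is importable, with
a made-up MAC and addresses): parse_network_config() turns the IMDS 'network'
metadata into a netplan v2 dict, and falls back to
net.generate_fallback_config() when the IMDS metadata is empty or UNSET.

    from cloudinit.sources.DataSourceAzure import parse_network_config

    # Hypothetical IMDS payload shaped like the fields the parser reads.
    imds_md = {
        'network': {
            'interface': [{
                'macAddress': '000D3A047598',
                'ipv4': {
                    'subnet': [{'address': '10.0.0.0', 'prefix': '24'}],
                    'ipAddress': [{'privateIpAddress': '10.0.0.4',
                                   'publicIpAddress': '1.2.3.4'}]},
                'ipv6': {'ipAddress': []},
            }]}}

    print(parse_network_config(imds_md))
    # {'version': 2, 'ethernets': {'eth0': {
    #     'dhcp4': True, 'match': {'macaddress': '00:0d:3a:04:75:98'},
    #     'set-name': 'eth0'}}}
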
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 4cb28977..664dc4b7 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -196,7 +196,7 @@ def on_first_boot(data, distro=None, network=True):
net_conf = data.get("network_config", '')
if net_conf and distro:
LOG.warning("Updating network interfaces from config drive")
- distro.apply_network(net_conf)
+ distro.apply_network_config(eni.convert_eni_data(net_conf))
write_injected_files(data.get('files'))
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index 01106ec0..a5358148 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -295,7 +295,7 @@ def read_md():
results = metadata_from_dir(path)
else:
results = util.mount_cb(path, metadata_from_dir)
- except BrokenMetadata as e:
+ except sources.BrokenMetadata as e:
raise RuntimeError(
"Failed reading IBM config disk (platform=%s path=%s): %s" %
(platform, path, e))
@@ -304,10 +304,6 @@ def read_md():
return ret
-class BrokenMetadata(IOError):
- pass
-
-
def metadata_from_dir(source_dir):
"""Walk source_dir extracting standardized metadata.
@@ -352,12 +348,13 @@ def metadata_from_dir(source_dir):
try:
data = transl(raw)
except Exception as e:
- raise BrokenMetadata("Failed decoding %s: %s" % (path, e))
+ raise sources.BrokenMetadata(
+ "Failed decoding %s: %s" % (path, e))
results[name] = data
if results.get('metadata_raw') is None:
- raise BrokenMetadata(
+ raise sources.BrokenMetadata(
"%s missing required file 'meta_data.json'" % source_dir)
results['metadata'] = {}
@@ -368,7 +365,7 @@ def metadata_from_dir(source_dir):
try:
md['random_seed'] = base64.b64decode(md_raw['random_seed'])
except (ValueError, TypeError) as e:
- raise BrokenMetadata(
+ raise sources.BrokenMetadata(
"Badly formatted metadata random_seed entry: %s" % e)
renames = (
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 16c10785..77ccd128 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -232,7 +232,7 @@ class OpenNebulaNetwork(object):
# Set IPv6 default gateway
gateway6 = self.get_gateway6(c_dev)
- if gateway:
+ if gateway6:
devconf['gateway6'] = gateway6
# Set DNS servers and search domains
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 365af96a..4a015240 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -13,6 +13,7 @@ from cloudinit import url_helper
from cloudinit import util
from cloudinit.sources.helpers import openstack
+from cloudinit.sources import DataSourceOracle as oracle
LOG = logging.getLogger(__name__)
@@ -121,8 +122,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
False when unable to contact metadata service or when metadata
format is invalid or disabled.
"""
- if not detect_openstack():
+ oracle_considered = 'Oracle' in self.sys_cfg.get('datasource_list')
+ if not detect_openstack(accept_oracle=not oracle_considered):
return False
+
if self.perform_dhcp_setup: # Setup networking in init-local stage.
try:
with EphemeralDHCPv4(self.fallback_interface):
@@ -214,7 +217,7 @@ def read_metadata_service(base_url, ssl_details=None,
return reader.read_v2()
-def detect_openstack():
+def detect_openstack(accept_oracle=False):
"""Return True when a potential OpenStack platform is detected."""
if not util.is_x86():
return True # Non-Intel cpus don't properly report dmi product names
@@ -223,6 +226,8 @@ def detect_openstack():
return True
elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
return True
+ elif accept_oracle and oracle._is_platform_viable():
+ return True
elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
return True
return False
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
new file mode 100644
index 00000000..fab39af3
--- /dev/null
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -0,0 +1,233 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Datasource for Oracle (OCI/Oracle Cloud Infrastructure)
+
+OCI provides an OpenStack-like metadata service which provides only the
+'2013-10-17' and 'latest' versions.
+
+Notes:
+ * This datasource does not support OCI-Classic. OCI-Classic
+ provides an EC2 lookalike metadata service.
+ * The uuid provided in DMI data is not the same as the meta-data provided
+ instance-id, but has an equivalent lifespan.
+ * We do need to support upgrade from an instance that cloud-init
+ identified as OpenStack.
+ * Both bare-metal and vms use iscsi root
+ * Both bare-metal and vms provide chassis-asset-tag of OracleCloud.com
+"""
+
+from cloudinit.url_helper import combine_url, readurl, UrlError
+from cloudinit.net import dhcp
+from cloudinit import net
+from cloudinit import sources
+from cloudinit import util
+from cloudinit.net import cmdline
+from cloudinit import log as logging
+
+import json
+import re
+
+LOG = logging.getLogger(__name__)
+
+CHASSIS_ASSET_TAG = "OracleCloud.com"
+METADATA_ENDPOINT = "http://169.254.169.254/openstack/"
+
+
+class DataSourceOracle(sources.DataSource):
+
+ dsname = 'Oracle'
+ system_uuid = None
+ vendordata_pure = None
+ _network_config = sources.UNSET
+
+ def _is_platform_viable(self):
+ """Check platform environment to report if this datasource may run."""
+ return _is_platform_viable()
+
+ def _get_data(self):
+ if not self._is_platform_viable():
+ return False
+
+ # network may be configured if iscsi root. If that is the case
+ # then read_kernel_cmdline_config will return non-None.
+ if _is_iscsi_root():
+ data = self.crawl_metadata()
+ else:
+ with dhcp.EphemeralDHCPv4(net.find_fallback_nic()):
+ data = self.crawl_metadata()
+
+ self._crawled_metadata = data
+ vdata = data['2013-10-17']
+
+ self.userdata_raw = vdata.get('user_data')
+ self.system_uuid = vdata['system_uuid']
+
+ vd = vdata.get('vendor_data')
+ if vd:
+ self.vendordata_pure = vd
+ try:
+ self.vendordata_raw = sources.convert_vendordata(vd)
+ except ValueError as e:
+ LOG.warning("Invalid content in vendor-data: %s", e)
+ self.vendordata_raw = None
+
+ mdcopies = ('public_keys',)
+ md = dict([(k, vdata['meta_data'].get(k))
+ for k in mdcopies if k in vdata['meta_data']])
+
+ mdtrans = (
+ # oracle meta_data.json name, cloudinit.datasource.metadata name
+ ('availability_zone', 'availability-zone'),
+ ('hostname', 'local-hostname'),
+ ('launch_index', 'launch-index'),
+ ('uuid', 'instance-id'),
+ )
+ for dsname, ciname in mdtrans:
+ if dsname in vdata['meta_data']:
+ md[ciname] = vdata['meta_data'][dsname]
+
+ self.metadata = md
+ return True
+
+ def crawl_metadata(self):
+ return read_metadata()
+
+ def check_instance_id(self, sys_cfg):
+ """quickly check (local only) if self.instance_id is still valid
+
+ On Oracle, the dmi-provided system uuid differs from the instance-id
+ but has the same life-span."""
+ return sources.instance_id_matches_system_uuid(self.system_uuid)
+
+ def get_public_ssh_keys(self):
+ return sources.normalize_pubkey_data(self.metadata.get('public_keys'))
+
+ @property
+ def network_config(self):
+        """Network config is read from initramfs provided files.
+        If none is present, then we fall back to the fallback configuration.
+
+ One thing to note here is that this method is not currently
+        considered at all if there is kernel/initramfs provided
+ data. In that case, stages considers that the cmdline data
+ overrides datasource provided data and does not consult here.
+
+ We nonetheless return cmdline provided config if present
+        and fall back to generating a fallback config otherwise."""
+ if self._network_config == sources.UNSET:
+ cmdline_cfg = cmdline.read_kernel_cmdline_config()
+ if cmdline_cfg:
+ self._network_config = cmdline_cfg
+ else:
+ self._network_config = self.distro.generate_fallback_config()
+ return self._network_config
+
+
+def _read_system_uuid():
+ sys_uuid = util.read_dmi_data('system-uuid')
+ return None if sys_uuid is None else sys_uuid.lower()
+
+
+def _is_platform_viable():
+ asset_tag = util.read_dmi_data('chassis-asset-tag')
+ return asset_tag == CHASSIS_ASSET_TAG
+
+
+def _is_iscsi_root():
+ return bool(cmdline.read_kernel_cmdline_config())
+
+
+def _load_index(content):
+    """Return a list of entries parsed from content.
+
+    OpenStack's metadata service returns a newline delimited list
+    of items. Oracle's implementation has an html formatted list of links.
+    The parser here just grabs targets from <a href="target">
+    and throws away "../".
+
+    Oracle has acknowledged that this is buggy and may fix it in the future
+    to instead return a '\n' delimited plain text list. This function
+    will continue to work if that change is made."""
+ if not content.lower().startswith("<html>"):
+ return content.splitlines()
+ items = re.findall(
+ r'href="(?P<target>[^"]*)"', content, re.MULTILINE | re.IGNORECASE)
+ return [i for i in items if not i.startswith(".")]
+
+
+def read_metadata(endpoint_base=METADATA_ENDPOINT, sys_uuid=None,
+ version='2013-10-17'):
+ """Read metadata, return a dictionary.
+
+ Each path listed in the index will be represented in the dictionary.
+ If the path ends in .json, then the content will be decoded and
+ populated into the dictionary.
+
+ The system uuid (/sys/class/dmi/id/product_uuid) is also populated.
+ Example: given paths = ('user_data', 'meta_data.json')
+ This would return:
+ {version: {'user_data': b'blob', 'meta_data': json.loads(blob.decode())
+ 'system_uuid': '3b54f2e0-3ab2-458d-b770-af9926eee3b2'}}
+ """
+ endpoint = combine_url(endpoint_base, version) + "/"
+ if sys_uuid is None:
+ sys_uuid = _read_system_uuid()
+ if not sys_uuid:
+ raise sources.BrokenMetadata("Failed to read system uuid.")
+
+ try:
+ resp = readurl(endpoint)
+ if not resp.ok():
+ raise sources.BrokenMetadata(
+ "Bad response from %s: %s" % (endpoint, resp.code))
+ except UrlError as e:
+ raise sources.BrokenMetadata(
+ "Failed to read index at %s: %s" % (endpoint, e))
+
+ entries = _load_index(resp.contents.decode('utf-8'))
+ LOG.debug("index url %s contained: %s", endpoint, entries)
+
+ # meta_data.json is required.
+ mdj = 'meta_data.json'
+ if mdj not in entries:
+ raise sources.BrokenMetadata(
+ "Required field '%s' missing in index at %s" % (mdj, endpoint))
+
+ ret = {'system_uuid': sys_uuid}
+ for path in entries:
+ response = readurl(combine_url(endpoint, path))
+ if path.endswith(".json"):
+ ret[path.rpartition(".")[0]] = (
+ json.loads(response.contents.decode('utf-8')))
+ else:
+ ret[path] = response.contents
+
+ return {version: ret}
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceOracle, (sources.DEP_FILESYSTEM,)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+ import argparse
+ import os
+
+ parser = argparse.ArgumentParser(description='Query Oracle Cloud Metadata')
+ parser.add_argument("--endpoint", metavar="URL",
+ help="The url of the metadata service.",
+ default=METADATA_ENDPOINT)
+ args = parser.parse_args()
+ sys_uuid = "uuid-not-available-not-root" if os.geteuid() != 0 else None
+
+ data = read_metadata(endpoint_base=args.endpoint, sys_uuid=sys_uuid)
+ data['is_platform_viable'] = _is_platform_viable()
+ print(util.json_dumps(data))
+
+# vi: ts=4 expandtab
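
To see the index-parsing behavior described in _load_index's docstring, an
illustrative snippet (not part of the commit; assumes cloud-init is
importable, and the link targets are made up):

    from cloudinit.sources.DataSourceOracle import _load_index

    html = ('<html><body><a href="../">../</a>'
            '<a href="meta_data.json">meta_data.json</a>'
            '<a href="user_data">user_data</a></body></html>')
    # HTML index: grab href targets, drop "../"
    assert _load_index(html) == ['meta_data.json', 'user_data']
    # A plain newline-delimited index works too
    assert _load_index('meta_data.json\nuser_data') == [
        'meta_data.json', 'user_data']
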
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index e2502b02..9dc4ab23 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -29,7 +29,9 @@ from cloudinit import log as logging
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
-
+from cloudinit import net
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from cloudinit.event import EventType
LOG = logging.getLogger(__name__)
@@ -168,8 +170,8 @@ def query_data_api(api_type, api_address, retries, timeout):
class DataSourceScaleway(sources.DataSource):
-
dsname = "Scaleway"
+ update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]}
def __init__(self, sys_cfg, distro, paths):
super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
@@ -185,11 +187,10 @@ class DataSourceScaleway(sources.DataSource):
self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
+ self._fallback_interface = None
+ self._network_config = None
- def _get_data(self):
- if not on_scaleway():
- return False
-
+ def _crawl_metadata(self):
resp = url_helper.readurl(self.metadata_address,
timeout=self.timeout,
retries=self.retries)
@@ -203,9 +204,48 @@ class DataSourceScaleway(sources.DataSource):
'vendor-data', self.vendordata_address,
self.retries, self.timeout
)
+
+ def _get_data(self):
+ if not on_scaleway():
+ return False
+
+ if self._fallback_interface is None:
+ self._fallback_interface = net.find_fallback_nic()
+ try:
+ with EphemeralDHCPv4(self._fallback_interface):
+ util.log_time(
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self._crawl_metadata)
+ except (NoDHCPLeaseError) as e:
+ util.logexc(LOG, str(e))
+ return False
return True
@property
+ def network_config(self):
+ """
+ Configure networking according to data received from the
+ metadata API.
+ """
+ if self._network_config:
+ return self._network_config
+
+ if self._fallback_interface is None:
+ self._fallback_interface = net.find_fallback_nic()
+
+ netcfg = {'type': 'physical', 'name': '%s' % self._fallback_interface}
+ subnets = [{'type': 'dhcp4'}]
+ if self.metadata['ipv6']:
+ subnets += [{'type': 'static',
+ 'address': '%s' % self.metadata['ipv6']['address'],
+ 'gateway': '%s' % self.metadata['ipv6']['gateway'],
+ 'netmask': '%s' % self.metadata['ipv6']['netmask'],
+ }]
+ netcfg['subnets'] = subnets
+ self._network_config = {'version': 1, 'config': [netcfg]}
+ return self._network_config
+
+ @property
def launch_index(self):
return None
@@ -228,7 +268,7 @@ class DataSourceScaleway(sources.DataSource):
datasources = [
- (DataSourceScaleway, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceScaleway, (sources.DEP_FILESYSTEM,)),
]
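
For reference, the network_config property added above produces a version 1
config of the following shape (illustrative only; the NIC name and IPv6
values are hypothetical):

    {'version': 1,
     'config': [{'type': 'physical',
                 'name': 'ens2',  # net.find_fallback_nic() result
                 'subnets': [
                     {'type': 'dhcp4'},
                     # present only when metadata reports an 'ipv6' entry
                     {'type': 'static',
                      'address': '2001:db8::a',
                      'gateway': '2001:db8::1',
                      'netmask': '64'}]}]}
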
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index f92e8b5c..593ac91a 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -564,7 +564,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
continue
LOG.warning('Unexpected response "%s" during flush', response)
except JoyentMetadataTimeoutException:
- LOG.warning('Timeout while initializing metadata client. ' +
+ LOG.warning('Timeout while initializing metadata client. '
'Is the host metadata service running?')
LOG.debug('Got "invalid command". Flush complete.')
self.fp.timeout = timeout
@@ -683,6 +683,18 @@ def jmc_client_factory(
raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
+def identify_file(content_f):
+ cmd = ["file", "--brief", "--mime-type", content_f]
+ f_type = None
+ try:
+ (f_type, _err) = util.subp(cmd)
+ LOG.debug("script %s mime type is %s", content_f, f_type)
+ except util.ProcessExecutionError as e:
+        util.logexc(
+            LOG, "Failed to identify script type for %s: %s", content_f, e)
+ return None if f_type is None else f_type.strip()
+
+
def write_boot_content(content, content_f, link=None, shebang=False,
mode=0o400):
"""
@@ -715,18 +727,11 @@ def write_boot_content(content, content_f, link=None, shebang=False,
util.write_file(content_f, content, mode=mode)
if shebang and not content.startswith("#!"):
- try:
- cmd = ["file", "--brief", "--mime-type", content_f]
- (f_type, _err) = util.subp(cmd)
- LOG.debug("script %s mime type is %s", content_f, f_type)
- if f_type.strip() == "text/plain":
- new_content = "\n".join(["#!/bin/bash", content])
- util.write_file(content_f, new_content, mode=mode)
- LOG.debug("added shebang to file %s", content_f)
-
- except Exception as e:
- util.logexc(LOG, ("Failed to identify script type for %s" %
- content_f, e))
+ f_type = identify_file(content_f)
+ if f_type == "text/plain":
+ util.write_file(
+ content_f, "\n".join(["#!/bin/bash", content]), mode=mode)
+ LOG.debug("added shebang to file %s", content_f)
if link:
try:
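
The identify_file() helper factored out above just shells out to file(1); a
hedged usage sketch (assumes the `file` binary is present and cloud-init is
importable):

    from cloudinit.sources.DataSourceSmartOS import identify_file

    # Returns the stripped mime type, or None if `file` fails.
    print(identify_file('/etc/hostname'))  # e.g. 'text/plain'
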
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index f424316a..5ac98826 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -38,8 +38,17 @@ DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
DS_PREFIX = 'DataSource'
-# File in which instance meta-data, user-data and vendor-data is written
+EXPERIMENTAL_TEXT = (
+ "EXPERIMENTAL: The structure and format of content scoped under the 'ds'"
+ " key may change in subsequent releases of cloud-init.")
+
+
+# File in which publicly available instance meta-data is written
+# security-sensitive key values are redacted from this world-readable file
INSTANCE_JSON_FILE = 'instance-data.json'
+# security-sensitive key values are present in this root-readable file
+INSTANCE_JSON_SENSITIVE_FILE = 'instance-data-sensitive.json'
+REDACT_SENSITIVE_VALUE = 'redacted for non-root user'
# Key which can provide a cloud's official product name to cloud-init
METADATA_CLOUD_NAME_KEY = 'cloud-name'
@@ -58,26 +67,55 @@ class InvalidMetaDataException(Exception):
pass
-def process_base64_metadata(metadata, key_path=''):
- """Strip ci-b64 prefix and return metadata with base64-encoded-keys set."""
+def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
+ """Process all instance metadata cleaning it up for persisting as json.
+
+ Strip ci-b64 prefix and catalog any 'base64_encoded_keys' as a list
+
+ @return Dict copy of processed metadata.
+ """
md_copy = copy.deepcopy(metadata)
- md_copy['base64-encoded-keys'] = []
+ md_copy['base64_encoded_keys'] = []
+ md_copy['sensitive_keys'] = []
for key, val in metadata.items():
if key_path:
sub_key_path = key_path + '/' + key
else:
sub_key_path = key
+ if key in sensitive_keys or sub_key_path in sensitive_keys:
+ md_copy['sensitive_keys'].append(sub_key_path)
if isinstance(val, str) and val.startswith('ci-b64:'):
- md_copy['base64-encoded-keys'].append(sub_key_path)
+ md_copy['base64_encoded_keys'].append(sub_key_path)
md_copy[key] = val.replace('ci-b64:', '')
if isinstance(val, dict):
- return_val = process_base64_metadata(val, sub_key_path)
- md_copy['base64-encoded-keys'].extend(
- return_val.pop('base64-encoded-keys'))
+ return_val = process_instance_metadata(
+ val, sub_key_path, sensitive_keys)
+ md_copy['base64_encoded_keys'].extend(
+ return_val.pop('base64_encoded_keys'))
+ md_copy['sensitive_keys'].extend(
+ return_val.pop('sensitive_keys'))
md_copy[key] = return_val
return md_copy
+def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
+    """Redact any sensitive keys from the provided metadata dictionary.
+
+    Replace any key values listed in 'sensitive_keys' with redact_value.
+ """
+ if not metadata.get('sensitive_keys', []):
+ return metadata
+ md_copy = copy.deepcopy(metadata)
+ for key_path in metadata.get('sensitive_keys'):
+ path_parts = key_path.split('/')
+ obj = md_copy
+ for path in path_parts:
+ if isinstance(obj[path], dict) and path != path_parts[-1]:
+ obj = obj[path]
+ obj[path] = redact_value
+ return md_copy
+
+
URLParams = namedtuple(
'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
@@ -103,14 +141,14 @@ class DataSource(object):
url_timeout = 10 # timeout for each metadata url read attempt
url_retries = 5 # number of times to retry url upon 404
- # The datasource defines a list of supported EventTypes during which
+ # The datasource defines a set of supported EventTypes during which
# the datasource can react to changes in metadata and regenerate
# network configuration on metadata changes.
# A datasource which supports writing network config on each system boot
- # would set update_events = {'network': [EventType.BOOT]}
+ # would call update_events['network'].add(EventType.BOOT).
# Default: generate network config on new instance id (first boot).
- update_events = {'network': [EventType.BOOT_NEW_INSTANCE]}
+ update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])}
# N-tuple listing default values for any metadata-related class
# attributes cached on an instance by a process_data run. These attribute
@@ -122,6 +160,10 @@ class DataSource(object):
_dirty_cache = False
+    # N-tuple of keypaths or keynames to redact from instance-data.json for
+ # non-root users
+ sensitive_metadata_keys = ('security-credentials',)
+
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
self.distro = distro
@@ -147,12 +189,24 @@ class DataSource(object):
def _get_standardized_metadata(self):
"""Return a dictionary of standardized metadata keys."""
- return {'v1': {
- 'local-hostname': self.get_hostname(),
- 'instance-id': self.get_instance_id(),
- 'cloud-name': self.cloud_name,
- 'region': self.region,
- 'availability-zone': self.availability_zone}}
+ local_hostname = self.get_hostname()
+ instance_id = self.get_instance_id()
+ availability_zone = self.availability_zone
+ cloud_name = self.cloud_name
+ # When adding new standard keys prefer underscore-delimited instead
+        # of hyphen-delimited to support simple variable references in jinja
+ # templates.
+ return {
+ 'v1': {
+ 'availability-zone': availability_zone,
+ 'availability_zone': availability_zone,
+ 'cloud-name': cloud_name,
+ 'cloud_name': cloud_name,
+ 'instance-id': instance_id,
+ 'instance_id': instance_id,
+ 'local-hostname': local_hostname,
+ 'local_hostname': local_hostname,
+ 'region': self.region}}
def clear_cached_attrs(self, attr_defaults=()):
"""Reset any cached metadata attributes to datasource defaults.
@@ -180,15 +234,22 @@ class DataSource(object):
"""
self._dirty_cache = True
return_value = self._get_data()
- json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
if not return_value:
return return_value
+ self.persist_instance_data()
+ return return_value
+
+ def persist_instance_data(self):
+ """Process and write INSTANCE_JSON_FILE with all instance metadata.
+ Replace any hyphens with underscores in key names for use in template
+ processing.
+
+ @return True on successful write, False otherwise.
+ """
instance_data = {
- 'ds': {
- 'meta-data': self.metadata,
- 'user-data': self.get_userdata_raw(),
- 'vendor-data': self.get_vendordata_raw()}}
+ 'ds': {'_doc': EXPERIMENTAL_TEXT,
+ 'meta_data': self.metadata}}
if hasattr(self, 'network_json'):
network_json = getattr(self, 'network_json')
if network_json != UNSET:
@@ -202,16 +263,23 @@ class DataSource(object):
try:
# Process content, base64-encoding unserializable values
content = util.json_dumps(instance_data)
- # Strip base64: prefix and return base64-encoded-keys
- processed_data = process_base64_metadata(json.loads(content))
+ # Strip base64: prefix and set base64_encoded_keys list.
+ processed_data = process_instance_metadata(
+ json.loads(content),
+ sensitive_keys=self.sensitive_metadata_keys)
except TypeError as e:
LOG.warning('Error persisting instance-data.json: %s', str(e))
- return return_value
+ return False
except UnicodeDecodeError as e:
LOG.warning('Error persisting instance-data.json: %s', str(e))
- return return_value
- write_json(json_file, processed_data, mode=0o600)
- return return_value
+ return False
+ json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+ write_json(json_file, processed_data) # World readable
+ json_sensitive_file = os.path.join(self.paths.run_dir,
+ INSTANCE_JSON_SENSITIVE_FILE)
+ write_json(json_sensitive_file,
+ redact_sensitive_keys(processed_data), mode=0o600)
+ return True
def _get_data(self):
"""Walk metadata sources, process crawled data and save attributes."""
@@ -475,8 +543,8 @@ class DataSource(object):
for update_scope, update_events in self.update_events.items():
if event in update_events:
if not supported_events.get(update_scope):
- supported_events[update_scope] = []
- supported_events[update_scope].append(event)
+ supported_events[update_scope] = set()
+ supported_events[update_scope].add(event)
for scope, matched_events in supported_events.items():
LOG.debug(
"Update datasource metadata and %s config due to events: %s",
@@ -490,6 +558,8 @@ class DataSource(object):
result = self.get_data()
if result:
return True
+ LOG.debug("Datasource %s not updated for events: %s", self,
+ ', '.join(source_event_types))
return False
def check_instance_id(self, sys_cfg):
@@ -669,6 +739,10 @@ def convert_vendordata(data, recurse=True):
raise ValueError("Unknown data type for vendordata: %s" % type(data))
+class BrokenMetadata(IOError):
+ pass
+
+
# 'depends' is a list of dependencies (DEP_FILESYSTEM)
# ds_list is a list of 2 item lists
# ds_list = [
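
The redaction flow added above can be exercised directly; a minimal sketch
(assumes cloud-init is importable) mirroring the new unit tests:

    from cloudinit.sources import redact_sensitive_keys

    md = {'sensitive_keys': ['md/secure'],
          'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
    print(redact_sensitive_keys(md))
    # {'sensitive_keys': ['md/secure'],
    #  'md': {'secure': 'redacted for non-root user', 'insecure': 'publik'}}
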
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index a4cf0667..9c29ceac 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -21,6 +21,8 @@ from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
+from cloudinit.sources import BrokenMetadata
+
# See https://docs.openstack.org/user-guide/cli-config-drive.html
LOG = logging.getLogger(__name__)
@@ -36,21 +38,38 @@ KEY_COPIES = (
('local-hostname', 'hostname', False),
('instance-id', 'uuid', True),
)
+
+# Versions and names taken from nova source nova/api/metadata/base.py
OS_LATEST = 'latest'
OS_FOLSOM = '2012-08-10'
OS_GRIZZLY = '2013-04-04'
OS_HAVANA = '2013-10-17'
OS_LIBERTY = '2015-10-15'
+# NEWTON_ONE adds 'devices' to md (sriov-pf-passthrough-neutron-port-vlan)
+OS_NEWTON_ONE = '2016-06-30'
+# NEWTON_TWO adds vendor_data2.json (vendordata-reboot)
+OS_NEWTON_TWO = '2016-10-06'
+# OS_OCATA adds 'vif' field to devices (sriov-pf-passthrough-neutron-port-vlan)
+OS_OCATA = '2017-02-22'
+# OS_ROCKY adds a vf_trusted field to devices (sriov-trusted-vfs)
+OS_ROCKY = '2018-08-27'
+
+
# keep this in chronological order. new supported versions go at the end.
OS_VERSIONS = (
OS_FOLSOM,
OS_GRIZZLY,
OS_HAVANA,
OS_LIBERTY,
+ OS_NEWTON_ONE,
+ OS_NEWTON_TWO,
+ OS_OCATA,
+ OS_ROCKY,
)
PHYSICAL_TYPES = (
None,
+ 'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
'bridge',
'dvs',
'ethernet',
@@ -68,10 +87,6 @@ class NonReadable(IOError):
pass
-class BrokenMetadata(IOError):
- pass
-
-
class SourceMixin(object):
def _ec2_name_to_device(self, name):
if not self.ec2_metadata:
@@ -441,7 +456,7 @@ class MetadataReader(BaseReader):
return self._versions
found = []
version_path = self._path_join(self.base_path, "openstack")
- content = self._path_read(version_path)
+ content = self._path_read(version_path, decode=True)
for line in content.splitlines():
line = line.strip()
if not line:
@@ -589,6 +604,8 @@ def convert_net_json(network_json=None, known_macs=None):
cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
elif link['type'] in ['bond']:
params = {}
+ if link_mac_addr:
+ params['mac_address'] = link_mac_addr
for k, v in link.items():
if k == 'bond_links':
continue
@@ -658,6 +675,17 @@ def convert_net_json(network_json=None, known_macs=None):
else:
cfg[key] = fmt % link_id_info[target]['name']
+ # Infiniband interfaces may be referenced in network_data.json by a 6 byte
+ # Ethernet MAC-style address, and we use that address to look up the
+ # interface name above. Now ensure that the hardware address is set to the
+ # full 20 byte address.
+ ib_known_hwaddrs = net.get_ib_hwaddrs_by_interface()
+ if ib_known_hwaddrs:
+ for cfg in config:
+ if cfg['name'] in ib_known_hwaddrs:
+ cfg['mac_address'] = ib_known_hwaddrs[cfg['name']]
+ cfg['type'] = 'infiniband'
+
for service in services:
cfg = service
cfg.update({'type': 'nameserver'})
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 3ef8c624..e1890e23 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -164,7 +164,7 @@ class NicConfigurator(object):
return ([subnet], route_list)
# Add routes if there is no primary nic
- if not self._primaryNic:
+ if not self._primaryNic and v4.gateways:
route_list.extend(self.gen_ipv4_route(nic,
v4.gateways,
v4.netmask))
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index dcd221be..8082019e 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -1,5 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
import inspect
import os
import six
@@ -9,7 +10,8 @@ from cloudinit.event import EventType
from cloudinit.helpers import Paths
from cloudinit import importer
from cloudinit.sources import (
- INSTANCE_JSON_FILE, DataSource, UNSET)
+ EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE,
+ REDACT_SENSITIVE_VALUE, UNSET, DataSource, redact_sensitive_keys)
from cloudinit.tests.helpers import CiTestCase, skipIf, mock
from cloudinit.user_data import UserDataProcessor
from cloudinit import util
@@ -20,24 +22,30 @@ class DataSourceTestSubclassNet(DataSource):
dsname = 'MyTestSubclass'
url_max_wait = 55
- def __init__(self, sys_cfg, distro, paths, custom_userdata=None):
+ def __init__(self, sys_cfg, distro, paths, custom_metadata=None,
+ custom_userdata=None, get_data_retval=True):
super(DataSourceTestSubclassNet, self).__init__(
sys_cfg, distro, paths)
self._custom_userdata = custom_userdata
+ self._custom_metadata = custom_metadata
+ self._get_data_retval = get_data_retval
def _get_cloud_name(self):
return 'SubclassCloudName'
def _get_data(self):
- self.metadata = {'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'}
+ if self._custom_metadata:
+ self.metadata = self._custom_metadata
+ else:
+ self.metadata = {'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion'}
if self._custom_userdata:
self.userdata_raw = self._custom_userdata
else:
self.userdata_raw = 'userdata_raw'
self.vendordata_raw = 'vendordata_raw'
- return True
+ return self._get_data_retval
class InvalidDataSourceTestSubclassNet(DataSource):
@@ -264,8 +272,19 @@ class TestDataSource(CiTestCase):
self.assertEqual('fqdnhostname.domain.com',
datasource.get_hostname(fqdn=True))
- def test_get_data_write_json_instance_data(self):
- """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root."""
+ def test_get_data_does_not_write_instance_data_on_failure(self):
+ """get_data does not write INSTANCE_JSON_FILE on get_data False."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ get_data_retval=False)
+ self.assertFalse(datasource.get_data())
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ self.assertFalse(
+ os.path.exists(json_file), 'Found unexpected file %s' % json_file)
+
+ def test_get_data_writes_json_instance_data_on_success(self):
+ """get_data writes INSTANCE_JSON_FILE to run_dir as world readable."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
@@ -273,40 +292,126 @@ class TestDataSource(CiTestCase):
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
expected = {
- 'base64-encoded-keys': [],
+ 'base64_encoded_keys': [],
+ 'sensitive_keys': [],
'v1': {
'availability-zone': 'myaz',
+ 'availability_zone': 'myaz',
'cloud-name': 'subclasscloudname',
+ 'cloud_name': 'subclasscloudname',
'instance-id': 'iid-datasource',
+ 'instance_id': 'iid-datasource',
'local-hostname': 'test-subclass-hostname',
+ 'local_hostname': 'test-subclass-hostname',
'region': 'myregion'},
'ds': {
- 'meta-data': {'availability_zone': 'myaz',
+ '_doc': EXPERIMENTAL_TEXT,
+ 'meta_data': {'availability_zone': 'myaz',
'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'},
- 'user-data': 'userdata_raw',
- 'vendor-data': 'vendordata_raw'}}
+ 'region': 'myregion'}}}
self.assertEqual(expected, util.load_json(content))
file_stat = os.stat(json_file)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+ self.assertEqual(expected, util.load_json(content))
+
+ def test_get_data_writes_json_instance_data_sensitive(self):
+ """get_data writes INSTANCE_JSON_SENSITIVE_FILE as readonly root."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ custom_metadata={
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+ 'some': {'security-credentials': {
+ 'cred1': 'sekret', 'cred2': 'othersekret'}}})
+ self.assertEqual(
+ ('security-credentials',), datasource.sensitive_metadata_keys)
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
+ redacted = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {'cred1': 'sekret', 'cred2': 'othersekret'},
+ redacted['ds']['meta_data']['some']['security-credentials'])
+ content = util.load_file(sensitive_json_file)
+ expected = {
+ 'base64_encoded_keys': [],
+ 'sensitive_keys': ['ds/meta_data/some/security-credentials'],
+ 'v1': {
+ 'availability-zone': 'myaz',
+ 'availability_zone': 'myaz',
+ 'cloud-name': 'subclasscloudname',
+ 'cloud_name': 'subclasscloudname',
+ 'instance-id': 'iid-datasource',
+ 'instance_id': 'iid-datasource',
+ 'local-hostname': 'test-subclass-hostname',
+ 'local_hostname': 'test-subclass-hostname',
+ 'region': 'myregion'},
+ 'ds': {
+ '_doc': EXPERIMENTAL_TEXT,
+ 'meta_data': {
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+ 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}}
+ }
+ self.maxDiff = None
+ self.assertEqual(expected, util.load_json(content))
+ file_stat = os.stat(sensitive_json_file)
self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
+ self.assertEqual(expected, util.load_json(content))
def test_get_data_handles_redacted_unserializable_content(self):
"""get_data warns unserializable content in INSTANCE_JSON_FILE."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_userdata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
- self.assertTrue(datasource.get_data())
+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
+ datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
- expected_userdata = {
+ expected_metadata = {
'key1': 'val1',
'key2': {
'key2.1': "Warning: redacted unserializable type <class"
" 'cloudinit.helpers.Paths'>"}}
instance_json = util.load_json(content)
self.assertEqual(
- expected_userdata, instance_json['ds']['user-data'])
+ expected_metadata, instance_json['ds']['meta_data'])
+
+ def test_persist_instance_data_writes_ec2_metadata_when_set(self):
+ """When ec2_metadata class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ datasource.ec2_metadata = UNSET
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertNotIn('ec2_metadata', instance_data['ds'])
+ datasource.ec2_metadata = {'ec2stuff': 'is good'}
+ datasource.persist_instance_data()
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {'ec2stuff': 'is good'},
+ instance_data['ds']['ec2_metadata'])
+
+ def test_persist_instance_data_writes_network_json_when_set(self):
+ """When network_data.json class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertNotIn('network_json', instance_data['ds'])
+ datasource.network_json = {'network_json': 'is good'}
+ datasource.persist_instance_data()
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {'network_json': 'is good'},
+ instance_data['ds']['network_json'])
@skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes")
def test_get_data_base64encodes_unserializable_bytes(self):
@@ -314,17 +419,17 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
self.assertTrue(datasource.get_data())
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
instance_json = util.load_json(content)
- self.assertEqual(
- ['ds/user-data/key2/key2.1'],
- instance_json['base64-encoded-keys'])
+ self.assertItemsEqual(
+ ['ds/meta_data/key2/key2.1'],
+ instance_json['base64_encoded_keys'])
self.assertEqual(
{'key1': 'val1', 'key2': {'key2.1': 'EjM='}},
- instance_json['ds']['user-data'])
+ instance_json['ds']['meta_data'])
@skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes")
def test_get_data_handles_bytes_values(self):
@@ -332,15 +437,15 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
self.assertTrue(datasource.get_data())
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
instance_json = util.load_json(content)
- self.assertEqual([], instance_json['base64-encoded-keys'])
+ self.assertEqual([], instance_json['base64_encoded_keys'])
self.assertEqual(
{'key1': 'val1', 'key2': {'key2.1': '\x123'}},
- instance_json['ds']['user-data'])
+ instance_json['ds']['meta_data'])
@skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8")
def test_non_utf8_encoding_logs_warning(self):
@@ -348,7 +453,7 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
self.assertTrue(datasource.get_data())
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
self.assertFalse(os.path.exists(json_file))
@@ -429,8 +534,9 @@ class TestDataSource(CiTestCase):
def test_update_metadata_only_acts_on_supported_update_events(self):
"""update_metadata won't get_data on unsupported update events."""
+ self.datasource.update_events['network'].discard(EventType.BOOT)
self.assertEqual(
- {'network': [EventType.BOOT_NEW_INSTANCE]},
+ {'network': set([EventType.BOOT_NEW_INSTANCE])},
self.datasource.update_events)
def fake_get_data():
@@ -461,4 +567,36 @@ class TestDataSource(CiTestCase):
self.logs.getvalue())
+class TestRedactSensitiveData(CiTestCase):
+
+ def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self):
+        """When sensitive_keys is absent or empty in metadata, do nothing."""
+ md = {'my': 'data'}
+ self.assertEqual(
+ md, redact_sensitive_keys(md, redact_value='redacted'))
+ md['sensitive_keys'] = []
+ self.assertEqual(
+ md, redact_sensitive_keys(md, redact_value='redacted'))
+
+ def test_redact_sensitive_data_redacts_exact_match_name(self):
+ """Only exact matched sensitive_keys are redacted from metadata."""
+ md = {'sensitive_keys': ['md/secure'],
+ 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
+ secure_md = copy.deepcopy(md)
+ secure_md['md']['secure'] = 'redacted'
+ self.assertEqual(
+ secure_md,
+ redact_sensitive_keys(md, redact_value='redacted'))
+
+ def test_redact_sensitive_data_does_redacts_with_default_string(self):
+ """When redact_value is absent, REDACT_SENSITIVE_VALUE is used."""
+ md = {'sensitive_keys': ['md/secure'],
+ 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
+ secure_md = copy.deepcopy(md)
+ secure_md['md']['secure'] = 'redacted for non-root user'
+ self.assertEqual(
+ secure_md,
+ redact_sensitive_keys(md))
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
new file mode 100644
index 00000000..7599126c
--- /dev/null
+++ b/cloudinit/sources/tests/test_oracle.py
@@ -0,0 +1,331 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.sources import DataSourceOracle as oracle
+from cloudinit.sources import BrokenMetadata
+from cloudinit import helpers
+
+from cloudinit.tests import helpers as test_helpers
+
+from textwrap import dedent
+import argparse
+import httpretty
+import json
+import mock
+import os
+import six
+import uuid
+
+DS_PATH = "cloudinit.sources.DataSourceOracle"
+MD_VER = "2013-10-17"
+
+
+class TestDataSourceOracle(test_helpers.CiTestCase):
+ """Test datasource DataSourceOracle."""
+
+ ds_class = oracle.DataSourceOracle
+
+ my_uuid = str(uuid.uuid4())
+ my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj",
+ "name": "ci-vm1", "availability_zone": "phx-ad-3",
+ "hostname": "ci-vm1hostname",
+ "launch_index": 0, "files": [],
+ "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"},
+ "meta": {}}
+
+ def _patch_instance(self, inst, patches):
+ """Patch an instance of a class 'inst'.
+ for each name, kwargs in patches:
+ inst.name = mock.Mock(**kwargs)
+ returns a namespace object that has
+ namespace.name = mock.Mock(**kwargs)
+ Do not bother with cleanup as instance is assumed transient."""
+ mocks = argparse.Namespace()
+ for name, kwargs in patches.items():
+ imock = mock.Mock(name=name, spec=getattr(inst, name), **kwargs)
+ setattr(mocks, name, imock)
+ setattr(inst, name, imock)
+ return mocks
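+
+    # Usage, as in the tests below: _patch_instance(
+    #     ds, {'crawl_metadata': {'return_value': {}}})
+    # replaces ds.crawl_metadata with a Mock returning {} and exposes
+    # the same Mock as mocks.crawl_metadata for call assertions.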
+
+    def _get_ds(self, sys_cfg=None, distro=None, paths=None, ud_proc=None,
+                patches=None):
+        if sys_cfg is None:
+            sys_cfg = {}
+        if patches is None:
+            patches = {}
+        if paths is None:
+            tmpd = self.tmp_dir()
+            dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd),
+                    'run_dir': self.tmp_path('run_dir', tmpd)}
+            for d in dirs.values():
+                os.mkdir(d)
+            paths = helpers.Paths(dirs)
+
+        ds = self.ds_class(sys_cfg=sys_cfg, distro=distro,
+                           paths=paths, ud_proc=ud_proc)
+
+        return ds, self._patch_instance(ds, patches)
+
+    def test_platform_not_viable_returns_false(self):
+        ds, mocks = self._get_ds(
+            patches={'_is_platform_viable': {'return_value': False}})
+        self.assertFalse(ds._get_data())
+        mocks._is_platform_viable.assert_called_once_with()
+
+    @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+    def test_without_userdata(self, m_is_iscsi_root):
+        """If no user-data is provided, it should not be in return dict."""
+        ds, mocks = self._get_ds(patches={
+            '_is_platform_viable': {'return_value': True},
+            'crawl_metadata': {
+                'return_value': {
+                    MD_VER: {'system_uuid': self.my_uuid,
+                             'meta_data': self.my_md}}}})
+        self.assertTrue(ds._get_data())
+        mocks._is_platform_viable.assert_called_once_with()
+        mocks.crawl_metadata.assert_called_once_with()
+        self.assertEqual(self.my_uuid, ds.system_uuid)
+        self.assertEqual(self.my_md['availability_zone'], ds.availability_zone)
+        self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys())
+        self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
+        self.assertIsNone(ds.userdata_raw)
+
+    @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+    def test_with_vendordata(self, m_is_iscsi_root):
+        """Vendor data, when present, should populate vendordata attrs."""
+        vd = {'cloud-init': '#cloud-config\nkey: value'}
+        ds, mocks = self._get_ds(patches={
+            '_is_platform_viable': {'return_value': True},
+            'crawl_metadata': {
+                'return_value': {
+                    MD_VER: {'system_uuid': self.my_uuid,
+                             'meta_data': self.my_md,
+                             'vendor_data': vd}}}})
+        self.assertTrue(ds._get_data())
+        mocks._is_platform_viable.assert_called_once_with()
+        mocks.crawl_metadata.assert_called_once_with()
+        self.assertEqual(vd, ds.vendordata_pure)
+        self.assertEqual(vd['cloud-init'], ds.vendordata_raw)
+
+    @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+    def test_with_userdata(self, m_is_iscsi_root):
+        """Ensure user-data is populated if present and is binary."""
+        my_userdata = b'abcdefg'
+        ds, mocks = self._get_ds(patches={
+            '_is_platform_viable': {'return_value': True},
+            'crawl_metadata': {
+                'return_value': {
+                    MD_VER: {'system_uuid': self.my_uuid,
+                             'meta_data': self.my_md,
+                             'user_data': my_userdata}}}})
+        self.assertTrue(ds._get_data())
+        mocks._is_platform_viable.assert_called_once_with()
+        mocks.crawl_metadata.assert_called_once_with()
+        self.assertEqual(self.my_uuid, ds.system_uuid)
+        self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys())
+        self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
+        self.assertEqual(my_userdata, ds.userdata_raw)
+
+    @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config")
+    @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+    def test_network_cmdline(self, m_is_iscsi_root, m_cmdline_config):
+        """network_config should read kernel cmdline."""
+        distro = mock.MagicMock()
+        ds, _ = self._get_ds(distro=distro, patches={
+            '_is_platform_viable': {'return_value': True},
+            'crawl_metadata': {
+                'return_value': {
+                    MD_VER: {'system_uuid': self.my_uuid,
+                             'meta_data': self.my_md}}}})
+        ncfg = {'version': 1, 'config': [{'a': 'b'}]}
+        m_cmdline_config.return_value = ncfg
+        self.assertTrue(ds._get_data())
+        self.assertEqual(ncfg, ds.network_config)
+        m_cmdline_config.assert_called_once_with()
+        self.assertFalse(distro.generate_fallback_config.called)
+
+    @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config")
+    @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+    def test_network_fallback(self, m_is_iscsi_root, m_cmdline_config):
+        """Test that fallback network is generated if no kernel cmdline."""
+        distro = mock.MagicMock()
+        ds, _ = self._get_ds(distro=distro, patches={
+            '_is_platform_viable': {'return_value': True},
+            'crawl_metadata': {
+                'return_value': {
+                    MD_VER: {'system_uuid': self.my_uuid,
+                             'meta_data': self.my_md}}}})
+        m_cmdline_config.return_value = None
+        self.assertTrue(ds._get_data())
+        ncfg = {'version': 1, 'config': [{'distro1': 'value'}]}
+        distro.generate_fallback_config.return_value = ncfg
+        self.assertEqual(ncfg, ds.network_config)
+        m_cmdline_config.assert_called_once_with()
+        distro.generate_fallback_config.assert_called_once_with()
+        self.assertEqual(1, m_cmdline_config.call_count)
+
+        # test that the result got cached, and the methods not re-called.
+        self.assertEqual(ncfg, ds.network_config)
+        self.assertEqual(1, m_cmdline_config.call_count)
+
+
+@mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4()))
+class TestReadMetaData(test_helpers.HttprettyTestCase):
+ """Test the read_metadata which interacts with http metadata service."""
+
+ mdurl = oracle.METADATA_ENDPOINT
+ my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj",
+ "name": "ci-vm1", "availability_zone": "phx-ad-3",
+ "hostname": "ci-vm1hostname",
+ "launch_index": 0, "files": [],
+ "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"},
+ "meta": {}}
+
+ def populate_md(self, data):
+ """call httppretty.register_url for each item dict 'data',
+ including valid indexes. Text values converted to bytes."""
+ httpretty.register_uri(
+ httpretty.GET, self.mdurl + MD_VER + "/",
+ '\n'.join(data.keys()).encode('utf-8'))
+ for k, v in data.items():
+ httpretty.register_uri(
+ httpretty.GET, self.mdurl + MD_VER + "/" + k,
+ v if not isinstance(v, six.text_type) else v.encode('utf-8'))
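+
+    # For example, data={'meta_data.json': '{}'} registers (a sketch of
+    # the effect, using the class attributes above):
+    #     GET mdurl + MD_VER + "/"               -> b'meta_data.json'
+    #     GET mdurl + MD_VER + "/meta_data.json" -> b'{}'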
+
+    def test_broken_no_sys_uuid(self, m_read_system_uuid):
+        """read_metadata raises BrokenMetadata if system_uuid is unreadable."""
+        m_read_system_uuid.return_value = None
+        self.assertRaises(BrokenMetadata, oracle.read_metadata)
+
+    def test_broken_no_metadata_json(self, m_read_system_uuid):
+        """Datasource requires meta_data.json."""
+        httpretty.register_uri(
+            httpretty.GET, self.mdurl + MD_VER + "/",
+            '\n'.join(['user_data']).encode('utf-8'))
+        with self.assertRaises(BrokenMetadata) as cm:
+            oracle.read_metadata()
+        self.assertIn("Required field 'meta_data.json' missing",
+                      str(cm.exception))
+
+    def test_with_userdata(self, m_read_system_uuid):
+        data = {'user_data': b'#!/bin/sh\necho hi world\n',
+                'meta_data.json': json.dumps(self.my_md)}
+        self.populate_md(data)
+        result = oracle.read_metadata()[MD_VER]
+        self.assertEqual(data['user_data'], result['user_data'])
+        self.assertEqual(self.my_md, result['meta_data'])
+
+    def test_without_userdata(self, m_read_system_uuid):
+        data = {'meta_data.json': json.dumps(self.my_md)}
+        self.populate_md(data)
+        result = oracle.read_metadata()[MD_VER]
+        self.assertNotIn('user_data', result)
+        self.assertEqual(self.my_md, result['meta_data'])
+
+    def test_unknown_fields_included(self, m_read_system_uuid):
+        """Unknown fields listed in the index should be included,
+        and those ending in .json should be decoded."""
+        some_data = {'key1': 'data1', 'subk1': {'subd1': 'subv'}}
+        some_vendor_data = {'cloud-init': 'foo'}
+        data = {'meta_data.json': json.dumps(self.my_md),
+                'some_data.json': json.dumps(some_data),
+                'vendor_data.json': json.dumps(some_vendor_data),
+                'other_blob': b'this is blob'}
+        self.populate_md(data)
+        result = oracle.read_metadata()[MD_VER]
+        self.assertNotIn('user_data', result)
+        self.assertEqual(self.my_md, result['meta_data'])
+        self.assertEqual(some_data, result['some_data'])
+        self.assertEqual(some_vendor_data, result['vendor_data'])
+        self.assertEqual(data['other_blob'], result['other_blob'])
+
+
+class TestIsPlatformViable(test_helpers.CiTestCase):
+    @mock.patch(DS_PATH + ".util.read_dmi_data",
+                return_value=oracle.CHASSIS_ASSET_TAG)
+    def test_expected_viable(self, m_read_dmi_data):
+        """System with known chassis tag is viable."""
+        self.assertTrue(oracle._is_platform_viable())
+        m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+    @mock.patch(DS_PATH + ".util.read_dmi_data", return_value=None)
+    def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data):
+        """System without known chassis tag is not viable."""
+        self.assertFalse(oracle._is_platform_viable())
+        m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+    @mock.patch(DS_PATH + ".util.read_dmi_data", return_value="LetsGoCubs")
+    def test_expected_not_viable_other(self, m_read_dmi_data):
+        """System with unknown chassis tag is not viable."""
+        self.assertFalse(oracle._is_platform_viable())
+        m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
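+
+    # For reference, the check exercised above presumably reduces to a
+    # comparison like the following (a sketch, not the verbatim code):
+    #
+    #     util.read_dmi_data('chassis-asset-tag') == oracle.CHASSIS_ASSET_TAG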
+
+
+class TestLoadIndex(test_helpers.CiTestCase):
+ """_load_index handles parsing of an index into a proper list.
+ The tests here guarantee correct parsing of html version or
+ a fixed version. See the function docstring for more doc."""
+
+ _known_html_api_versions = dedent("""\
+ <html>
+ <head><title>Index of /openstack/</title></head>
+ <body bgcolor="white">
+ <h1>Index of /openstack/</h1><hr><pre><a href="../">../</a>
+ <a href="2013-10-17/">2013-10-17/</a> 27-Jun-2018 12:22 -
+ <a href="latest/">latest/</a> 27-Jun-2018 12:22 -
+ </pre><hr></body>
+ </html>""")
+
+ _known_html_contents = dedent("""\
+ <html>
+ <head><title>Index of /openstack/2013-10-17/</title></head>
+ <body bgcolor="white">
+ <h1>Index of /openstack/2013-10-17/</h1><hr><pre><a href="../">../</a>
+ <a href="meta_data.json">meta_data.json</a> 27-Jun-2018 12:22 679
+ <a href="user_data">user_data</a> 27-Jun-2018 12:22 146
+ </pre><hr></body>
+ </html>""")
+
+    def test_parse_html(self):
+        """Test parsing of lower case html."""
+        self.assertEqual(
+            ['2013-10-17/', 'latest/'],
+            oracle._load_index(self._known_html_api_versions))
+        self.assertEqual(
+            ['meta_data.json', 'user_data'],
+            oracle._load_index(self._known_html_contents))
+
+    def test_parse_html_upper(self):
+        """Test parsing of upper case html, although known content is lower."""
+        def _toupper(data):
+            return data.replace("<a", "<A").replace("html>", "HTML>")
+
+        self.assertEqual(
+            ['2013-10-17/', 'latest/'],
+            oracle._load_index(_toupper(self._known_html_api_versions)))
+        self.assertEqual(
+            ['meta_data.json', 'user_data'],
+            oracle._load_index(_toupper(self._known_html_contents)))
+
+    def test_parse_newline_list_with_endl(self):
+        """Test parsing of newline separated list with ending newline."""
+        self.assertEqual(
+            ['2013-10-17/', 'latest/'],
+            oracle._load_index("\n".join(["2013-10-17/", "latest/", ""])))
+        self.assertEqual(
+            ['meta_data.json', 'user_data'],
+            oracle._load_index("\n".join(["meta_data.json", "user_data", ""])))
+
+    def test_parse_newline_list_without_endl(self):
+        """Test parsing of newline separated list with no ending newline.
+
+        Actual openstack implementation does not include trailing newline."""
+        self.assertEqual(
+            ['2013-10-17/', 'latest/'],
+            oracle._load_index("\n".join(["2013-10-17/", "latest/"])))
+        self.assertEqual(
+            ['meta_data.json', 'user_data'],
+            oracle._load_index("\n".join(["meta_data.json", "user_data"])))
+
+
+# vi: ts=4 expandtab