summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--cloudinit/ec2_utils.py23
-rw-r--r--cloudinit/sources/DataSourceConfigDrive.py468
-rw-r--r--cloudinit/sources/DataSourceOpenStack.py163
-rw-r--r--cloudinit/sources/helpers/__init__.py14
-rw-r--r--cloudinit/sources/helpers/openstack.py420
-rw-r--r--cloudinit/url_helper.py68
-rw-r--r--cloudinit/util.py10
-rw-r--r--tests/unittests/test_datasource/test_configdrive.py35
-rw-r--r--tests/unittests/test_datasource/test_openstack.py122
-rw-r--r--tests/unittests/test_ec2_util.py35
10 files changed, 938 insertions, 420 deletions
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 7f4c0443..91cba20f 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -21,7 +21,6 @@ from urlparse import (urlparse, urlunparse)
import functools
import json
-import urllib
from cloudinit import log as logging
from cloudinit import url_helper
@@ -40,16 +39,6 @@ def maybe_json_object(text):
return False
-def combine_url(base, add_on):
- base_parsed = list(urlparse(base))
- path = base_parsed[2]
- if path and not path.endswith("/"):
- path += "/"
- path += urllib.quote(str(add_on), safe="/:")
- base_parsed[2] = path
- return urlunparse(base_parsed)
-
-
# See: http://bit.ly/TyoUQs
#
class MetadataMaterializer(object):
@@ -121,14 +110,14 @@ class MetadataMaterializer(object):
(leaves, children) = self._parse(blob)
child_contents = {}
for c in children:
- child_url = combine_url(base_url, c)
+ child_url = url_helper.combine_url(base_url, c)
if not child_url.endswith("/"):
child_url += "/"
child_blob = str(self._caller(child_url))
child_contents[c] = self._materialize(child_blob, child_url)
leaf_contents = {}
for (field, resource) in leaves.items():
- leaf_url = combine_url(base_url, resource)
+ leaf_url = url_helper.combine_url(base_url, resource)
leaf_blob = str(self._caller(leaf_url))
leaf_contents[field] = self._decode_leaf_blob(field, leaf_blob)
joined = {}
@@ -153,8 +142,8 @@ def _skip_retry_on_codes(status_codes, _request_args, cause):
def get_instance_userdata(api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5):
- ud_url = combine_url(metadata_address, api_version)
- ud_url = combine_url(ud_url, 'user-data')
+ ud_url = url_helper.combine_url(metadata_address, api_version)
+ ud_url = url_helper.combine_url(ud_url, 'user-data')
user_data = ''
try:
# It is ok for userdata to not exist (thats why we are stopping if
@@ -178,8 +167,8 @@ def get_instance_userdata(api_version='latest',
def get_instance_metadata(api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5):
- md_url = combine_url(metadata_address, api_version)
- md_url = combine_url(md_url, 'meta-data')
+ md_url = url_helper.combine_url(metadata_address, api_version)
+ md_url = url_helper.combine_url(md_url, 'meta-data')
caller = functools.partial(util.read_file_or_url,
ssl_details=ssl_details, timeout=timeout,
retries=retries)
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 2a244496..1d30fe08 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -18,181 +18,79 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import base64
-import json
import os
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
+from cloudinit.sources.helpers import openstack
+
LOG = logging.getLogger(__name__)
# Various defaults/constants...
DEFAULT_IID = "iid-dsconfigdrive"
DEFAULT_MODE = 'pass'
-CFG_DRIVE_FILES_V1 = [
- "etc/network/interfaces",
- "root/.ssh/authorized_keys",
- "meta.js",
-]
DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
}
VALID_DSMODES = ("local", "net", "pass", "disabled")
+FS_TYPES = ('vfat', 'iso9660')
+LABEL_TYPES = ('config-2',)
+OPTICAL_DEVICES = tuple(('/dev/sr%s' % i for i in range(0, 2)))
-class ConfigDriveHelper(object):
- def __init__(self, distro):
- self.distro = distro
-
- def on_first_boot(self, data):
- if not data:
- data = {}
- if 'network_config' in data:
- LOG.debug("Updating network interfaces from config drive")
- self.distro.apply_network(data['network_config'])
- files = data.get('files')
- if files:
- LOG.debug("Writing %s injected files", len(files))
- try:
- write_files(files)
- except IOError:
- util.logexc(LOG, "Failed writing files")
-
-
-class DataSourceConfigDrive(sources.DataSource):
+class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
self.source = None
self.dsmode = 'local'
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
self.version = None
self.ec2_metadata = None
- self.helper = ConfigDriveHelper(distro)
+ self.files = {}
def __str__(self):
root = sources.DataSource.__str__(self)
- mstr = "%s [%s,ver=%s]" % (root,
- self.dsmode,
- self.version)
+ mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
mstr += "[source=%s]" % (self.source)
return mstr
- def _ec2_name_to_device(self, name):
- if not self.ec2_metadata:
- return None
- bdm = self.ec2_metadata.get('block-device-mapping', {})
- for (ent_name, device) in bdm.items():
- if name == ent_name:
- return device
- return None
-
- def _os_name_to_device(self, name):
- device = None
- try:
- criteria = 'LABEL=%s' % (name)
- if name in ['swap']:
- criteria = 'TYPE=%s' % (name)
- dev_entries = util.find_devs_with(criteria)
- if dev_entries:
- device = dev_entries[0]
- except util.ProcessExecutionError:
- pass
- return device
-
- def _validate_device_name(self, device):
- if not device:
- return None
- if not device.startswith("/"):
- device = "/dev/%s" % device
- if os.path.exists(device):
- return device
- # Durn, try adjusting the mapping
- remapped = self._remap_device(os.path.basename(device))
- if remapped:
- LOG.debug("Remapped device name %s => %s", device, remapped)
- return remapped
- return None
-
- def device_name_to_device(self, name):
- # Translate a 'name' to a 'physical' device
- if not name:
- return None
- # Try the ec2 mapping first
- names = [name]
- if name == 'root':
- names.insert(0, 'ami')
- if name == 'ami':
- names.append('root')
- device = None
- LOG.debug("Using ec2 metadata lookup to find device %s", names)
- for n in names:
- device = self._ec2_name_to_device(n)
- device = self._validate_device_name(device)
- if device:
- break
- # Try the openstack way second
- if not device:
- LOG.debug("Using os lookup to find device %s", names)
- for n in names:
- device = self._os_name_to_device(n)
- device = self._validate_device_name(device)
- if device:
- break
- # Ok give up...
- if not device:
- return None
- else:
- LOG.debug("Using cfg drive lookup mapped to device %s", device)
- return device
-
def get_data(self):
found = None
md = {}
-
results = {}
if os.path.isdir(self.seed_dir):
try:
- results = read_config_drive_dir(self.seed_dir)
+ results = read_config_drive(self.seed_dir)
found = self.seed_dir
- except NonConfigDriveDir:
+ except openstack.NonReadable:
util.logexc(LOG, "Failed reading config drive from %s",
self.seed_dir)
if not found:
- devlist = find_candidate_devs()
- for dev in devlist:
+ for dev in find_candidate_devs():
try:
- results = util.mount_cb(dev, read_config_drive_dir)
+ results = util.mount_cb(dev, read_config_drive)
found = dev
- break
- except (NonConfigDriveDir, util.MountFailedError):
+ except openstack.NonReadable:
pass
- except BrokenConfigDriveDir:
- util.logexc(LOG, "broken config drive: %s", dev)
-
+ except util.MountFailedError:
+ pass
+ except openstack.BrokenMetadata:
+ util.logexc(LOG, "Broken config drive: %s", dev)
+ if found:
+ break
if not found:
return False
- md = results['metadata']
+ md = results.get('metadata', {})
md = util.mergemanydict([md, DEFAULT_METADATA])
-
- # Perform some metadata 'fixups'
- #
- # OpenStack uses the 'hostname' key
- # while most of cloud-init uses the metadata
- # 'local-hostname' key instead so if it doesn't
- # exist we need to make sure its copied over.
- for (tgt, src) in [('local-hostname', 'hostname')]:
- if tgt not in md and src in md:
- md[tgt] = md[src]
-
user_dsmode = results.get('dsmode', None)
if user_dsmode not in VALID_DSMODES + (None,):
- LOG.warn("user specified invalid mode: %s" % user_dsmode)
+ LOG.warn("User specified invalid mode: %s" % user_dsmode)
user_dsmode = None
- dsmode = get_ds_mode(cfgdrv_ver=results['cfgdrive_ver'],
+ dsmode = get_ds_mode(cfgdrv_ver=results['version'],
ds_cfg=self.ds_cfg.get('dsmode'),
user=user_dsmode)
@@ -209,7 +107,7 @@ class DataSourceConfigDrive(sources.DataSource):
prev_iid = get_previous_iid(self.paths)
cur_iid = md['instance-id']
if prev_iid != cur_iid and self.dsmode == "local":
- self.helper.on_first_boot(results)
+ on_first_boot(results, distro=self.distro)
# dsmode != self.dsmode here if:
# * dsmode = "pass", pass means it should only copy files and then
@@ -225,16 +123,11 @@ class DataSourceConfigDrive(sources.DataSource):
self.metadata = md
self.ec2_metadata = results.get('ec2-metadata')
self.userdata_raw = results.get('userdata')
- self.version = results['cfgdrive_ver']
-
+ self.version = results['version']
+ self.files.update(results.get('files', {}))
+ self.vendordata_raw = results.get('vendordata')
return True
- def get_public_ssh_keys(self):
- name = "public_keys"
- if self.version == 1:
- name = "public-keys"
- return sources.normalize_pubkey_data(self.metadata.get(name))
-
class DataSourceConfigDriveNet(DataSourceConfigDrive):
def __init__(self, sys_cfg, distro, paths):
@@ -242,222 +135,6 @@ class DataSourceConfigDriveNet(DataSourceConfigDrive):
self.dsmode = 'net'
-class NonConfigDriveDir(Exception):
- pass
-
-
-class BrokenConfigDriveDir(Exception):
- pass
-
-
-def find_candidate_devs():
- """Return a list of devices that may contain the config drive.
-
- The returned list is sorted by search order where the first item has
- should be searched first (highest priority)
-
- config drive v1:
- Per documentation, this is "associated as the last available disk on the
- instance", and should be VFAT.
- Currently, we do not restrict search list to "last available disk"
-
- config drive v2:
- Disk should be:
- * either vfat or iso9660 formated
- * labeled with 'config-2'
- """
-
- # Query optical drive to get it in blkid cache for 2.6 kernels
- util.find_devs_with(path="/dev/sr0")
- util.find_devs_with(path="/dev/sr1")
-
- by_fstype = (util.find_devs_with("TYPE=vfat") +
- util.find_devs_with("TYPE=iso9660"))
- by_label = util.find_devs_with("LABEL=config-2")
-
- # give preference to "last available disk" (vdb over vda)
- # note, this is not a perfect rendition of that.
- by_fstype.sort(reverse=True)
- by_label.sort(reverse=True)
-
- # combine list of items by putting by-label items first
- # followed by fstype items, but with dupes removed
- combined = (by_label + [d for d in by_fstype if d not in by_label])
-
- # We are looking for a block device or partition with necessary label or
- # an unpartitioned block device.
- combined = [d for d in combined
- if d in by_label or not util.is_partition(d)]
-
- return combined
-
-
-def read_config_drive_dir(source_dir):
- last_e = NonConfigDriveDir("Not found")
- for finder in (read_config_drive_dir_v2, read_config_drive_dir_v1):
- try:
- data = finder(source_dir)
- return data
- except NonConfigDriveDir as exc:
- last_e = exc
- raise last_e
-
-
-def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
-
- if (not os.path.isdir(os.path.join(source_dir, "openstack", version)) and
- os.path.isdir(os.path.join(source_dir, "openstack", "latest"))):
- LOG.warn("version '%s' not available, attempting to use 'latest'" %
- version)
- version = "latest"
-
- datafiles = (
- ('metadata',
- "openstack/%s/meta_data.json" % version, True, json.loads),
- ('userdata', "openstack/%s/user_data" % version, False, None),
- ('ec2-metadata', "ec2/latest/meta-data.json", False, json.loads),
- )
-
- results = {'userdata': None}
- for (name, path, required, process) in datafiles:
- fpath = os.path.join(source_dir, path)
- data = None
- found = False
- if os.path.isfile(fpath):
- try:
- data = util.load_file(fpath)
- except IOError:
- raise BrokenConfigDriveDir("Failed to read: %s" % fpath)
- found = True
- elif required:
- raise NonConfigDriveDir("Missing mandatory path: %s" % fpath)
-
- if found and process:
- try:
- data = process(data)
- except Exception as exc:
- raise BrokenConfigDriveDir(("Failed to process "
- "path: %s") % fpath)
-
- if found:
- results[name] = data
-
- # instance-id is 'uuid' for openstack. just copy it to instance-id.
- if 'instance-id' not in results['metadata']:
- try:
- results['metadata']['instance-id'] = results['metadata']['uuid']
- except KeyError:
- raise BrokenConfigDriveDir("No uuid entry in metadata")
-
- if 'random_seed' in results['metadata']:
- random_seed = results['metadata']['random_seed']
- try:
- results['metadata']['random_seed'] = base64.b64decode(random_seed)
- except (ValueError, TypeError) as exc:
- raise BrokenConfigDriveDir("Badly formatted random_seed: %s" % exc)
-
- def read_content_path(item):
- # do not use os.path.join here, as content_path starts with /
- cpath = os.path.sep.join((source_dir, "openstack",
- "./%s" % item['content_path']))
- return util.load_file(cpath)
-
- files = {}
- try:
- for item in results['metadata'].get('files', {}):
- files[item['path']] = read_content_path(item)
-
- # the 'network_config' item in metadata is a content pointer
- # to the network config that should be applied.
- # in folsom, it is just a '/etc/network/interfaces' file.
- item = results['metadata'].get("network_config", None)
- if item:
- results['network_config'] = read_content_path(item)
- except Exception as exc:
- raise BrokenConfigDriveDir("Failed to read file %s: %s" % (item, exc))
-
- # to openstack, user can specify meta ('nova boot --meta=key=value') and
- # those will appear under metadata['meta'].
- # if they specify 'dsmode' they're indicating the mode that they intend
- # for this datasource to operate in.
- try:
- results['dsmode'] = results['metadata']['meta']['dsmode']
- except KeyError:
- pass
-
- results['files'] = files
- results['cfgdrive_ver'] = 2
- return results
-
-
-def read_config_drive_dir_v1(source_dir):
- """
- read source_dir, and return a tuple with metadata dict, user-data,
- files and version (1). If not a valid dir, raise a NonConfigDriveDir
- """
-
- found = {}
- for af in CFG_DRIVE_FILES_V1:
- fn = os.path.join(source_dir, af)
- if os.path.isfile(fn):
- found[af] = fn
-
- if len(found) == 0:
- raise NonConfigDriveDir("%s: %s" % (source_dir, "no files found"))
-
- md = {}
- keydata = ""
- if "etc/network/interfaces" in found:
- fn = found["etc/network/interfaces"]
- md['network_config'] = util.load_file(fn)
-
- if "root/.ssh/authorized_keys" in found:
- fn = found["root/.ssh/authorized_keys"]
- keydata = util.load_file(fn)
-
- meta_js = {}
- if "meta.js" in found:
- fn = found['meta.js']
- content = util.load_file(fn)
- try:
- # Just check if its really json...
- meta_js = json.loads(content)
- if not isinstance(meta_js, (dict)):
- raise TypeError("Dict expected for meta.js root node")
- except (ValueError, TypeError) as e:
- raise NonConfigDriveDir("%s: %s, %s" %
- (source_dir, "invalid json in meta.js", e))
- md['meta_js'] = content
-
- # keydata in meta_js is preferred over "injected"
- keydata = meta_js.get('public-keys', keydata)
- if keydata:
- lines = keydata.splitlines()
- md['public-keys'] = [l for l in lines
- if len(l) and not l.startswith("#")]
-
- # config-drive-v1 has no way for openstack to provide the instance-id
- # so we copy that into metadata from the user input
- if 'instance-id' in meta_js:
- md['instance-id'] = meta_js['instance-id']
-
- results = {'cfgdrive_ver': 1, 'metadata': md}
-
- # allow the user to specify 'dsmode' in a meta tag
- if 'dsmode' in meta_js:
- results['dsmode'] = meta_js['dsmode']
-
- # config-drive-v1 has no way of specifying user-data, so the user has
- # to cheat and stuff it in a meta tag also.
- results['userdata'] = meta_js.get('user-data')
-
- # this implementation does not support files
- # (other than network/interfaces and authorized_keys)
- results['files'] = []
-
- return results
-
-
def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
"""Determine what mode should be used.
valid values are 'pass', 'disabled', 'local', 'net'
@@ -483,6 +160,21 @@ def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
return "net"
+def read_config_drive(source_dir, version="2012-08-10"):
+ reader = openstack.ConfigDriveReader(source_dir)
+ finders = [
+ (reader.read_v2, [], {'version': version}),
+ (reader.read_v1, [], {}),
+ ]
+ excps = []
+ for (functor, args, kwargs) in finders:
+ try:
+ return functor(*args, **kwargs)
+ except openstack.NonReadable as e:
+ excps.append(e)
+ raise excps[-1]
+
+
def get_previous_iid(paths):
# interestingly, for this purpose the "previous" instance-id is the current
# instance-id. cloud-init hasn't moved them over yet as this datasource
@@ -494,17 +186,79 @@ def get_previous_iid(paths):
return None
-def write_files(files):
- for (name, content) in files.iteritems():
- if name[0] != os.sep:
- name = os.sep + name
- util.write_file(name, content, mode=0660)
+def on_first_boot(data, distro=None):
+ """Performs any first-boot actions using data read from a config-drive."""
+ if not isinstance(data, dict):
+ raise TypeError("Config-drive data expected to be a dict; not %s"
+ % (type(data)))
+ net_conf = data.get("network_config", '')
+ if net_conf and distro:
+ LOG.debug("Updating network interfaces from config drive")
+ distro.apply_network(net_conf)
+ files = data.get('files', {})
+ if files:
+ LOG.debug("Writing %s injected files", len(files))
+ for (filename, content) in files.iteritems():
+ if not filename.startswith(os.sep):
+ filename = os.sep + filename
+ try:
+ util.write_file(filename, content, mode=0660)
+ except IOError:
+ util.logexc(LOG, "Failed writing file: %s", filename)
+
+
+def find_candidate_devs(probe_optical=True):
+ """Return a list of devices that may contain the config drive.
+
+ The returned list is sorted by search order where the first item
+ should be searched first (highest priority)
+
+ config drive v1:
+ Per documentation, this is "associated as the last available disk on the
+ instance", and should be VFAT.
+ Currently, we do not restrict search list to "last available disk"
+
+ config drive v2:
+ Disk should be:
+ * either vfat or iso9660 formatted
+ * labeled with 'config-2'
+ """
+ # query optical drive to get it in blkid cache for 2.6 kernels
+ if probe_optical:
+ for device in OPTICAL_DEVICES:
+ try:
+ util.find_devs_with(path=device)
+ except util.ProcessExecutionError:
+ pass
+
+ by_fstype = []
+ for fs_type in FS_TYPES:
+ by_fstype.extend(util.find_devs_with("TYPE=%s" % (fs_type)))
+
+ by_label = []
+ for label in LABEL_TYPES:
+ by_label.extend(util.find_devs_with("LABEL=%s" % (label)))
+
+ # give preference to "last available disk" (vdb over vda)
+ # note, this is not a perfect rendition of that.
+ by_fstype.sort(reverse=True)
+ by_label.sort(reverse=True)
+
+ # combine list of items by putting by-label items first
+ # followed by fstype items, but with dupes removed
+ candidates = (by_label + [d for d in by_fstype if d not in by_label])
+
+ # We are looking for a block device or partition with necessary label or
+ # an unpartitioned block device (e.g. sda, not sda1)
+ devices = [d for d in candidates
+ if d in by_label or not util.is_partition(d)]
+ return devices
# Used to match classes to dependencies
datasources = [
- (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
- (DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
+ (DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
new file mode 100644
index 00000000..44889f4e
--- /dev/null
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -0,0 +1,163 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2014 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import time
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import url_helper
+from cloudinit import util
+
+from cloudinit.sources.helpers import openstack
+
+LOG = logging.getLogger(__name__)
+
+# Various defaults/constants...
+DEF_MD_URL = "http://169.254.169.254"
+DEFAULT_IID = "iid-dsopenstack"
+DEFAULT_METADATA = {
+ "instance-id": DEFAULT_IID,
+}
+VALID_DSMODES = ("net", "disabled")
+
+
+class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
+ self.dsmode = 'net'
+ self.metadata_address = None
+ self.ssl_details = util.fetch_ssl_details(self.paths)
+ self.version = None
+ self.files = {}
+
+ def __str__(self):
+ root = sources.DataSource.__str__(self)
+ mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
+ return mstr
+
+ def _get_url_settings(self):
+ # TODO(harlowja): this is shared with ec2 datasource, we should just
+ # move it to a shared location instead...
+ ds_cfg = self.ds_cfg
+ if not ds_cfg:
+ ds_cfg = {}
+ max_wait = 120
+ try:
+ max_wait = int(ds_cfg.get("max_wait", max_wait))
+ except Exception:
+ util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
+
+ timeout = 50
+ try:
+ timeout = max(0, int(ds_cfg.get("timeout", timeout)))
+ except Exception:
+ util.logexc(LOG, "Failed to get timeout, using %s", timeout)
+ return (max_wait, timeout)
+
+ def wait_for_metadata_service(self):
+ ds_cfg = self.ds_cfg
+ if not ds_cfg:
+ ds_cfg = {}
+ urls = ds_cfg.get("metadata_urls", [DEF_MD_URL])
+ filtered = [x for x in urls if util.is_resolvable_url(x)]
+ if set(filtered) != set(urls):
+ LOG.debug("Removed the following from metadata urls: %s",
+ list((set(urls) - set(filtered))))
+ if len(filtered):
+ urls = filtered
+ else:
+ LOG.warn("Empty metadata url list! using default list")
+ urls = [DEF_MD_URL]
+
+ md_urls = []
+ url2base = {}
+ for url in urls:
+ md_url = url_helper.combine_url(url, 'openstack',
+ openstack.OS_LATEST,
+ 'meta_data.json')
+ md_urls.append(md_url)
+ url2base[md_url] = url
+
+ (max_wait, timeout) = self._get_url_settings()
+ if max_wait <= 0:
+ return False
+ start_time = time.time()
+ avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait,
+ timeout=timeout,
+ status_cb=LOG.warn)
+ if avail_url:
+ LOG.debug("Using metadata source: '%s'", url2base[avail_url])
+ else:
+ LOG.critical("Giving up on md from %s after %s seconds",
+ md_urls, int(time.time() - start_time))
+
+ self.metadata_address = url2base.get(avail_url)
+ return bool(avail_url)
+
+ def get_data(self):
+ try:
+ if not self.wait_for_metadata_service():
+ return False
+ except IOError:
+ return False
+
+ try:
+ results = util.log_time(LOG.debug,
+ 'Crawl of openstack metadata service',
+ read_metadata_service,
+ args=[self.metadata_address],
+ kwargs={'ssl_details': self.ssl_details})
+ except openstack.NonReadable:
+ return False
+ except openstack.BrokenMetadata:
+ util.logexc(LOG, "Broken metadata address %s",
+ self.metadata_address)
+ return False
+
+ user_dsmode = results.get('dsmode', None)
+ if user_dsmode not in VALID_DSMODES + (None,):
+ LOG.warn("User specified invalid mode: %s" % user_dsmode)
+ user_dsmode = None
+ if user_dsmode == 'disabled':
+ return False
+
+ md = results.get('metadata', {})
+ md = util.mergemanydict([md, DEFAULT_METADATA])
+ self.metadata = md
+ self.ec2_metadata = results.get('ec2-metadata')
+ self.userdata_raw = results.get('userdata')
+ self.version = results['version']
+ self.files.update(results.get('files', {}))
+ self.vendordata_raw = results.get('vendordata')
+ return True
+
+
+def read_metadata_service(base_url, version=None, ssl_details=None):
+ reader = openstack.MetadataReader(base_url, ssl_details=ssl_details)
+ return reader.read_v2(version=version)
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/helpers/__init__.py b/cloudinit/sources/helpers/__init__.py
new file mode 100644
index 00000000..2cf99ec8
--- /dev/null
+++ b/cloudinit/sources/helpers/__init__.py
@@ -0,0 +1,14 @@
+# vi: ts=4 expandtab
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
new file mode 100644
index 00000000..9dbef677
--- /dev/null
+++ b/cloudinit/sources/helpers/openstack.py
@@ -0,0 +1,420 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import abc
+import base64
+import copy
+import functools
+import os
+
+from cloudinit import ec2_utils
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import url_helper
+from cloudinit import util
+
+# For reference: http://tinyurl.com/laora4c
+
+LOG = logging.getLogger(__name__)
+
+FILES_V1 = {
+ # Path <-> (metadata key name, translator function, default value)
+ 'etc/network/interfaces': ('network_config', lambda x: x, ''),
+ 'meta.js': ('meta_js', util.load_json, {}),
+ "root/.ssh/authorized_keys": ('authorized_keys', lambda x: x, ''),
+}
+KEY_COPIES = (
+ # Cloud-init metadata names <-> (metadata key, is required)
+ ('local-hostname', 'hostname', False),
+ ('instance-id', 'uuid', True),
+)
+OS_VERSIONS = (
+ '2012-08-10', # folsom
+ '2013-04-04', # grizzly
+ '2013-10-17', # havana
+)
+OS_LATEST = 'latest'
+
+
+class NonReadable(IOError):
+ pass
+
+
+class BrokenMetadata(IOError):
+ pass
+
+
+class SourceMixin(object):
+ def _ec2_name_to_device(self, name):
+ if not self.ec2_metadata:
+ return None
+ bdm = self.ec2_metadata.get('block-device-mapping', {})
+ for (ent_name, device) in bdm.items():
+ if name == ent_name:
+ return device
+ return None
+
+ def get_public_ssh_keys(self):
+ name = "public_keys"
+ if self.version == 1:
+ name = "public-keys"
+ return sources.normalize_pubkey_data(self.metadata.get(name))
+
+ def _os_name_to_device(self, name):
+ device = None
+ try:
+ criteria = 'LABEL=%s' % (name)
+ if name == 'swap':
+ criteria = 'TYPE=%s' % (name)
+ dev_entries = util.find_devs_with(criteria)
+ if dev_entries:
+ device = dev_entries[0]
+ except util.ProcessExecutionError:
+ pass
+ return device
+
+ def _validate_device_name(self, device):
+ if not device:
+ return None
+ if not device.startswith("/"):
+ device = "/dev/%s" % device
+ if os.path.exists(device):
+ return device
+ # Durn, try adjusting the mapping
+ remapped = self._remap_device(os.path.basename(device))
+ if remapped:
+ LOG.debug("Remapped device name %s => %s", device, remapped)
+ return remapped
+ return None
+
+ def device_name_to_device(self, name):
+ # Translate a 'name' to a 'physical' device
+ if not name:
+ return None
+ # Try the ec2 mapping first
+ names = [name]
+ if name == 'root':
+ names.insert(0, 'ami')
+ if name == 'ami':
+ names.append('root')
+ device = None
+ LOG.debug("Using ec2 style lookup to find device %s", names)
+ for n in names:
+ device = self._ec2_name_to_device(n)
+ device = self._validate_device_name(device)
+ if device:
+ break
+ # Try the openstack way second
+ if not device:
+ LOG.debug("Using openstack style lookup to find device %s", names)
+ for n in names:
+ device = self._os_name_to_device(n)
+ device = self._validate_device_name(device)
+ if device:
+ break
+ # Ok give up...
+ if not device:
+ return None
+ else:
+ LOG.debug("Mapped %s to device %s", name, device)
+ return device
+
+
+class BaseReader(object):
+ __metaclass__ = abc.ABCMeta
+
+ def __init__(self, base_path):
+ self.base_path = base_path
+
+ @abc.abstractmethod
+ def _path_join(self, base, *add_ons):
+ pass
+
+ @abc.abstractmethod
+ def _path_exists(self, path):
+ pass
+
+ @abc.abstractmethod
+ def _path_read(self, path):
+ pass
+
+ @abc.abstractmethod
+ def _read_ec2_metadata(self):
+ pass
+
+ def _read_content_path(self, item):
+ path = item.get('content_path', '').lstrip("/")
+ path_pieces = path.split("/")
+ valid_pieces = [p for p in path_pieces if len(p)]
+ if not valid_pieces:
+ raise BrokenMetadata("Item %s has no valid content path" % (item))
+ path = self._path_join(self.base_path, "openstack", *path_pieces)
+ return self._path_read(path)
+
+ def _find_working_version(self, version):
+ search_versions = [version] + list(OS_VERSIONS)
+ for potential_version in search_versions:
+ if not potential_version:
+ continue
+ path = self._path_join(self.base_path, "openstack",
+ potential_version)
+ if self._path_exists(path):
+ if potential_version != version:
+ LOG.warn("Version '%s' not available, attempting to use"
+ " version '%s' instead", version,
+ potential_version)
+ return potential_version
+ LOG.warn("Version '%s' not available, attempting to use '%s'"
+ " instead", version, OS_LATEST)
+ return OS_LATEST
+
    def read_v2(self, version=None):
        """Reads a version 2 formatted location.

        Return a dict with metadata, userdata, ec2-metadata, dsmode,
        network_config, files and version (2).

        If not a valid location, raise a NonReadable exception.
        """

        def datafiles(version):
            # Maps result-key -> (relative path, required?, translator).
            files = {}
            files['metadata'] = (
                # File path to read
                self._path_join("openstack", version, 'meta_data.json'),
                # Is it required?
                True,
                # Translator function (applied after loading)
                util.load_json,
            )
            files['userdata'] = (
                self._path_join("openstack", version, 'user_data'),
                False,
                lambda x: x,
            )
            files['vendordata'] = (
                self._path_join("openstack", version, 'vendor_data.json'),
                False,
                util.load_json,
            )
            return files

        version = self._find_working_version(version)
        results = {
            'userdata': '',
            'version': 2,
        }
        data = datafiles(version)
        # NOTE: 'data' is rebound inside this loop; the iterator returned
        # by iteritems() holds its own reference to the dict, so iteration
        # is unaffected by the rebinding.
        for (name, (path, required, translator)) in data.iteritems():
            path = self._path_join(self.base_path, path)
            data = None
            found = False
            if self._path_exists(path):
                try:
                    data = self._path_read(path)
                except IOError:
                    raise NonReadable("Failed to read: %s" % path)
                found = True
            else:
                if required:
                    raise NonReadable("Missing mandatory path: %s" % path)
            if found and translator:
                try:
                    data = translator(data)
                except Exception as e:
                    raise BrokenMetadata("Failed to process "
                                         "path %s: %s" % (path, e))
            if found:
                results[name] = data

        # 'metadata' is marked required above, so it is always present here
        # (a missing file would already have raised NonReadable).
        metadata = results['metadata']
        if 'random_seed' in metadata:
            random_seed = metadata['random_seed']
            try:
                metadata['random_seed'] = base64.b64decode(random_seed)
            except (ValueError, TypeError) as e:
                raise BrokenMetadata("Badly formatted metadata"
                                     " random_seed entry: %s" % e)

        # load any files that were provided
        files = {}
        metadata_files = metadata.get('files', [])
        for item in metadata_files:
            if 'path' not in item:
                continue
            path = item['path']
            try:
                files[path] = self._read_content_path(item)
            except Exception as e:
                raise BrokenMetadata("Failed to read provided "
                                     "file %s: %s" % (path, e))
        results['files'] = files

        # The 'network_config' item in metadata is a content pointer
        # to the network config that should be applied. It is just a
        # ubuntu/debian '/etc/network/interfaces' file.
        net_item = metadata.get("network_config", None)
        if net_item:
            try:
                results['network_config'] = self._read_content_path(net_item)
            except IOError as e:
                raise BrokenMetadata("Failed to read network"
                                     " configuration: %s" % (e))

        # To openstack, user can specify meta ('nova boot --meta=key=value')
        # and those will appear under metadata['meta'].
        # if they specify 'dsmode' they're indicating the mode that they intend
        # for this datasource to operate in.
        try:
            results['dsmode'] = metadata['meta']['dsmode']
        except KeyError:
            pass

        # Read any ec2-metadata (if applicable)
        results['ec2-metadata'] = self._read_ec2_metadata()

        # Perform some misc. metadata key renames...
        for (target_key, source_key, is_required) in KEY_COPIES:
            if is_required and source_key not in metadata:
                raise BrokenMetadata("No '%s' entry in metadata" % source_key)
            if source_key in metadata:
                metadata[target_key] = metadata.get(source_key)
        return results
+
+
class ConfigDriveReader(BaseReader):
    """Reads openstack metadata out of an on-disk config-drive layout."""

    def __init__(self, base_path):
        super(ConfigDriveReader, self).__init__(base_path)

    def _path_join(self, base, *add_ons):
        # Plain filesystem semantics for on-disk layouts.
        return os.path.join(base, *add_ons)

    def _path_exists(self, path):
        return os.path.exists(path)

    def _path_read(self, path):
        return util.load_file(path)

    def _read_ec2_metadata(self):
        path = self._path_join(self.base_path,
                               'ec2', 'latest', 'meta-data.json')
        if not self._path_exists(path):
            return {}
        try:
            return util.load_json(self._path_read(path))
        except Exception as e:
            raise BrokenMetadata("Failed to process "
                                 "path %s: %s" % (path, e))

    def read_v1(self):
        """Reads a version 1 formatted location.

        Return a dict with metadata, userdata, dsmode, files and version (1).

        If not a valid path, raise a NonReadable exception.
        """
        found = {}
        for name in FILES_V1.keys():
            candidate = self._path_join(self.base_path, name)
            if self._path_exists(candidate):
                found[name] = candidate
        if not found:
            raise NonReadable("%s: no files found" % (self.base_path))

        md = {}
        for (name, (key, translator, default)) in FILES_V1.items():
            if name not in found:
                # Fall back to a private copy of the declared default.
                md[key] = copy.deepcopy(default)
                continue
            path = found[name]
            try:
                contents = self._path_read(path)
            except IOError:
                raise BrokenMetadata("Failed to read: %s" % path)
            try:
                md[key] = translator(contents)
            except Exception as e:
                raise BrokenMetadata("Failed to process "
                                     "path %s: %s" % (path, e))

        keydata = md['authorized_keys']
        meta_js = md['meta_js']

        # keydata in meta_js is preferred over "injected"
        keydata = meta_js.get('public-keys', keydata)
        if keydata:
            md['public-keys'] = [line for line in keydata.splitlines()
                                 if len(line) and not line.startswith("#")]

        # config-drive-v1 has no way for openstack to provide the instance-id
        # so we copy that into metadata from the user input
        if 'instance-id' in meta_js:
            md['instance-id'] = meta_js['instance-id']

        results = {
            'version': 1,
            'metadata': md,
        }

        # allow the user to specify 'dsmode' in a meta tag
        if 'dsmode' in meta_js:
            results['dsmode'] = meta_js['dsmode']

        # config-drive-v1 has no way of specifying user-data, so the user has
        # to cheat and stuff it in a meta tag also.
        results['userdata'] = meta_js.get('user-data', '')

        # this implementation does not support files other than
        # network/interfaces and authorized_keys...
        results['files'] = {}

        return results
+
+
class MetadataReader(BaseReader):
    """Reads openstack metadata over http from a metadata service."""

    def __init__(self, base_url, ssl_details=None, timeout=5, retries=5):
        super(MetadataReader, self).__init__(base_url)
        # Shared keyword arguments for every outgoing request.
        common_kwargs = {
            'ssl_details': ssl_details,
            'timeout': timeout,
        }
        self._url_reader = functools.partial(url_helper.readurl,
                                             retries=retries,
                                             **common_kwargs)
        self._url_checker = functools.partial(url_helper.existsurl,
                                              **common_kwargs)
        self._ec2_reader = functools.partial(ec2_utils.get_instance_metadata,
                                             retries=retries,
                                             **common_kwargs)

    def _path_join(self, base, *add_ons):
        # Urls are combined (and quoted) rather than filesystem-joined.
        return url_helper.combine_url(base, *add_ons)

    def _path_exists(self, path):
        # Uses a HEAD request so existence checks avoid full downloads.
        return self._url_checker(path)

    def _path_read(self, path):
        return str(self._url_reader(path))

    def _read_ec2_metadata(self):
        return self._ec2_reader()
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 97ed75ad..76a8e29b 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -22,6 +22,7 @@
import httplib
import time
+import urllib
import requests
from requests import exceptions
@@ -61,6 +62,23 @@ def _cleanurl(url):
return urlunparse(parsed_url)
def combine_url(base, *add_ons):
    """Append each of *add_ons* to *base* as a url path segment.

    Each segment is url-quoted and separated from the existing path by
    exactly one '/'; non-path url components are preserved.
    """
    url = base
    for add_on in add_ons:
        parsed = list(urlparse(url))
        path = parsed[2]
        if path and not path.endswith("/"):
            path += "/"
        parsed[2] = path + urllib.quote(str(add_on), safe="/:")
        url = urlunparse(parsed)
    return url
+
+
# Made to have same accessors as UrlResponse so that the
# read_file_or_url can return this or that object and the
# 'user' of those objects will not need to know the difference.
@@ -129,30 +147,54 @@ class UrlError(IOError):
self.headers = {}
-def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
- headers=None, headers_cb=None, ssl_details=None,
- check_status=True, allow_redirects=True, exception_cb=None):
- url = _cleanurl(url)
- req_args = {
- 'url': url,
- }
+def _get_ssl_args(url, ssl_details):
+ ssl_args = {}
scheme = urlparse(url).scheme # pylint: disable=E1101
if scheme == 'https' and ssl_details:
if not SSL_ENABLED:
LOG.warn("SSL is not enabled, cert. verification can not occur!")
else:
if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
- req_args['verify'] = ssl_details['ca_certs']
+ ssl_args['verify'] = ssl_details['ca_certs']
else:
- req_args['verify'] = True
+ ssl_args['verify'] = True
if 'cert_file' in ssl_details and 'key_file' in ssl_details:
- req_args['cert'] = [ssl_details['cert_file'],
+ ssl_args['cert'] = [ssl_details['cert_file'],
ssl_details['key_file']]
elif 'cert_file' in ssl_details:
- req_args['cert'] = str(ssl_details['cert_file'])
+ ssl_args['cert'] = str(ssl_details['cert_file'])
+ return ssl_args
+
def existsurl(url, ssl_details=None, timeout=None):
    """Return whether a HEAD request against *url* gets an 'ok' response."""
    response = _readurl(url, ssl_details=ssl_details, timeout=timeout,
                        method='HEAD', check_status=False)
    return response.ok()
+
+
def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
            headers=None, headers_cb=None, ssl_details=None,
            check_status=True, allow_redirects=True, exception_cb=None):
    """Fetch *url* (GET, or POST when *data* is given) with retries.

    Thin public wrapper over _readurl, which also supports other http
    methods (see existsurl); all retry/backoff and ssl handling lives
    there.
    """
    return _readurl(url, data=data, timeout=timeout, retries=retries,
                    sec_between=sec_between, headers=headers,
                    headers_cb=headers_cb, ssl_details=ssl_details,
                    check_status=check_status,
                    allow_redirects=allow_redirects,
                    exception_cb=exception_cb)
+
+
+def _readurl(url, data=None, timeout=None, retries=0, sec_between=1,
+ headers=None, headers_cb=None, ssl_details=None,
+ check_status=True, allow_redirects=True, exception_cb=None,
+ method='GET'):
+ url = _cleanurl(url)
+ req_args = {
+ 'url': url,
+ }
+ req_args.update(_get_ssl_args(url, ssl_details))
+ scheme = urlparse(url).scheme # pylint: disable=E1101
req_args['allow_redirects'] = allow_redirects
- req_args['method'] = 'GET'
+ req_args['method'] = method
if timeout is not None:
req_args['timeout'] = max(float(timeout), 0)
if data:
@@ -223,7 +265,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
# ssl exceptions are not going to get fixed by waiting a
# few seconds
break
- if exception_cb and not exception_cb(filtered_req_args, excps[-1]):
+ if exception_cb and not exception_cb(req_args.copy(), excps[-1]):
break
if i + 1 < manual_tries and sec_between > 0:
LOG.debug("Please wait %s seconds while we wait to try again",
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 08cdd8c8..87b0c853 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -32,6 +32,7 @@ import glob
import grp
import gzip
import hashlib
+import json
import os
import os.path
import platform
@@ -362,6 +363,15 @@ def multi_log(text, console=True, stderr=True,
log.log(log_level, text)
def load_json(text, root_types=(dict,)):
    """Deserialize *text* as JSON, validating the root object's type.

    Raises TypeError when the decoded root is not one of *root_types*.
    """
    decoded = json.loads(text)
    allowed = tuple(root_types)
    if isinstance(decoded, allowed):
        return decoded
    expected_types = ", ".join(str(t) for t in allowed)
    raise TypeError("(%s) root types expected, got %s instead"
                    % (expected_types, type(decoded)))
+
+
def is_ipv4(instr):
"""determine if input string is a ipv4 address. return boolean."""
toks = instr.split('.')
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 1f4a0a0b..937b88c1 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -9,6 +9,7 @@ from mocker import MockerTestCase
from cloudinit import helpers
from cloudinit import settings
from cloudinit.sources import DataSourceConfigDrive as ds
+from cloudinit.sources.helpers import openstack
from cloudinit import util
from tests.unittests import helpers as unit_helpers
@@ -71,7 +72,7 @@ class TestConfigDriveDataSource(MockerTestCase):
def test_ec2_metadata(self):
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- found = ds.read_config_drive_dir(self.tmp)
+ found = ds.read_config_drive(self.tmp)
self.assertTrue('ec2-metadata' in found)
ec2_md = found['ec2-metadata']
self.assertEqual(EC2_META, ec2_md)
@@ -81,7 +82,7 @@ class TestConfigDriveDataSource(MockerTestCase):
cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
None,
helpers.Paths({}))
- found = ds.read_config_drive_dir(self.tmp)
+ found = ds.read_config_drive(self.tmp)
cfg_ds.metadata = found['metadata']
name_tests = {
'ami': '/dev/vda1',
@@ -112,7 +113,7 @@ class TestConfigDriveDataSource(MockerTestCase):
cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
None,
helpers.Paths({}))
- found = ds.read_config_drive_dir(self.tmp)
+ found = ds.read_config_drive(self.tmp)
os_md = found['metadata']
cfg_ds.metadata = os_md
name_tests = {
@@ -140,7 +141,7 @@ class TestConfigDriveDataSource(MockerTestCase):
cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
None,
helpers.Paths({}))
- found = ds.read_config_drive_dir(self.tmp)
+ found = ds.read_config_drive(self.tmp)
ec2_md = found['ec2-metadata']
os_md = found['metadata']
cfg_ds.ec2_metadata = ec2_md
@@ -165,13 +166,13 @@ class TestConfigDriveDataSource(MockerTestCase):
my_mock.replay()
device = cfg_ds.device_name_to_device(name)
self.assertEquals(dev_name, device)
-
+
def test_dev_ec2_map(self):
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
None,
helpers.Paths({}))
- found = ds.read_config_drive_dir(self.tmp)
+ found = ds.read_config_drive(self.tmp)
exists_mock = self.mocker.replace(os.path.exists,
spec=False, passthrough=False)
exists_mock(mocker.ARGS)
@@ -200,10 +201,11 @@ class TestConfigDriveDataSource(MockerTestCase):
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- found = ds.read_config_drive_dir(self.tmp)
+ found = ds.read_config_drive(self.tmp)
expected_md = copy(OSTACK_META)
expected_md['instance-id'] = expected_md['uuid']
+ expected_md['local-hostname'] = expected_md['hostname']
self.assertEqual(USER_DATA, found['userdata'])
self.assertEqual(expected_md, found['metadata'])
@@ -219,10 +221,11 @@ class TestConfigDriveDataSource(MockerTestCase):
populate_dir(self.tmp, data)
- found = ds.read_config_drive_dir(self.tmp)
+ found = ds.read_config_drive(self.tmp)
expected_md = copy(OSTACK_META)
expected_md['instance-id'] = expected_md['uuid']
+ expected_md['local-hostname'] = expected_md['hostname']
self.assertEqual(expected_md, found['metadata'])
@@ -235,8 +238,8 @@ class TestConfigDriveDataSource(MockerTestCase):
populate_dir(self.tmp, data)
- self.assertRaises(ds.BrokenConfigDriveDir,
- ds.read_config_drive_dir, self.tmp)
+ self.assertRaises(openstack.BrokenMetadata,
+ ds.read_config_drive, self.tmp)
def test_seed_dir_no_configdrive(self):
"""Verify that no metadata raises NonConfigDriveDir."""
@@ -247,14 +250,14 @@ class TestConfigDriveDataSource(MockerTestCase):
data["openstack/latest/random-file.txt"] = "random-content"
data["content/foo"] = "foocontent"
- self.assertRaises(ds.NonConfigDriveDir,
- ds.read_config_drive_dir, my_d)
+ self.assertRaises(openstack.NonReadable,
+ ds.read_config_drive, my_d)
def test_seed_dir_missing(self):
"""Verify that missing seed_dir raises NonConfigDriveDir."""
my_d = os.path.join(self.tmp, "nonexistantdirectory")
- self.assertRaises(ds.NonConfigDriveDir,
- ds.read_config_drive_dir, my_d)
+ self.assertRaises(openstack.NonReadable,
+ ds.read_config_drive, my_d)
def test_find_candidates(self):
devs_with_answers = {}
@@ -304,7 +307,7 @@ class TestConfigDriveDataSource(MockerTestCase):
def cfg_ds_from_dir(seed_d):
- found = ds.read_config_drive_dir(seed_d)
+ found = ds.read_config_drive(seed_d)
cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None,
helpers.Paths({}))
populate_ds_from_read_config(cfg_ds, seed_d, found)
@@ -319,7 +322,7 @@ def populate_ds_from_read_config(cfg_ds, source, results):
cfg_ds.metadata = results.get('metadata')
cfg_ds.ec2_metadata = results.get('ec2-metadata')
cfg_ds.userdata_raw = results.get('userdata')
- cfg_ds.version = results.get('cfgdrive_ver')
+ cfg_ds.version = results.get('version')
def populate_dir(seed_dir, files):
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
new file mode 100644
index 00000000..7d93f1d3
--- /dev/null
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -0,0 +1,122 @@
+import re
+import json
+
+from StringIO import StringIO
+
+from urlparse import urlparse
+
+from tests.unittests import helpers
+
+from cloudinit.sources import DataSourceOpenStack as ds
+from cloudinit.sources.helpers import openstack
+from cloudinit import util
+
+import httpretty as hp
+
# Fake metadata service endpoint that the httpretty uris are bound to.
BASE_URL = "http://169.254.169.254"
PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
# Flat key/value data served under the ec2-style 'latest/meta-data' tree.
EC2_META = {
    'ami-id': 'ami-00000001',
    'ami-launch-index': 0,
    'ami-manifest-path': 'FIXME',
    'hostname': 'sm-foo-test.novalocal',
    'instance-action': 'none',
    'instance-id': 'i-00000001',
    'instance-type': 'm1.tiny',
    'local-hostname': 'sm-foo-test.novalocal',
    'local-ipv4': '0.0.0.0',
    'public-hostname': 'sm-foo-test.novalocal',
    'public-ipv4': '0.0.0.1',
    'reservation-id': 'r-iru5qm4m',
}
USER_DATA = '#!/bin/sh\necho This is user data\n'
VENDOR_DATA = {
    'magic': '',
}
# Openstack-format metadata, including content-pointer 'files' entries
# that are resolved against the 'openstack/content/*' blobs below.
OSTACK_META = {
    'availability_zone': 'nova',
    'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
              {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
    'hostname': 'sm-foo-test.novalocal',
    'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
    'name': 'sm-foo-test',
    'public_keys': {'mykey': PUBKEY},
    'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c',
}
CONTENT_0 = 'This is contents of /etc/foo.cfg\n'
CONTENT_1 = '# this is /etc/bar/bar.cfg\n'
# Path -> body mapping served for the openstack side of the fake service.
OS_FILES = {
    'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META),
    'openstack/2012-08-10/user_data': USER_DATA,
    'openstack/content/0000': CONTENT_0,
    'openstack/content/0001': CONTENT_1,
    'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
    'openstack/latest/user_data': USER_DATA,
    'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA),
}
# Path -> body mapping served for the ec2 side of the fake service.
EC2_FILES = {
    'latest/user-data': USER_DATA,
}
+
+
def _register_uris(version):
    """Register httpretty handlers for the openstack and ec2 fake trees.

    NOTE(review): *version* is currently unused; responses are driven
    entirely by the OS_FILES/EC2_FILES mappings.
    """

    def match_ec2_url(uri, headers):
        # Exact ec2 file hits first, then emulate the 'latest/meta-data'
        # directory listing and per-key leaf lookups backed by EC2_META.
        path = uri.path.lstrip("/")
        if path in EC2_FILES:
            return (200, headers, EC2_FILES.get(path))
        if path == 'latest/meta-data':
            buf = StringIO()
            for (k, v) in EC2_META.items():
                if isinstance(v, (list, tuple)):
                    # Container values list as sub-directories.
                    buf.write("%s/" % (k))
                else:
                    buf.write("%s" % (k))
                buf.write("\n")
            return (200, headers, buf.getvalue())
        if path.startswith('latest/meta-data'):
            value = None
            pieces = path.split("/")
            if path.endswith("/"):
                # Trailing slash: drop it so the lookup pieces line up.
                pieces = pieces[2:-1]
                value = util.get_cfg_by_path(EC2_META, pieces)
            else:
                pieces = pieces[2:]
                value = util.get_cfg_by_path(EC2_META, pieces)
            if value is not None:
                return (200, headers, str(value))
        return (404, headers, '')

    def get_request_callback(method, uri, headers):
        # Openstack files win; everything else falls through to ec2.
        uri = urlparse(uri)
        path = uri.path.lstrip("/")
        if path in OS_FILES:
            return (200, headers, OS_FILES.get(path))
        return match_ec2_url(uri, headers)

    def head_request_callback(method, uri, headers):
        # HEAD existence check: any registered file under the prefix counts.
        uri = urlparse(uri)
        path = uri.path.lstrip("/")
        for key in OS_FILES.keys():
            if key.startswith(path):
                return (200, headers, '')
        return (404, headers, '')

    hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'),
                    body=get_request_callback)

    hp.register_uri(hp.HEAD, re.compile(r'http://169.254.169.254/.*'),
                    body=head_request_callback)
+
+
class TestOpenStackDataSource(helpers.TestCase):
    # Metadata tree version exercised against the fake service.
    VERSION = 'latest'

    @hp.activate
    def test_fetch(self):
        """A full read of the fake service returns every section intact."""
        _register_uris(self.VERSION)
        f = ds.read_metadata_service(BASE_URL, version=self.VERSION)
        self.assertEquals(VENDOR_DATA, f.get('vendordata'))
        self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])
        self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
        self.assertEquals(USER_DATA, f.get('userdata'))
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py
index 957dc3f2..dd87665d 100644
--- a/tests/unittests/test_ec2_util.py
+++ b/tests/unittests/test_ec2_util.py
@@ -1,6 +1,7 @@
from tests.unittests import helpers
from cloudinit import ec2_utils as eu
+from cloudinit import url_helper as uh
import httpretty as hp
@@ -48,11 +49,11 @@ class TestEc2Util(helpers.TestCase):
body="\n".join(['hostname',
'instance-id',
'ami-launch-index']))
- hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
+ hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
+ hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
status=200, body='123')
- hp.register_uri(hp.GET, eu.combine_url(base_url, 'ami-launch-index'),
+ hp.register_uri(hp.GET, uh.combine_url(base_url, 'ami-launch-index'),
status=200, body='1')
md = eu.get_instance_metadata(self.VERSION, retries=0)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
@@ -66,14 +67,14 @@ class TestEc2Util(helpers.TestCase):
body="\n".join(['hostname',
'instance-id',
'public-keys/']))
- hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
+ hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
+ hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
status=200, body='123')
- hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'),
+ hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'),
status=200, body='0=my-public-key')
hp.register_uri(hp.GET,
- eu.combine_url(base_url, 'public-keys/0/openssh-key'),
+ uh.combine_url(base_url, 'public-keys/0/openssh-key'),
status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
@@ -87,18 +88,18 @@ class TestEc2Util(helpers.TestCase):
body="\n".join(['hostname',
'instance-id',
'public-keys/']))
- hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
+ hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
+ hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
status=200, body='123')
- hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'),
+ hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'),
status=200,
body="\n".join(['0=my-public-key', '1=my-other-key']))
hp.register_uri(hp.GET,
- eu.combine_url(base_url, 'public-keys/0/openssh-key'),
+ uh.combine_url(base_url, 'public-keys/0/openssh-key'),
status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
hp.register_uri(hp.GET,
- eu.combine_url(base_url, 'public-keys/1/openssh-key'),
+ uh.combine_url(base_url, 'public-keys/1/openssh-key'),
status=200, body='ssh-rsa AAAA.....wZEf my-other-key')
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
@@ -112,20 +113,20 @@ class TestEc2Util(helpers.TestCase):
body="\n".join(['hostname',
'instance-id',
'block-device-mapping/']))
- hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
+ hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
+ hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
status=200, body='123')
hp.register_uri(hp.GET,
- eu.combine_url(base_url, 'block-device-mapping/'),
+ uh.combine_url(base_url, 'block-device-mapping/'),
status=200,
body="\n".join(['ami', 'ephemeral0']))
hp.register_uri(hp.GET,
- eu.combine_url(base_url, 'block-device-mapping/ami'),
+ uh.combine_url(base_url, 'block-device-mapping/ami'),
status=200,
body="sdb")
hp.register_uri(hp.GET,
- eu.combine_url(base_url,
+ uh.combine_url(base_url,
'block-device-mapping/ephemeral0'),
status=200,
body="sdc")