From c104d6dfa464a8906c16b4f09b4b76ab5bf2e4e1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 1 Feb 2014 22:48:55 -0800 Subject: Add an openstack-specific datasource Openstack has a unique derivative datasource that is gaining usage. Previously the config drive datasource, as well as the ec2 datasource, provided part of this functionality, but since new functionality is being added to openstack it seems beneficial to combine the common parts into one datasource made just for handling openstack deployments. This patch factors out the logic shared between the config drive and the openstack metadata datasources, places it in a shared helper file, creates a new openstack datasource that reads from the openstack metadata service, and refactors the config drive datasource to use this common logic. --- cloudinit/ec2_utils.py | 24 +- cloudinit/sources/DataSourceConfigDrive.py | 463 +++++++---------------------- cloudinit/url_helper.py | 64 +++- cloudinit/util.py | 10 + 4 files changed, 177 insertions(+), 384 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 92a22747..80736a8f 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -20,9 +20,9 @@ from urlparse import (urlparse, urlunparse) import functools import json -import urllib from cloudinit import log as logging +from cloudinit import url_helper from cloudinit import util LOG = logging.getLogger(__name__) @@ -37,16 +37,6 @@ def maybe_json_object(text): return False -def combine_url(base, add_on): - base_parsed = list(urlparse(base)) - path = base_parsed[2] - if path and not path.endswith("/"): - path += "/" - path += urllib.quote(str(add_on), safe="/:") - base_parsed[2] = path - return urlunparse(base_parsed) - - # See: http://bit.ly/TyoUQs # class MetadataMaterializer(object): @@ -118,14 +108,14 @@ class MetadataMaterializer(object): (leaves, children) = self._parse(blob) child_contents = {} for c in children: - child_url = combine_url(base_url, c) + child_url = url_helper.combine_url(base_url, c) if not child_url.endswith("/"): child_url += "/" child_blob = str(self._caller(child_url)) child_contents[c] = self._materialize(child_blob, child_url) leaf_contents = {} for (field, resource) in leaves.items(): - leaf_url = combine_url(base_url, resource) + leaf_url = url_helper.combine_url(base_url, resource) leaf_blob = str(self._caller(leaf_url)) leaf_contents[field] = self._decode_leaf_blob(field, leaf_blob) joined = {} @@ -141,8 +131,8 @@ class MetadataMaterializer(object): def get_instance_userdata(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5): - ud_url = combine_url(metadata_address, api_version) - ud_url = combine_url(ud_url, 'user-data') + ud_url = url_helper.combine_url(metadata_address, api_version) + ud_url = url_helper.combine_url(ud_url, 'user-data') try: response = util.read_file_or_url(ud_url, ssl_details=ssl_details, @@ -157,8 +147,8 @@ def get_instance_userdata(api_version='latest', def get_instance_metadata(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5): - md_url = combine_url(metadata_address, api_version) - md_url = combine_url(md_url, 'meta-data') + md_url = url_helper.combine_url(metadata_address, api_version) + md_url = url_helper.combine_url(md_url, 'meta-data') caller = functools.partial(util.read_file_or_url, ssl_details=ssl_details, timeout=timeout, retries=retries) diff --git 
a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 4f437244..c45a1119 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -18,181 +18,79 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import base64 -import json import os from cloudinit import log as logging from cloudinit import sources from cloudinit import util +from cloudinit.sources.helpers import openstack + LOG = logging.getLogger(__name__) # Various defaults/constants... DEFAULT_IID = "iid-dsconfigdrive" DEFAULT_MODE = 'pass' -CFG_DRIVE_FILES_V1 = [ - "etc/network/interfaces", - "root/.ssh/authorized_keys", - "meta.js", -] DEFAULT_METADATA = { "instance-id": DEFAULT_IID, } VALID_DSMODES = ("local", "net", "pass", "disabled") +FS_TYPES = ('vfat', 'iso9660') +LABEL_TYPES = ('config-2',) +OPTICAL_DEVICES = tuple(('/dev/sr%s' % i for i in range(0, 2))) -class ConfigDriveHelper(object): - def __init__(self, distro): - self.distro = distro - - def on_first_boot(self, data): - if not data: - data = {} - if 'network_config' in data: - LOG.debug("Updating network interfaces from config drive") - self.distro.apply_network(data['network_config']) - files = data.get('files') - if files: - LOG.debug("Writing %s injected files", len(files)) - try: - write_files(files) - except IOError: - util.logexc(LOG, "Failed writing files") - - -class DataSourceConfigDrive(sources.DataSource): +class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): def __init__(self, sys_cfg, distro, paths): - sources.DataSource.__init__(self, sys_cfg, distro, paths) + super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths) self.source = None self.dsmode = 'local' self.seed_dir = os.path.join(paths.seed_dir, 'config_drive') self.version = None self.ec2_metadata = None - self.helper = ConfigDriveHelper(distro) + self.files = {} def __str__(self): root = sources.DataSource.__str__(self) - mstr = "%s [%s,ver=%s]" % (root, - self.dsmode, - self.version) + mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version) mstr += "[source=%s]" % (self.source) return mstr - def _ec2_name_to_device(self, name): - if not self.ec2_metadata: - return None - bdm = self.ec2_metadata.get('block-device-mapping', {}) - for (ent_name, device) in bdm.items(): - if name == ent_name: - return device - return None - - def _os_name_to_device(self, name): - device = None - try: - criteria = 'LABEL=%s' % (name) - if name in ['swap']: - criteria = 'TYPE=%s' % (name) - dev_entries = util.find_devs_with(criteria) - if dev_entries: - device = dev_entries[0] - except util.ProcessExecutionError: - pass - return device - - def _validate_device_name(self, device): - if not device: - return None - if not device.startswith("/"): - device = "/dev/%s" % device - if os.path.exists(device): - return device - # Durn, try adjusting the mapping - remapped = self._remap_device(os.path.basename(device)) - if remapped: - LOG.debug("Remapped device name %s => %s", device, remapped) - return remapped - return None - - def device_name_to_device(self, name): - # Translate a 'name' to a 'physical' device - if not name: - return None - # Try the ec2 mapping first - names = [name] - if name == 'root': - names.insert(0, 'ami') - if name == 'ami': - names.append('root') - device = None - LOG.debug("Using ec2 metadata lookup to find device %s", names) - for n in names: - device = self._ec2_name_to_device(n) - device = 
self._validate_device_name(device) - if device: - break - # Try the openstack way second - if not device: - LOG.debug("Using os lookup to find device %s", names) - for n in names: - device = self._os_name_to_device(n) - device = self._validate_device_name(device) - if device: - break - # Ok give up... - if not device: - return None - else: - LOG.debug("Using cfg drive lookup mapped to device %s", device) - return device - def get_data(self): found = None md = {} - results = {} if os.path.isdir(self.seed_dir): try: - results = read_config_drive_dir(self.seed_dir) + results = read_config_drive(self.seed_dir) found = self.seed_dir - except NonConfigDriveDir: + except openstack.NonReadable: util.logexc(LOG, "Failed reading config drive from %s", self.seed_dir) if not found: - devlist = find_candidate_devs() - for dev in devlist: + for dev in find_candidate_devs(): try: - results = util.mount_cb(dev, read_config_drive_dir) + results = util.mount_cb(dev, read_config_drive) found = dev - break - except (NonConfigDriveDir, util.MountFailedError): + except openstack.NonReadable: pass - except BrokenConfigDriveDir: - util.logexc(LOG, "broken config drive: %s", dev) - + except util.MountFailedError: + pass + except openstack.BrokenMetadata: + util.logexc(LOG, "Broken config drive: %s", dev) + if found: + break if not found: return False - md = results['metadata'] + md = results.get('metadata', {}) md = util.mergemanydict([md, DEFAULT_METADATA]) - - # Perform some metadata 'fixups' - # - # OpenStack uses the 'hostname' key - # while most of cloud-init uses the metadata - # 'local-hostname' key instead so if it doesn't - # exist we need to make sure its copied over. - for (tgt, src) in [('local-hostname', 'hostname')]: - if tgt not in md and src in md: - md[tgt] = md[src] - user_dsmode = results.get('dsmode', None) if user_dsmode not in VALID_DSMODES + (None,): - LOG.warn("user specified invalid mode: %s" % user_dsmode) + LOG.warn("User specified invalid mode: %s" % user_dsmode) user_dsmode = None - dsmode = get_ds_mode(cfgdrv_ver=results['cfgdrive_ver'], + dsmode = get_ds_mode(cfgdrv_ver=results['version'], ds_cfg=self.ds_cfg.get('dsmode'), user=user_dsmode) @@ -209,7 +107,7 @@ class DataSourceConfigDrive(sources.DataSource): prev_iid = get_previous_iid(self.paths) cur_iid = md['instance-id'] if prev_iid != cur_iid and self.dsmode == "local": - self.helper.on_first_boot(results) + on_first_boot(results, distro=self.distro) # dsmode != self.dsmode here if: # * dsmode = "pass", pass means it should only copy files and then @@ -225,16 +123,11 @@ class DataSourceConfigDrive(sources.DataSource): self.metadata = md self.ec2_metadata = results.get('ec2-metadata') self.userdata_raw = results.get('userdata') - self.version = results['cfgdrive_ver'] - + self.version = results['version'] + self.files.update(results.get('files', {})) + self.vendordata_raw = results.get('vendordata') return True - def get_public_ssh_keys(self): - name = "public_keys" - if self.version == 1: - name = "public-keys" - return sources.normalize_pubkey_data(self.metadata.get(name)) - class DataSourceConfigDriveNet(DataSourceConfigDrive): def __init__(self, sys_cfg, distro, paths): @@ -242,220 +135,6 @@ class DataSourceConfigDriveNet(DataSourceConfigDrive): self.dsmode = 'net' -class NonConfigDriveDir(Exception): - pass - - -class BrokenConfigDriveDir(Exception): - pass - - -def find_candidate_devs(): - """Return a list of devices that may contain the config drive. 
- - The returned list is sorted by search order where the first item has - should be searched first (highest priority) - - config drive v1: - Per documentation, this is "associated as the last available disk on the - instance", and should be VFAT. - Currently, we do not restrict search list to "last available disk" - - config drive v2: - Disk should be: - * either vfat or iso9660 formated - * labeled with 'config-2' - """ - - # Query optical drive to get it in blkid cache for 2.6 kernels - util.find_devs_with(path="/dev/sr0") - util.find_devs_with(path="/dev/sr1") - - by_fstype = (util.find_devs_with("TYPE=vfat") + - util.find_devs_with("TYPE=iso9660")) - by_label = util.find_devs_with("LABEL=config-2") - - # give preference to "last available disk" (vdb over vda) - # note, this is not a perfect rendition of that. - by_fstype.sort(reverse=True) - by_label.sort(reverse=True) - - # combine list of items by putting by-label items first - # followed by fstype items, but with dupes removed - combined = (by_label + [d for d in by_fstype if d not in by_label]) - - # We are looking for block device (sda, not sda1), ignore partitions - combined = [d for d in combined if not util.is_partition(d)] - - return combined - - -def read_config_drive_dir(source_dir): - last_e = NonConfigDriveDir("Not found") - for finder in (read_config_drive_dir_v2, read_config_drive_dir_v1): - try: - data = finder(source_dir) - return data - except NonConfigDriveDir as exc: - last_e = exc - raise last_e - - -def read_config_drive_dir_v2(source_dir, version="2012-08-10"): - - if (not os.path.isdir(os.path.join(source_dir, "openstack", version)) and - os.path.isdir(os.path.join(source_dir, "openstack", "latest"))): - LOG.warn("version '%s' not available, attempting to use 'latest'" % - version) - version = "latest" - - datafiles = ( - ('metadata', - "openstack/%s/meta_data.json" % version, True, json.loads), - ('userdata', "openstack/%s/user_data" % version, False, None), - ('ec2-metadata', "ec2/latest/meta-data.json", False, json.loads), - ) - - results = {'userdata': None} - for (name, path, required, process) in datafiles: - fpath = os.path.join(source_dir, path) - data = None - found = False - if os.path.isfile(fpath): - try: - data = util.load_file(fpath) - except IOError: - raise BrokenConfigDriveDir("Failed to read: %s" % fpath) - found = True - elif required: - raise NonConfigDriveDir("Missing mandatory path: %s" % fpath) - - if found and process: - try: - data = process(data) - except Exception as exc: - raise BrokenConfigDriveDir(("Failed to process " - "path: %s") % fpath) - - if found: - results[name] = data - - # instance-id is 'uuid' for openstack. just copy it to instance-id. 
- if 'instance-id' not in results['metadata']: - try: - results['metadata']['instance-id'] = results['metadata']['uuid'] - except KeyError: - raise BrokenConfigDriveDir("No uuid entry in metadata") - - if 'random_seed' in results['metadata']: - random_seed = results['metadata']['random_seed'] - try: - results['metadata']['random_seed'] = base64.b64decode(random_seed) - except (ValueError, TypeError) as exc: - raise BrokenConfigDriveDir("Badly formatted random_seed: %s" % exc) - - def read_content_path(item): - # do not use os.path.join here, as content_path starts with / - cpath = os.path.sep.join((source_dir, "openstack", - "./%s" % item['content_path'])) - return util.load_file(cpath) - - files = {} - try: - for item in results['metadata'].get('files', {}): - files[item['path']] = read_content_path(item) - - # the 'network_config' item in metadata is a content pointer - # to the network config that should be applied. - # in folsom, it is just a '/etc/network/interfaces' file. - item = results['metadata'].get("network_config", None) - if item: - results['network_config'] = read_content_path(item) - except Exception as exc: - raise BrokenConfigDriveDir("Failed to read file %s: %s" % (item, exc)) - - # to openstack, user can specify meta ('nova boot --meta=key=value') and - # those will appear under metadata['meta']. - # if they specify 'dsmode' they're indicating the mode that they intend - # for this datasource to operate in. - try: - results['dsmode'] = results['metadata']['meta']['dsmode'] - except KeyError: - pass - - results['files'] = files - results['cfgdrive_ver'] = 2 - return results - - -def read_config_drive_dir_v1(source_dir): - """ - read source_dir, and return a tuple with metadata dict, user-data, - files and version (1). If not a valid dir, raise a NonConfigDriveDir - """ - - found = {} - for af in CFG_DRIVE_FILES_V1: - fn = os.path.join(source_dir, af) - if os.path.isfile(fn): - found[af] = fn - - if len(found) == 0: - raise NonConfigDriveDir("%s: %s" % (source_dir, "no files found")) - - md = {} - keydata = "" - if "etc/network/interfaces" in found: - fn = found["etc/network/interfaces"] - md['network_config'] = util.load_file(fn) - - if "root/.ssh/authorized_keys" in found: - fn = found["root/.ssh/authorized_keys"] - keydata = util.load_file(fn) - - meta_js = {} - if "meta.js" in found: - fn = found['meta.js'] - content = util.load_file(fn) - try: - # Just check if its really json... - meta_js = json.loads(content) - if not isinstance(meta_js, (dict)): - raise TypeError("Dict expected for meta.js root node") - except (ValueError, TypeError) as e: - raise NonConfigDriveDir("%s: %s, %s" % - (source_dir, "invalid json in meta.js", e)) - md['meta_js'] = content - - # keydata in meta_js is preferred over "injected" - keydata = meta_js.get('public-keys', keydata) - if keydata: - lines = keydata.splitlines() - md['public-keys'] = [l for l in lines - if len(l) and not l.startswith("#")] - - # config-drive-v1 has no way for openstack to provide the instance-id - # so we copy that into metadata from the user input - if 'instance-id' in meta_js: - md['instance-id'] = meta_js['instance-id'] - - results = {'cfgdrive_ver': 1, 'metadata': md} - - # allow the user to specify 'dsmode' in a meta tag - if 'dsmode' in meta_js: - results['dsmode'] = meta_js['dsmode'] - - # config-drive-v1 has no way of specifying user-data, so the user has - # to cheat and stuff it in a meta tag also. 
- results['userdata'] = meta_js.get('user-data') - - # this implementation does not support files - # (other than network/interfaces and authorized_keys) - results['files'] = [] - - return results - - def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None): """Determine what mode should be used. valid values are 'pass', 'disabled', 'local', 'net' @@ -481,6 +160,21 @@ def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None): return "net" +def read_config_drive(source_dir, version="2012-08-10"): + reader = openstack.ConfigDriveReader(source_dir) + finders = [ + (reader.read_v2, [], {'version': version}), + (reader.read_v1, [], {}), + ] + excps = [] + for (functor, args, kwargs) in finders: + try: + return functor(*args, **kwargs) + except openstack.NonReadable as e: + excps.append(e) + raise excps[-1] + + def get_previous_iid(paths): # interestingly, for this purpose the "previous" instance-id is the current # instance-id. cloud-init hasn't moved them over yet as this datasource @@ -492,17 +186,76 @@ def get_previous_iid(paths): return None -def write_files(files): - for (name, content) in files.iteritems(): - if name[0] != os.sep: - name = os.sep + name - util.write_file(name, content, mode=0660) +def on_first_boot(data, distro=None): + """Performs any first-boot actions using data read from a config-drive.""" + if not isinstance(data, dict): + raise TypeError("Config-drive data expected to be a dict; not %s" + % (type(data))) + net_conf = data.get("network_config", '') + if net_conf and distro: + LOG.debug("Updating network interfaces from config drive") + distro.apply_network(net_conf) + files = data.get('files', {}) + if files: + LOG.debug("Writing %s injected files", len(files)) + for (filename, content) in files.iteritems(): + if not filename.startswith(os.sep): + filename = os.sep + filename + try: + util.write_file(filename, content, mode=0660) + except IOError: + util.logexc(LOG, "Failed writing file: %s", filename) + + +def find_candidate_devs(probe_optical=True): + """Return a list of devices that may contain the config drive. + + The returned list is sorted by search order where the first item has + should be searched first (highest priority) + + config drive v1: + Per documentation, this is "associated as the last available disk on the + instance", and should be VFAT. + Currently, we do not restrict search list to "last available disk" + + config drive v2: + Disk should be: + * either vfat or iso9660 formated + * labeled with 'config-2' + """ + # query optical drive to get it in blkid cache for 2.6 kernels + if probe_optical: + for device in OPTICAL_DEVICES: + try: + util.find_devs_with(path=device) + except util.ProcessExecutionError: + pass + + by_fstype = [] + for fs_type in FS_TYPES: + by_fstype.extend(util.find_devs_with("TYPE=%s" % (fs_type))) + + by_label = [] + for label in LABEL_TYPES: + by_label.extend(util.find_devs_with("LABEL=%s" % (label))) + + # give preference to "last available disk" (vdb over vda) + # note, this is not a perfect rendition of that. 
+ by_fstype.sort(reverse=True) + by_label.sort(reverse=True) + + # combine list of items by putting by-label items first + # followed by fstype items, but with dupes removed + combined = (by_label + [d for d in by_fstype if d not in by_label]) + + # we are looking for block device (sda, not sda1), ignore partitions + return [d for d in combined if not util.is_partition(d)] # Used to match classes to dependencies datasources = [ - (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )), - (DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )), + (DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 19a30409..5c33d1e4 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -21,6 +21,7 @@ # along with this program. If not, see . import time +import urllib import requests from requests import exceptions @@ -58,6 +59,23 @@ def _cleanurl(url): return urlunparse(parsed_url) +def combine_url(base, *add_ons): + + def combine_single(url, add_on): + url_parsed = list(urlparse(url)) + path = url_parsed[2] + if path and not path.endswith("/"): + path += "/" + path += urllib.quote(str(add_on), safe="/:") + url_parsed[2] = path + return urlunparse(url_parsed) + + url = base + for add_on in add_ons: + url = combine_single(url, add_on) + return url + + class UrlResponse(object): def __init__(self, response): self._response = response @@ -101,30 +119,52 @@ class UrlError(IOError): self.headers = {} -def readurl(url, data=None, timeout=None, retries=0, sec_between=1, - headers=None, headers_cb=None, ssl_details=None, - check_status=True, allow_redirects=True): - url = _cleanurl(url) - req_args = { - 'url': url, - } +def _get_ssl_args(url, ssl_details): + ssl_args = {} scheme = urlparse(url).scheme # pylint: disable=E1101 if scheme == 'https' and ssl_details: if not SSL_ENABLED: LOG.warn("SSL is not enabled, cert. 
verification can not occur!") else: if 'ca_certs' in ssl_details and ssl_details['ca_certs']: - req_args['verify'] = ssl_details['ca_certs'] + ssl_args['verify'] = ssl_details['ca_certs'] else: - req_args['verify'] = True + ssl_args['verify'] = True if 'cert_file' in ssl_details and 'key_file' in ssl_details: - req_args['cert'] = [ssl_details['cert_file'], + ssl_args['cert'] = [ssl_details['cert_file'], ssl_details['key_file']] elif 'cert_file' in ssl_details: - req_args['cert'] = str(ssl_details['cert_file']) + ssl_args['cert'] = str(ssl_details['cert_file']) + return ssl_args + +def existsurl(url, ssl_details=None, timeout=None): + r = _readurl(url, ssl_details=ssl_details, timeout=timeout, + method='HEAD', check_status=False) + return r.ok() + + +def readurl(url, data=None, timeout=None, retries=0, sec_between=1, + headers=None, headers_cb=None, ssl_details=None, + check_status=True, allow_redirects=True): + return _readurl(url, data=data, timeout=timeout, retries=retries, + sec_between=sec_between, headers=headers, + headers_cb=headers_cb, ssl_details=ssl_details, + check_status=check_status, + allow_redirects=allow_redirects) + + +def _readurl(url, data=None, timeout=None, retries=0, sec_between=1, + headers=None, headers_cb=None, ssl_details=None, + check_status=True, allow_redirects=True, method='GET'): + url = _cleanurl(url) + req_args = { + 'url': url, + } + req_args.update(_get_ssl_args(url, ssl_details)) + scheme = urlparse(url).scheme # pylint: disable=E1101 req_args['allow_redirects'] = allow_redirects - req_args['method'] = 'GET' + req_args['method'] = method if timeout is not None: req_args['timeout'] = max(float(timeout), 0) if data: diff --git a/cloudinit/util.py b/cloudinit/util.py index 3ce54f28..9bafd5b3 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -31,6 +31,7 @@ import glob import grp import gzip import hashlib +import json import os import os.path import platform @@ -385,6 +386,15 @@ def multi_log(text, console=True, stderr=True, log.log(log_level, text) +def load_json(text, root_types=(dict,)): + decoded = json.loads(text) + if not isinstance(decoded, tuple(root_types)): + expected_types = ", ".join([str(t) for t in root_types]) + raise TypeError("(%s) root types expected, got %s instead" + % (expected_types, type(decoded))) + return decoded + + def is_ipv4(instr): """determine if input string is a ipv4 address. return boolean.""" toks = instr.split('.') -- cgit v1.2.3 From 810df2c55c108e7e4064263e508d9786d8b1dc8e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 1 Feb 2014 22:58:58 -0800 Subject: Don't forget the rest of the files! --- cloudinit/sources/DataSourceOpenStack.py | 163 +++++++++ cloudinit/sources/helpers/__init__.py | 14 + cloudinit/sources/helpers/openstack.py | 420 ++++++++++++++++++++++ tests/unittests/test_datasource/test_openstack.py | 122 +++++++ 4 files changed, 719 insertions(+) create mode 100644 cloudinit/sources/DataSourceOpenStack.py create mode 100644 cloudinit/sources/helpers/__init__.py create mode 100644 cloudinit/sources/helpers/openstack.py create mode 100644 tests/unittests/test_datasource/test_openstack.py (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py new file mode 100644 index 00000000..44889f4e --- /dev/null +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -0,0 +1,163 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2014 Yahoo! Inc. 
+# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import time + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import url_helper +from cloudinit import util + +from cloudinit.sources.helpers import openstack + +LOG = logging.getLogger(__name__) + +# Various defaults/constants... +DEF_MD_URL = "http://169.254.169.254" +DEFAULT_IID = "iid-dsopenstack" +DEFAULT_METADATA = { + "instance-id": DEFAULT_IID, +} +VALID_DSMODES = ("net", "disabled") + + +class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) + self.dsmode = 'net' + self.metadata_address = None + self.ssl_details = util.fetch_ssl_details(self.paths) + self.version = None + self.files = {} + + def __str__(self): + root = sources.DataSource.__str__(self) + mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version) + return mstr + + def _get_url_settings(self): + # TODO(harlowja): this is shared with ec2 datasource, we should just + # move it to a shared location instead... + ds_cfg = self.ds_cfg + if not ds_cfg: + ds_cfg = {} + max_wait = 120 + try: + max_wait = int(ds_cfg.get("max_wait", max_wait)) + except Exception: + util.logexc(LOG, "Failed to get max wait. using %s", max_wait) + + timeout = 50 + try: + timeout = max(0, int(ds_cfg.get("timeout", timeout))) + except Exception: + util.logexc(LOG, "Failed to get timeout, using %s", timeout) + return (max_wait, timeout) + + def wait_for_metadata_service(self): + ds_cfg = self.ds_cfg + if not ds_cfg: + ds_cfg = {} + urls = ds_cfg.get("metadata_urls", [DEF_MD_URL]) + filtered = [x for x in urls if util.is_resolvable_url(x)] + if set(filtered) != set(urls): + LOG.debug("Removed the following from metadata urls: %s", + list((set(urls) - set(filtered)))) + if len(filtered): + urls = filtered + else: + LOG.warn("Empty metadata url list! 
using default list") + urls = [DEF_MD_URL] + + md_urls = [] + url2base = {} + for url in urls: + md_url = url_helper.combine_url(url, 'openstack', + openstack.OS_LATEST, + 'meta_data.json') + md_urls.append(md_url) + url2base[md_url] = url + + (max_wait, timeout) = self._get_url_settings() + if max_wait <= 0: + return False + start_time = time.time() + avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, + timeout=timeout, + status_cb=LOG.warn) + if avail_url: + LOG.debug("Using metadata source: '%s'", url2base[avail_url]) + else: + LOG.critical("Giving up on md from %s after %s seconds", + md_urls, int(time.time() - start_time)) + + self.metadata_address = url2base.get(avail_url) + return bool(avail_url) + + def get_data(self): + try: + if not self.wait_for_metadata_service(): + return False + except IOError: + return False + + try: + results = util.log_time(LOG.debug, + 'Crawl of openstack metadata service', + read_metadata_service, + args=[self.metadata_address], + kwargs={'ssl_details': self.ssl_details}) + except openstack.NonReadable: + return False + except openstack.BrokenMetadata: + util.logexc(LOG, "Broken metadata address %s", + self.metadata_address) + return False + + user_dsmode = results.get('dsmode', None) + if user_dsmode not in VALID_DSMODES + (None,): + LOG.warn("User specified invalid mode: %s" % user_dsmode) + user_dsmode = None + if user_dsmode == 'disabled': + return False + + md = results.get('metadata', {}) + md = util.mergemanydict([md, DEFAULT_METADATA]) + self.metadata = md + self.ec2_metadata = results.get('ec2-metadata') + self.userdata_raw = results.get('userdata') + self.version = results['version'] + self.files.update(results.get('files', {})) + self.vendordata_raw = results.get('vendordata') + return True + + +def read_metadata_service(base_url, version=None, ssl_details=None): + reader = openstack.MetadataReader(base_url, ssl_details=ssl_details) + return reader.read_v2(version=version) + + +# Used to match classes to dependencies +datasources = [ + (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/sources/helpers/__init__.py b/cloudinit/sources/helpers/__init__.py new file mode 100644 index 00000000..2cf99ec8 --- /dev/null +++ b/cloudinit/sources/helpers/__init__.py @@ -0,0 +1,14 @@ +# vi: ts=4 expandtab +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py new file mode 100644 index 00000000..9dbef677 --- /dev/null +++ b/cloudinit/sources/helpers/openstack.py @@ -0,0 +1,420 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Canonical Ltd. +# Copyright (C) 2012 Yahoo! Inc. 
+# +# Author: Scott Moser +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import abc +import base64 +import copy +import functools +import os + +from cloudinit import ec2_utils +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import url_helper +from cloudinit import util + +# For reference: http://tinyurl.com/laora4c + +LOG = logging.getLogger(__name__) + +FILES_V1 = { + # Path <-> (metadata key name, translator function, default value) + 'etc/network/interfaces': ('network_config', lambda x: x, ''), + 'meta.js': ('meta_js', util.load_json, {}), + "root/.ssh/authorized_keys": ('authorized_keys', lambda x: x, ''), +} +KEY_COPIES = ( + # Cloud-init metadata names <-> (metadata key, is required) + ('local-hostname', 'hostname', False), + ('instance-id', 'uuid', True), +) +OS_VERSIONS = ( + '2012-08-10', # folsom + '2013-04-04', # grizzly + '2013-10-17', # havana +) +OS_LATEST = 'latest' + + +class NonReadable(IOError): + pass + + +class BrokenMetadata(IOError): + pass + + +class SourceMixin(object): + def _ec2_name_to_device(self, name): + if not self.ec2_metadata: + return None + bdm = self.ec2_metadata.get('block-device-mapping', {}) + for (ent_name, device) in bdm.items(): + if name == ent_name: + return device + return None + + def get_public_ssh_keys(self): + name = "public_keys" + if self.version == 1: + name = "public-keys" + return sources.normalize_pubkey_data(self.metadata.get(name)) + + def _os_name_to_device(self, name): + device = None + try: + criteria = 'LABEL=%s' % (name) + if name == 'swap': + criteria = 'TYPE=%s' % (name) + dev_entries = util.find_devs_with(criteria) + if dev_entries: + device = dev_entries[0] + except util.ProcessExecutionError: + pass + return device + + def _validate_device_name(self, device): + if not device: + return None + if not device.startswith("/"): + device = "/dev/%s" % device + if os.path.exists(device): + return device + # Durn, try adjusting the mapping + remapped = self._remap_device(os.path.basename(device)) + if remapped: + LOG.debug("Remapped device name %s => %s", device, remapped) + return remapped + return None + + def device_name_to_device(self, name): + # Translate a 'name' to a 'physical' device + if not name: + return None + # Try the ec2 mapping first + names = [name] + if name == 'root': + names.insert(0, 'ami') + if name == 'ami': + names.append('root') + device = None + LOG.debug("Using ec2 style lookup to find device %s", names) + for n in names: + device = self._ec2_name_to_device(n) + device = self._validate_device_name(device) + if device: + break + # Try the openstack way second + if not device: + LOG.debug("Using openstack style lookup to find device %s", names) + for n in names: + device = self._os_name_to_device(n) + device = self._validate_device_name(device) + if device: + break + # Ok give up... 
+ if not device: + return None + else: + LOG.debug("Mapped %s to device %s", name, device) + return device + + +class BaseReader(object): + __metaclass__ = abc.ABCMeta + + def __init__(self, base_path): + self.base_path = base_path + + @abc.abstractmethod + def _path_join(self, base, *add_ons): + pass + + @abc.abstractmethod + def _path_exists(self, path): + pass + + @abc.abstractmethod + def _path_read(self, path): + pass + + @abc.abstractmethod + def _read_ec2_metadata(self): + pass + + def _read_content_path(self, item): + path = item.get('content_path', '').lstrip("/") + path_pieces = path.split("/") + valid_pieces = [p for p in path_pieces if len(p)] + if not valid_pieces: + raise BrokenMetadata("Item %s has no valid content path" % (item)) + path = self._path_join(self.base_path, "openstack", *path_pieces) + return self._path_read(path) + + def _find_working_version(self, version): + search_versions = [version] + list(OS_VERSIONS) + for potential_version in search_versions: + if not potential_version: + continue + path = self._path_join(self.base_path, "openstack", + potential_version) + if self._path_exists(path): + if potential_version != version: + LOG.warn("Version '%s' not available, attempting to use" + " version '%s' instead", version, + potential_version) + return potential_version + LOG.warn("Version '%s' not available, attempting to use '%s'" + " instead", version, OS_LATEST) + return OS_LATEST + + def read_v2(self, version=None): + """Reads a version 2 formatted location. + + Return a dict with metadata, userdata, ec2-metadata, dsmode, + network_config, files and version (2). + + If not a valid location, raise a NonReadable exception. + """ + + def datafiles(version): + files = {} + files['metadata'] = ( + # File path to read + self._path_join("openstack", version, 'meta_data.json'), + # Is it required? 
+ True, + # Translator function (applied after loading) + util.load_json, + ) + files['userdata'] = ( + self._path_join("openstack", version, 'user_data'), + False, + lambda x: x, + ) + files['vendordata'] = ( + self._path_join("openstack", version, 'vendor_data.json'), + False, + util.load_json, + ) + return files + + version = self._find_working_version(version) + results = { + 'userdata': '', + 'version': 2, + } + data = datafiles(version) + for (name, (path, required, translator)) in data.iteritems(): + path = self._path_join(self.base_path, path) + data = None + found = False + if self._path_exists(path): + try: + data = self._path_read(path) + except IOError: + raise NonReadable("Failed to read: %s" % path) + found = True + else: + if required: + raise NonReadable("Missing mandatory path: %s" % path) + if found and translator: + try: + data = translator(data) + except Exception as e: + raise BrokenMetadata("Failed to process " + "path %s: %s" % (path, e)) + if found: + results[name] = data + + metadata = results['metadata'] + if 'random_seed' in metadata: + random_seed = metadata['random_seed'] + try: + metadata['random_seed'] = base64.b64decode(random_seed) + except (ValueError, TypeError) as e: + raise BrokenMetadata("Badly formatted metadata" + " random_seed entry: %s" % e) + + # load any files that were provided + files = {} + metadata_files = metadata.get('files', []) + for item in metadata_files: + if 'path' not in item: + continue + path = item['path'] + try: + files[path] = self._read_content_path(item) + except Exception as e: + raise BrokenMetadata("Failed to read provided " + "file %s: %s" % (path, e)) + results['files'] = files + + # The 'network_config' item in metadata is a content pointer + # to the network config that should be applied. It is just a + # ubuntu/debian '/etc/network/interfaces' file. + net_item = metadata.get("network_config", None) + if net_item: + try: + results['network_config'] = self._read_content_path(net_item) + except IOError as e: + raise BrokenMetadata("Failed to read network" + " configuration: %s" % (e)) + + # To openstack, user can specify meta ('nova boot --meta=key=value') + # and those will appear under metadata['meta']. + # if they specify 'dsmode' they're indicating the mode that they intend + # for this datasource to operate in. + try: + results['dsmode'] = metadata['meta']['dsmode'] + except KeyError: + pass + + # Read any ec2-metadata (if applicable) + results['ec2-metadata'] = self._read_ec2_metadata() + + # Perform some misc. metadata key renames... 
+ for (target_key, source_key, is_required) in KEY_COPIES: + if is_required and source_key not in metadata: + raise BrokenMetadata("No '%s' entry in metadata" % source_key) + if source_key in metadata: + metadata[target_key] = metadata.get(source_key) + return results + + +class ConfigDriveReader(BaseReader): + def __init__(self, base_path): + super(ConfigDriveReader, self).__init__(base_path) + + def _path_join(self, base, *add_ons): + components = [base] + list(add_ons) + return os.path.join(*components) + + def _path_exists(self, path): + return os.path.exists(path) + + def _path_read(self, path): + return util.load_file(path) + + def _read_ec2_metadata(self): + path = self._path_join(self.base_path, + 'ec2', 'latest', 'meta-data.json') + if not self._path_exists(path): + return {} + else: + try: + return util.load_json(self._path_read(path)) + except Exception as e: + raise BrokenMetadata("Failed to process " + "path %s: %s" % (path, e)) + + def read_v1(self): + """Reads a version 1 formatted location. + + Return a dict with metadata, userdata, dsmode, files and version (1). + + If not a valid path, raise a NonReadable exception. + """ + + found = {} + for name in FILES_V1.keys(): + path = self._path_join(self.base_path, name) + if self._path_exists(path): + found[name] = path + if len(found) == 0: + raise NonReadable("%s: no files found" % (self.base_path)) + + md = {} + for (name, (key, translator, default)) in FILES_V1.iteritems(): + if name in found: + path = found[name] + try: + contents = self._path_read(path) + except IOError: + raise BrokenMetadata("Failed to read: %s" % path) + try: + md[key] = translator(contents) + except Exception as e: + raise BrokenMetadata("Failed to process " + "path %s: %s" % (path, e)) + else: + md[key] = copy.deepcopy(default) + + keydata = md['authorized_keys'] + meta_js = md['meta_js'] + + # keydata in meta_js is preferred over "injected" + keydata = meta_js.get('public-keys', keydata) + if keydata: + lines = keydata.splitlines() + md['public-keys'] = [l for l in lines + if len(l) and not l.startswith("#")] + + # config-drive-v1 has no way for openstack to provide the instance-id + # so we copy that into metadata from the user input + if 'instance-id' in meta_js: + md['instance-id'] = meta_js['instance-id'] + + results = { + 'version': 1, + 'metadata': md, + } + + # allow the user to specify 'dsmode' in a meta tag + if 'dsmode' in meta_js: + results['dsmode'] = meta_js['dsmode'] + + # config-drive-v1 has no way of specifying user-data, so the user has + # to cheat and stuff it in a meta tag also. + results['userdata'] = meta_js.get('user-data', '') + + # this implementation does not support files other than + # network/interfaces and authorized_keys... 
+ results['files'] = {} + + return results + + +class MetadataReader(BaseReader): + def __init__(self, base_url, ssl_details=None, timeout=5, retries=5): + super(MetadataReader, self).__init__(base_url) + self._url_reader = functools.partial(url_helper.readurl, + retries=retries, + ssl_details=ssl_details, + timeout=timeout) + self._url_checker = functools.partial(url_helper.existsurl, + ssl_details=ssl_details, + timeout=timeout) + self._ec2_reader = functools.partial(ec2_utils.get_instance_metadata, + ssl_details=ssl_details, + timeout=timeout, + retries=retries) + + def _path_read(self, path): + return str(self._url_reader(path)) + + def _path_exists(self, path): + return self._url_checker(path) + + def _path_join(self, base, *add_ons): + return url_helper.combine_url(base, *add_ons) + + def _read_ec2_metadata(self): + return self._ec2_reader() diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py new file mode 100644 index 00000000..7d93f1d3 --- /dev/null +++ b/tests/unittests/test_datasource/test_openstack.py @@ -0,0 +1,122 @@ +import re +import json + +from StringIO import StringIO + +from urlparse import urlparse + +from tests.unittests import helpers + +from cloudinit.sources import DataSourceOpenStack as ds +from cloudinit.sources.helpers import openstack +from cloudinit import util + +import httpretty as hp + +BASE_URL = "http://169.254.169.254" +PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' +EC2_META = { + 'ami-id': 'ami-00000001', + 'ami-launch-index': 0, + 'ami-manifest-path': 'FIXME', + 'hostname': 'sm-foo-test.novalocal', + 'instance-action': 'none', + 'instance-id': 'i-00000001', + 'instance-type': 'm1.tiny', + 'local-hostname': 'sm-foo-test.novalocal', + 'local-ipv4': '0.0.0.0', + 'public-hostname': 'sm-foo-test.novalocal', + 'public-ipv4': '0.0.0.1', + 'reservation-id': 'r-iru5qm4m', +} +USER_DATA = '#!/bin/sh\necho This is user data\n' +VENDOR_DATA = { + 'magic': '', +} +OSTACK_META = { + 'availability_zone': 'nova', + 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'}, + {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}], + 'hostname': 'sm-foo-test.novalocal', + 'meta': {'dsmode': 'local', 'my-meta': 'my-value'}, + 'name': 'sm-foo-test', + 'public_keys': {'mykey': PUBKEY}, + 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c', +} +CONTENT_0 = 'This is contents of /etc/foo.cfg\n' +CONTENT_1 = '# this is /etc/bar/bar.cfg\n' +OS_FILES = { + 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META), + 'openstack/2012-08-10/user_data': USER_DATA, + 'openstack/content/0000': CONTENT_0, + 'openstack/content/0001': CONTENT_1, + 'openstack/latest/meta_data.json': json.dumps(OSTACK_META), + 'openstack/latest/user_data': USER_DATA, + 'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA), +} +EC2_FILES = { + 'latest/user-data': USER_DATA, +} + + +def _register_uris(version): + + def match_ec2_url(uri, headers): + path = uri.path.lstrip("/") + if path in EC2_FILES: + return (200, headers, EC2_FILES.get(path)) + if path == 'latest/meta-data': + buf = StringIO() + for (k, v) in EC2_META.items(): + if isinstance(v, (list, tuple)): + buf.write("%s/" % (k)) + else: + buf.write("%s" % (k)) + buf.write("\n") + return (200, headers, buf.getvalue()) + if path.startswith('latest/meta-data'): + value = None + pieces = path.split("/") + if path.endswith("/"): + pieces = pieces[2:-1] + value = util.get_cfg_by_path(EC2_META, pieces) + else: + pieces = pieces[2:] + value 
= util.get_cfg_by_path(EC2_META, pieces) + if value is not None: + return (200, headers, str(value)) + return (404, headers, '') + + def get_request_callback(method, uri, headers): + uri = urlparse(uri) + path = uri.path.lstrip("/") + if path in OS_FILES: + return (200, headers, OS_FILES.get(path)) + return match_ec2_url(uri, headers) + + def head_request_callback(method, uri, headers): + uri = urlparse(uri) + path = uri.path.lstrip("/") + for key in OS_FILES.keys(): + if key.startswith(path): + return (200, headers, '') + return (404, headers, '') + + hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'), + body=get_request_callback) + + hp.register_uri(hp.HEAD, re.compile(r'http://169.254.169.254/.*'), + body=head_request_callback) + + +class TestOpenStackDataSource(helpers.TestCase): + VERSION = 'latest' + + @hp.activate + def test_fetch(self): + _register_uris(self.VERSION) + f = ds.read_metadata_service(BASE_URL, version=self.VERSION) + self.assertEquals(VENDOR_DATA, f.get('vendordata')) + self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertEquals(USER_DATA, f.get('userdata')) -- cgit v1.2.3 From f9582464b157ffb0087b18910af11aa6ed49abcd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 7 Feb 2014 16:34:04 -0800 Subject: Add a bunch of new tests --- cloudinit/sources/DataSourceOpenStack.py | 3 +- tests/unittests/helpers.py | 6 + tests/unittests/test_datasource/test_openstack.py | 197 ++++++++++++++++++++-- 3 files changed, 190 insertions(+), 16 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 44889f4e..621572de 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -121,7 +121,8 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): 'Crawl of openstack metadata service', read_metadata_service, args=[self.metadata_address], - kwargs={'ssl_details': self.ssl_details}) + kwargs={'ssl_details': self.ssl_details, + 'version': openstack.OS_LATEST}) except openstack.NonReadable: return False except openstack.BrokenMetadata: diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 5b4f4208..945c1500 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -29,6 +29,12 @@ if (_PY_MAJOR, _PY_MINOR) <= (2, 6): standardMsg = standardMsg % (member, container) self.fail(self._formatMessage(msg, standardMsg)) + def assertIsNone(self, value, msg=None): + if value is not None: + standardMsg = '%r is not None' + standardMsg = standardMsg % (value) + self.fail(self._formatMessage(msg, standardMsg)) + else: class TestCase(unittest.TestCase): pass diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 7d93f1d3..c8cc40db 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -1,12 +1,33 @@ -import re +# vi: ts=4 expandtab +# +# Copyright (C) 2014 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import copy import json +import re from StringIO import StringIO from urlparse import urlparse -from tests.unittests import helpers +from tests.unittests import helpers as test_helpers +from cloudinit import helpers +from cloudinit import settings from cloudinit.sources import DataSourceOpenStack as ds from cloudinit.sources.helpers import openstack from cloudinit import util @@ -17,7 +38,7 @@ BASE_URL = "http://169.254.169.254" PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' EC2_META = { 'ami-id': 'ami-00000001', - 'ami-launch-index': 0, + 'ami-launch-index': '0', 'ami-manifest-path': 'FIXME', 'hostname': 'sm-foo-test.novalocal', 'instance-action': 'none', @@ -59,15 +80,17 @@ EC2_FILES = { } -def _register_uris(version): +def _register_uris(version, ec2_files, ec2_meta, os_files): + """Registers a set of url patterns into httpretty that will mimic the + same data returned by the openstack metadata service (and ec2 service).""" def match_ec2_url(uri, headers): path = uri.path.lstrip("/") - if path in EC2_FILES: - return (200, headers, EC2_FILES.get(path)) + if path in ec2_files: + return (200, headers, ec2_files.get(path)) if path == 'latest/meta-data': buf = StringIO() - for (k, v) in EC2_META.items(): + for (k, v) in ec2_meta.items(): if isinstance(v, (list, tuple)): buf.write("%s/" % (k)) else: @@ -79,10 +102,10 @@ def _register_uris(version): pieces = path.split("/") if path.endswith("/"): pieces = pieces[2:-1] - value = util.get_cfg_by_path(EC2_META, pieces) + value = util.get_cfg_by_path(ec2_meta, pieces) else: pieces = pieces[2:] - value = util.get_cfg_by_path(EC2_META, pieces) + value = util.get_cfg_by_path(ec2_meta, pieces) if value is not None: return (200, headers, str(value)) return (404, headers, '') @@ -90,14 +113,14 @@ def _register_uris(version): def get_request_callback(method, uri, headers): uri = urlparse(uri) path = uri.path.lstrip("/") - if path in OS_FILES: - return (200, headers, OS_FILES.get(path)) + if path in os_files: + return (200, headers, os_files.get(path)) return match_ec2_url(uri, headers) def head_request_callback(method, uri, headers): uri = urlparse(uri) path = uri.path.lstrip("/") - for key in OS_FILES.keys(): + for key in os_files.keys(): if key.startswith(path): return (200, headers, '') return (404, headers, '') @@ -109,14 +132,158 @@ def _register_uris(version): body=head_request_callback) -class TestOpenStackDataSource(helpers.TestCase): +class TestOpenStackDataSource(test_helpers.TestCase): VERSION = 'latest' @hp.activate - def test_fetch(self): - _register_uris(self.VERSION) + def test_successful(self): + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + f = ds.read_metadata_service(BASE_URL, version=self.VERSION) + self.assertEquals(VENDOR_DATA, f.get('vendordata')) + self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertEquals(2, len(f['files'])) + self.assertEquals(USER_DATA, f.get('userdata')) + self.assertEquals(EC2_META, f.get('ec2-metadata')) + self.assertEquals(2, f.get('version')) + metadata = f['metadata'] + self.assertEquals('nova', metadata.get('availability_zone')) + self.assertEquals('sm-foo-test.novalocal', metadata.get('hostname')) + self.assertEquals('sm-foo-test.novalocal', + metadata.get('local-hostname')) + self.assertEquals('sm-foo-test', 
metadata.get('name')) + self.assertEquals('b0fa911b-69d4-4476-bbe2-1c92bff6535c', + metadata.get('uuid')) + self.assertEquals('b0fa911b-69d4-4476-bbe2-1c92bff6535c', + metadata.get('instance-id')) + + @hp.activate + def test_no_ec2(self): + _register_uris(self.VERSION, {}, {}, OS_FILES) f = ds.read_metadata_service(BASE_URL, version=self.VERSION) self.assertEquals(VENDOR_DATA, f.get('vendordata')) self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg']) self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg']) self.assertEquals(USER_DATA, f.get('userdata')) + self.assertEquals({}, f.get('ec2-metadata')) + self.assertEquals(2, f.get('version')) + + @hp.activate + def test_bad_metadata(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files.pop(k, None) + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(openstack.NonReadable, ds.read_metadata_service, + BASE_URL, version=self.VERSION) + + @hp.activate + def test_bad_uuid(self): + os_files = copy.deepcopy(OS_FILES) + os_meta = copy.deepcopy(OSTACK_META) + os_meta.pop('uuid') + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files[k] = json.dumps(os_meta) + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service, + BASE_URL, version=self.VERSION) + + @hp.activate + def test_userdata_empty(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('user_data'): + os_files.pop(k, None) + _register_uris(self.VERSION, {}, {}, os_files) + f = ds.read_metadata_service(BASE_URL, version=self.VERSION) + self.assertEquals(VENDOR_DATA, f.get('vendordata')) + self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertFalse(f.get('userdata')) + + @hp.activate + def test_vendordata_empty(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('vendor_data.json'): + os_files.pop(k, None) + _register_uris(self.VERSION, {}, {}, os_files) + f = ds.read_metadata_service(BASE_URL, version=self.VERSION) + self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertFalse(f.get('vendordata')) + + @hp.activate + def test_vendordata_invalid(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('vendor_data.json'): + os_files[k] = '{' # some invalid json + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service, + BASE_URL, version=self.VERSION) + + @hp.activate + def test_metadata_invalid(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files[k] = '{' # some invalid json + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service, + BASE_URL, version=self.VERSION) + + @hp.activate + def test_datasource(self): + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + self.assertIsNone(ds_os.version) + found = ds_os.get_data() + self.assertTrue(found) + self.assertEquals(2, ds_os.version) + md = dict(ds_os.metadata) + md.pop('instance-id', None) + md.pop('local-hostname', None) + self.assertEquals(OSTACK_META, md) + self.assertEquals(EC2_META, ds_os.ec2_metadata) + 
self.assertEquals(USER_DATA, ds_os.userdata_raw) + self.assertEquals(2, len(ds_os.files)) + self.assertEquals(VENDOR_DATA, ds_os.vendordata_raw) + + @hp.activate + def test_bad_datasource_meta(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files[k] = '{' # some invalid json + _register_uris(self.VERSION, {}, {}, os_files) + ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + self.assertIsNone(ds_os.version) + found = ds_os.get_data() + self.assertFalse(found) + self.assertIsNone(ds_os.version) + + @hp.activate + def test_no_datasource(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files.pop(k) + _register_uris(self.VERSION, {}, {}, os_files) + ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + ds_os.ds_cfg = { + 'max_wait': 0, + 'timeout': 0, + } + self.assertIsNone(ds_os.version) + found = ds_os.get_data() + self.assertFalse(found) + self.assertIsNone(ds_os.version) -- cgit v1.2.3 From 098a74e6207f5d91f515fac63e970375d52795c0 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 8 Feb 2014 12:20:33 -0800 Subject: Remove HEAD usage and other small adjustments --- cloudinit/ec2_utils.py | 4 +-- cloudinit/sources/DataSourceOpenStack.py | 1 + cloudinit/sources/helpers/openstack.py | 41 ++++++++++++++--------- cloudinit/url_helper.py | 23 ++----------- tests/unittests/test_datasource/test_openstack.py | 11 ------ 5 files changed, 30 insertions(+), 50 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 91cba20f..a7c9c9ab 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -16,10 +16,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-import httplib -from urlparse import (urlparse, urlunparse) - import functools +import httplib import json from cloudinit import log as logging diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 621572de..2c50ed84 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -44,6 +44,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): self.ssl_details = util.fetch_ssl_details(self.paths) self.version = None self.files = {} + self.ec2_metadata = None def __str__(self): root = sources.DataSource.__str__(self) diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 9dbef677..09fb4ad8 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -21,7 +21,6 @@ import abc import base64 import copy -import functools import os from cloudinit import ec2_utils @@ -395,26 +394,38 @@ class ConfigDriveReader(BaseReader): class MetadataReader(BaseReader): def __init__(self, base_url, ssl_details=None, timeout=5, retries=5): super(MetadataReader, self).__init__(base_url) - self._url_reader = functools.partial(url_helper.readurl, - retries=retries, - ssl_details=ssl_details, - timeout=timeout) - self._url_checker = functools.partial(url_helper.existsurl, - ssl_details=ssl_details, - timeout=timeout) - self._ec2_reader = functools.partial(ec2_utils.get_instance_metadata, - ssl_details=ssl_details, - timeout=timeout, - retries=retries) + self.ssl_details = ssl_details + self.timeout = float(timeout) + self.retries = int(retries) def _path_read(self, path): - return str(self._url_reader(path)) + response = url_helper.readurl(path, + retries=self.retries, + ssl_details=self.ssl_details, + timeout=self.timeout) + return response.contents def _path_exists(self, path): - return self._url_checker(path) + + def should_retry_cb(request, cause): + if cause.code >= 400: + return False + return True + + try: + response = url_helper.readurl(path, + retries=self.retries, + ssl_details=self.ssl_details, + timeout=self.timeout, + exception_cb=should_retry_cb) + return response.ok() + except IOError: + return False def _path_join(self, base, *add_ons): return url_helper.combine_url(base, *add_ons) def _read_ec2_metadata(self): - return self._ec2_reader() + return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details, + timeout=self.timeout, + retries=self.retries) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 76a8e29b..a477b185 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -166,35 +166,16 @@ def _get_ssl_args(url, ssl_details): return ssl_args -def existsurl(url, ssl_details=None, timeout=None): - r = _readurl(url, ssl_details=ssl_details, timeout=timeout, - method='HEAD', check_status=False) - return r.ok() - - def readurl(url, data=None, timeout=None, retries=0, sec_between=1, - headers=None, headers_cb=None, ssl_details=None, - check_status=True, allow_redirects=True, exception_cb=None): - return _readurl(url, data=data, timeout=timeout, retries=retries, - sec_between=sec_between, headers=headers, - headers_cb=headers_cb, ssl_details=ssl_details, - check_status=check_status, - allow_redirects=allow_redirects, - exception_cb=exception_cb) - - -def _readurl(url, data=None, timeout=None, retries=0, sec_between=1, headers=None, headers_cb=None, ssl_details=None, - check_status=True, allow_redirects=True, exception_cb=None, - method='GET'): + check_status=True, 
allow_redirects=True, exception_cb=None): url = _cleanurl(url) req_args = { 'url': url, } req_args.update(_get_ssl_args(url, ssl_details)) - scheme = urlparse(url).scheme # pylint: disable=E1101 req_args['allow_redirects'] = allow_redirects - req_args['method'] = method + req_args['method'] = 'GET' if timeout is not None: req_args['timeout'] = max(float(timeout), 0) if data: diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 3fcf8bc9..3a64430a 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -117,20 +117,9 @@ def _register_uris(version, ec2_files, ec2_meta, os_files): return (200, headers, os_files.get(path)) return match_ec2_url(uri, headers) - def head_request_callback(method, uri, headers): - uri = urlparse(uri) - path = uri.path.lstrip("/") - for key in os_files.keys(): - if key.startswith(path): - return (200, headers, '') - return (404, headers, '') - hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'), body=get_request_callback) - hp.register_uri(hp.HEAD, re.compile(r'http://169.254.169.254/.*'), - body=head_request_callback) - class TestOpenStackDataSource(test_helpers.TestCase): VERSION = 'latest' -- cgit v1.2.3 From ea05883f88cedd5ed099aed19d4c5df563525097 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 8 Feb 2014 13:16:07 -0800 Subject: Capture IOError and use LOG better --- cloudinit/sources/DataSourceConfigDrive.py | 2 +- cloudinit/sources/DataSourceOpenStack.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 1d30fe08..142e0eb8 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -87,7 +87,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): md = util.mergemanydict([md, DEFAULT_METADATA]) user_dsmode = results.get('dsmode', None) if user_dsmode not in VALID_DSMODES + (None,): - LOG.warn("User specified invalid mode: %s" % user_dsmode) + LOG.warn("User specified invalid mode: %s", user_dsmode) user_dsmode = None dsmode = get_ds_mode(cfgdrv_ver=results['version'], diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 2c50ed84..69807798 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -126,14 +126,14 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): 'version': openstack.OS_LATEST}) except openstack.NonReadable: return False - except openstack.BrokenMetadata: + except (openstack.BrokenMetadata, IOError): util.logexc(LOG, "Broken metadata address %s", self.metadata_address) return False user_dsmode = results.get('dsmode', None) if user_dsmode not in VALID_DSMODES + (None,): - LOG.warn("User specified invalid mode: %s" % user_dsmode) + LOG.warn("User specified invalid mode: %s", user_dsmode) user_dsmode = None if user_dsmode == 'disabled': return False -- cgit v1.2.3 From 1edc8d3697d05f66195b9a425771f60cf6f9c27e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 8 Feb 2014 13:28:07 -0800 Subject: Update requests ssl not supported message --- cloudinit/url_helper.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index a477b185..43e879d2 100644 --- a/cloudinit/url_helper.py +++ 
b/cloudinit/url_helper.py @@ -39,6 +39,7 @@ NOT_FOUND = httplib.NOT_FOUND # Check if requests has ssl support (added in requests >= 0.8.8) SSL_ENABLED = False CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0) +_REQ_VER = None try: from distutils.version import LooseVersion import pkg_resources @@ -152,7 +153,8 @@ def _get_ssl_args(url, ssl_details): scheme = urlparse(url).scheme # pylint: disable=E1101 if scheme == 'https' and ssl_details: if not SSL_ENABLED: - LOG.warn("SSL is not enabled, cert. verification can not occur!") + LOG.warn("SSL is not supported in requests v%s, " + "cert. verification can not occur!", _REQ_VER) else: if 'ca_certs' in ssl_details and ssl_details['ca_certs']: ssl_args['verify'] = ssl_details['ca_certs'] -- cgit v1.2.3 From 1597aca9b0606d02c045549afce4395370502231 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 8 Feb 2014 13:39:27 -0800 Subject: Spacing and comment cleanup --- cloudinit/url_helper.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 43e879d2..c116a484 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -169,8 +169,8 @@ def _get_ssl_args(url, ssl_details): def readurl(url, data=None, timeout=None, retries=0, sec_between=1, - headers=None, headers_cb=None, ssl_details=None, - check_status=True, allow_redirects=True, exception_cb=None): + headers=None, headers_cb=None, ssl_details=None, + check_status=True, allow_redirects=True, exception_cb=None): url = _cleanurl(url) req_args = { 'url': url, @@ -206,12 +206,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, def _cb(url): return headers headers_cb = _cb - if data: - # Do this after the log (it might be large) req_args['data'] = data if sec_between is None: sec_between = -1 + excps = [] # Handle retrying ourselves since the built-in support # doesn't handle sleeping between tries... -- cgit v1.2.3 From 5788cd903f6e4a9bab2ad32e9c1d2eb13b485ac3 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 8 Feb 2014 17:29:52 -0800 Subject: Handle code not existing in older requests versions --- cloudinit/sources/helpers/openstack.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 09fb4ad8..a17148d3 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -408,8 +408,13 @@ class MetadataReader(BaseReader): def _path_exists(self, path): def should_retry_cb(request, cause): - if cause.code >= 400: - return False + try: + code = int(cause.code) + if code >= 400: + return False + except (TypeError, ValueError): + # Older versions of requests didn't have a code. 
+ pass return True try: -- cgit v1.2.3 From f82e9552145ff468763727d7e5a53f56dc9f5b20 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 13 Feb 2014 11:13:52 -0500 Subject: add Openstack to default datasources --- cloudinit/settings.py | 1 + 1 file changed, 1 insertion(+) (limited to 'cloudinit') diff --git a/cloudinit/settings.py b/cloudinit/settings.py index 0d8d8ec5..37d4958b 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -37,6 +37,7 @@ CFG_BUILTIN = { 'OVF', 'MAAS', 'GCE', + 'OpenStack', 'Ec2', 'CloudSigma', 'CloudStack', -- cgit v1.2.3 From 87d0fa867f27f101e93006ba8dc8a395098e8df1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 13 Feb 2014 12:13:42 -0500 Subject: wait less for the metadata service (by default) Waiting around for a metadata service in a given datasource means that if it's not there all the subsequent datasources have to wait, and boot is slowed down. As it is right now, EC2 is the only one that has the right to wait. In the past, we had to wait around for the EC2 metadata service. I really do not want to extend that courtesy to other cloud platforms. A network-based metadata service should be up as soon as networking is up. --- cloudinit/sources/DataSourceOpenStack.py | 28 +++++++++++++--------------- cloudinit/url_helper.py | 1 + 2 files changed, 14 insertions(+), 15 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 69807798..7fafa3f7 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -45,6 +45,8 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): self.version = None self.files = {} self.ec2_metadata = None + if not self.ds_cfg: + self.ds_cfg = {} def __str__(self): root = sources.DataSource.__str__(self) @@ -54,27 +56,25 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): def _get_url_settings(self): # TODO(harlowja): this is shared with ec2 datasource, we should just # move it to a shared location instead... - ds_cfg = self.ds_cfg - if not ds_cfg: - ds_cfg = {} - max_wait = 120 + # Note: the defaults here are different though. + + # max_wait < 0 indicates do not wait + max_wait = -1 + timeout = 10 + try: - max_wait = int(ds_cfg.get("max_wait", max_wait)) + max_wait = int(self.ds_cfg.get("max_wait", max_wait)) except Exception: util.logexc(LOG, "Failed to get max wait.
using %s", max_wait) - timeout = 50 try: - timeout = max(0, int(ds_cfg.get("timeout", timeout))) + timeout = max(0, int(self.ds_cfg.get("timeout", timeout))) except Exception: util.logexc(LOG, "Failed to get timeout, using %s", timeout) return (max_wait, timeout) def wait_for_metadata_service(self): - ds_cfg = self.ds_cfg - if not ds_cfg: - ds_cfg = {} - urls = ds_cfg.get("metadata_urls", [DEF_MD_URL]) + urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL]) filtered = [x for x in urls if util.is_resolvable_url(x)] if set(filtered) != set(urls): LOG.debug("Removed the following from metadata urls: %s", @@ -95,8 +95,6 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): url2base[md_url] = url (max_wait, timeout) = self._get_url_settings() - if max_wait <= 0: - return False start_time = time.time() avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, timeout=timeout, @@ -104,8 +102,8 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): if avail_url: LOG.debug("Using metadata source: '%s'", url2base[avail_url]) else: - LOG.critical("Giving up on md from %s after %s seconds", - md_urls, int(time.time() - start_time)) + LOG.debug("Giving up on OpenStack md from %s after %s seconds", + md_urls, int(time.time() - start_time)) self.metadata_address = url2base.get(avail_url) return bool(avail_url) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index c116a484..4a83169a 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -266,6 +266,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, max_wait: roughly the maximum time to wait before giving up The max time is *actually* len(urls)*timeout as each url will be tried once and given the timeout provided. + a number <= 0 will always result in only one try timeout: the timeout provided to urlopen status_cb: call method with string message when a url is not available headers_cb: call method with single argument of url to get headers -- cgit v1.2.3 From 5d5b8f90a10549bf54cf2000f514eb9c6ea420f7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 13 Feb 2014 12:50:59 -0500 Subject: do not warn on waiting for url --- cloudinit/sources/DataSourceOpenStack.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 7fafa3f7..5edbb448 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -97,8 +97,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): (max_wait, timeout) = self._get_url_settings() start_time = time.time() avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, - timeout=timeout, - status_cb=LOG.warn) + timeout=timeout) if avail_url: LOG.debug("Using metadata source: '%s'", url2base[avail_url]) else: -- cgit v1.2.3 From b8f1c6f27345d1aa0ef550a72bd34c2d7bd4ed41 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 13 Feb 2014 13:53:08 -0500 Subject: make 'Loaded datasource' an info message --- cloudinit/stages.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 7acd3355..58349ffc 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -234,7 +234,7 @@ class Init(object): copy.deepcopy(self.ds_deps), cfg_list, pkg_list) - LOG.debug("Loaded datasource %s - %s", dsname, ds) + LOG.info("Loaded datasource %s - %s", dsname, ds) self.datasource = ds # Ensure we
adjust our path members datasource # now that we have one (thus allowing ipath to be used) -- cgit v1.2.3
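
The "wait less for the metadata service (by default)" change above deliberately flips the default from waiting up to two minutes to not waiting at all: a max_wait of -1 (or any value <= 0) makes wait_for_url try each candidate URL only once, while a deployment that genuinely needs the old behaviour can opt back in through the datasource configuration keys read from ds_cfg (max_wait, timeout, metadata_urls). The following is a minimal standalone sketch of that settings handling, written against only the defaults and config keys shown in the diff above rather than the shipped _get_url_settings() method itself:

# Sketch of how the new URL-wait defaults are derived from a datasource
# config dict such as {'max_wait': 120, 'timeout': 10,
# 'metadata_urls': ['http://169.254.169.254']}.  Values that are missing
# or unparseable fall back to the non-waiting defaults.

def get_url_settings(ds_cfg):
    max_wait = -1   # <= 0 means "do not wait"; each URL is tried once
    timeout = 10    # per-request timeout, in seconds
    try:
        max_wait = int(ds_cfg.get("max_wait", max_wait))
    except (TypeError, ValueError):
        pass        # keep the non-waiting default
    try:
        timeout = max(0, int(ds_cfg.get("timeout", timeout)))
    except (TypeError, ValueError):
        pass        # keep the default timeout
    return (max_wait, timeout)


# With no operator-supplied config the datasource does not wait around;
# an explicit config restores a bounded wait.
assert get_url_settings({}) == (-1, 10)
assert get_url_settings({'max_wait': '120', 'timeout': 5}) == (120, 5)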