Diffstat (limited to 'cloudinit/sources')
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py    | 299
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py  |   6
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 326
-rw-r--r--  cloudinit/sources/DataSourceEc2.py         |  48
-rw-r--r--  cloudinit/sources/DataSourceNone.py        |  61
-rw-r--r--  cloudinit/sources/__init__.py              |  44
6 files changed, 668 insertions, 116 deletions
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
new file mode 100644
index 00000000..69c376a5
--- /dev/null
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -0,0 +1,299 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joe VLcek <JVLcek@RedHat.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''
+This file contains code used to gather the user data passed to an
+instance on RHEVm and vSphere.
+'''
+
+import errno
+import os
+import os.path
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit.util import ProcessExecutionError
+
+LOG = logging.getLogger(__name__)
+
+# Needed file paths
+CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
+
+# Shell command lists
+CMD_DMI_SYSTEM = ['/usr/sbin/dmidecode', '--string', 'system-product-name']
+CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
+CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5']
+
+META_DATA_NOT_SUPPORTED = {
+    'block-device-mapping': {},
+    'instance-id': 455,
+    'local-hostname': 'localhost',
+    'placement': {},
+    }
+
+
+def read_user_data_callback(mount_dir):
+    '''
+    Description:
+        This callback will be applied by util.mount_cb() on the mounted
+        file.
+
+        Deltacloud file name contains deltacloud. Those not using
+        Deltacloud but instead instrumenting the injection, could
+        drop deltacloud from the file name.
+
+    Input:
+        mount_dir - Mount directory
+
+    Returns:
+        User Data
+
+    '''
+
+    deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
+    user_data_file = mount_dir + '/user-data.txt'
+
+    # First try deltacloud_user_data_file. On failure try user_data_file.
+    try:
+        with open(deltacloud_user_data_file, 'r') as user_data_f:
+            user_data = user_data_f.read().strip()
+    except:
+        try:
+            with open(user_data_file, 'r') as user_data_f:
+                user_data = user_data_f.read().strip()
+        except:
+            util.logexc(LOG, ('Failed accessing user data file.'))
+            return None
+
+    return user_data
+
+
+class DataSourceAltCloud(sources.DataSource):
+    def __init__(self, sys_cfg, distro, paths):
+        sources.DataSource.__init__(self, sys_cfg, distro, paths)
+        self.seed = None
+        self.supported_seed_starts = ("/", "file://")
+
+    def __str__(self):
+        mstr = "%s [seed=%s]" % (util.obj_name(self), self.seed)
+        return mstr
+
+    def get_cloud_type(self):
+        '''
+        Description:
+            Get the type for the cloud back end this instance is running on
+            by examining the string returned by:
+            dmidecode --string system-product-name
+
+            On VMWare/vSphere dmidecode returns: RHEV Hypervisor
+            On VMWare/vSphere dmidecode returns: VMware Virtual Platform
+
+        Input:
+            None
+
+        Returns:
+            One of the following strings:
+            'RHEV', 'VSPHERE' or 'UNKNOWN'
+
+        '''
+
+        cmd = CMD_DMI_SYSTEM
+        try:
+            (cmd_out, _err) = util.subp(cmd)
+        except ProcessExecutionError, _err:
+            LOG.debug(('Failed command: %s\n%s') % \
+                (' '.join(cmd), _err.message))
+            return 'UNKNOWN'
+        except OSError, _err:
+            LOG.debug(('Failed command: %s\n%s') % \
+                (' '.join(cmd), _err.message))
+            return 'UNKNOWN'
+
+        if cmd_out.upper().startswith('RHEV'):
+            return 'RHEV'
+
+        if cmd_out.upper().startswith('VMWARE'):
+            return 'VSPHERE'
+
+        return 'UNKNOWN'
+
+    def get_data(self):
+        '''
+        Description:
+            User Data is passed to the launching instance which
+            is used to perform instance configuration.
+
+            Cloud providers expose the user data differently.
+            It is necessary to determine which cloud provider
+            the current instance is running on to determine
+            how to access the user data. Images built with
+            image factory will contain a CLOUD_INFO_FILE which
+            contains a string identifying the cloud provider.
+
+            Images not built with Imagefactory will try to
+            determine what the cloud provider is based on system
+            information.
+        '''
+
+        LOG.debug('Invoked get_data()')
+
+        if os.path.exists(CLOUD_INFO_FILE):
+            try:
+                cloud_info = open(CLOUD_INFO_FILE)
+                cloud_type = cloud_info.read().strip().upper()
+                cloud_info.close()
+            except:
+                util.logexc(LOG, 'Unable to access cloud info file.')
+                return False
+        else:
+            cloud_type = self.get_cloud_type()
+
+        LOG.debug('cloud_type: ' + str(cloud_type))
+
+        if 'RHEV' in cloud_type:
+            if self.user_data_rhevm():
+                return True
+        elif 'VSPHERE' in cloud_type:
+            if self.user_data_vsphere():
+                return True
+        else:
+            # there was no recognized alternate cloud type
+            # indicating this handler should not be used.
+            return False
+
+        # No user data found
+        util.logexc(LOG, ('Failed accessing user data.'))
+        return False
+
+    def user_data_rhevm(self):
+        '''
+        RHEVM specific userdata read
+
+        If on RHEV-M the user data will be contained on the
+        floppy device in file <user_data_file>
+        To access it:
+           modprobe floppy
+
+           Leverage util.mount_cb to:
+               mkdir <tmp mount dir>
+               mount /dev/fd0 <tmp mount dir>
+               The call back passed to util.mount_cb will do:
+                   read <tmp mount dir>/<user_data_file>
+        '''
+
+        return_str = None
+
+        # modprobe floppy
+        try:
+            cmd = CMD_PROBE_FLOPPY
+            (cmd_out, _err) = util.subp(cmd)
+            LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
+        except ProcessExecutionError, _err:
+            util.logexc(LOG, (('Failed command: %s\n%s') % \
+                (' '.join(cmd), _err.message)))
+            return False
+        except OSError, _err:
+            util.logexc(LOG, (('Failed command: %s\n%s') % \
+                (' '.join(cmd), _err.message)))
+            return False
+
+        floppy_dev = '/dev/fd0'
+
+        # udevadm settle for floppy device
+        try:
+            cmd = CMD_UDEVADM_SETTLE
+            cmd.append('--exit-if-exists=' + floppy_dev)
+            (cmd_out, _err) = util.subp(cmd)
+            LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
+        except ProcessExecutionError, _err:
+            util.logexc(LOG, (('Failed command: %s\n%s') % \
+                (' '.join(cmd), _err.message)))
+            return False
+        except OSError, _err:
+            util.logexc(LOG, (('Failed command: %s\n%s') % \
+                (' '.join(cmd), _err.message)))
+            return False
+
+        try:
+            return_str = util.mount_cb(floppy_dev, read_user_data_callback)
+        except OSError as err:
+            if err.errno != errno.ENOENT:
+                raise
+        except util.MountFailedError:
+            util.logexc(LOG, ("Failed to mount %s"
+                              " when looking for user data"), floppy_dev)
+
+        self.userdata_raw = return_str
+        self.metadata = META_DATA_NOT_SUPPORTED
+
+        if return_str:
+            return True
+        else:
+            return False
+
+    def user_data_vsphere(self):
+        '''
+        vSphere specific userdata read
+
+        If on vSphere the user data will be contained on the
+        cdrom device in file <user_data_file>
+        To access it:
+           Leverage util.mount_cb to:
+               mkdir <tmp mount dir>
+               mount /dev/fd0 <tmp mount dir>
+               The call back passed to util.mount_cb will do:
+                   read <tmp mount dir>/<user_data_file>
+        '''
+
+        return_str = None
+        cdrom_list = util.find_devs_with('LABEL=CDROM')
+        for cdrom_dev in cdrom_list:
+            try:
+                return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
+                if return_str:
+                    break
+            except OSError as err:
+                if err.errno != errno.ENOENT:
+                    raise
+            except util.MountFailedError:
+                util.logexc(LOG, ("Failed to mount %s"
+                                  " when looking for user data"), cdrom_dev)
+
+        self.userdata_raw = return_str
+        self.metadata = META_DATA_NOT_SUPPORTED
+
+        if return_str:
+            return True
+        else:
+            return False
+
+# Used to match classes to dependencies
+# Source DataSourceAltCloud does not really depend on networking.
+# In the future 'dsmode' like behavior can be added to offer user
+# the ability to run before networking.
+datasources = [
+    (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+    return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 751bef4f..f7ffa7cb 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -49,8 +49,7 @@ class DataSourceCloudStack(sources.DataSource):
         self.metadata_address = "http://%s/" % (gw_addr)
 
     def get_default_gateway(self):
-        """ Returns the default gateway ip address in the dotted format
-        """
+        """Returns the default gateway ip address in the dotted format."""
         lines = util.load_file("/proc/net/route").splitlines()
         for line in lines:
             items = line.split("\t")
@@ -132,7 +131,8 @@ class DataSourceCloudStack(sources.DataSource):
     def get_instance_id(self):
         return self.metadata['instance-id']
 
-    def get_availability_zone(self):
+    @property
+    def availability_zone(self):
         return self.metadata['availability-zone']
 
 
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 320dd1d1..b8154367 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -30,88 +30,119 @@ LOG = logging.getLogger(__name__)
 
 # Various defaults/constants...
 DEFAULT_IID = "iid-dsconfigdrive"
 DEFAULT_MODE = 'pass'
-CFG_DRIVE_FILES = [
+CFG_DRIVE_FILES_V1 = [
     "etc/network/interfaces",
     "root/.ssh/authorized_keys",
     "meta.js",
 ]
 DEFAULT_METADATA = {
     "instance-id": DEFAULT_IID,
-    "dsmode": DEFAULT_MODE,
 }
-CFG_DRIVE_DEV_ENV = 'CLOUD_INIT_CONFIG_DRIVE_DEVICE'
+VALID_DSMODES = ("local", "net", "pass", "disabled")
 
 
 class DataSourceConfigDrive(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
-        self.seed = None
-        self.cfg = {}
+        self.source = None
         self.dsmode = 'local'
         self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
+        self.version = None
 
     def __str__(self):
-        mstr = "%s [%s]" % (util.obj_name(self), self.dsmode)
-        mstr += "[seed=%s]" % (self.seed)
+        mstr = "%s [%s,ver=%s]" % (util.obj_name(self), self.dsmode,
                                   self.version)
+        mstr += "[source=%s]" % (self.source)
         return mstr
 
     def get_data(self):
         found = None
         md = {}
-        ud = ""
+        results = {}
 
         if os.path.isdir(self.seed_dir):
             try:
-                (md, ud) = read_config_drive_dir(self.seed_dir)
+                results = read_config_drive_dir(self.seed_dir)
                 found = self.seed_dir
             except NonConfigDriveDir:
                 util.logexc(LOG, "Failed reading config drive from %s",
                             self.seed_dir)
 
        if not found:
-            dev = find_cfg_drive_device()
-            if dev:
+            devlist = find_candidate_devs()
+            for dev in devlist:
                 try:
-                    (md, ud) = util.mount_cb(dev, read_config_drive_dir)
+                    results = util.mount_cb(dev, read_config_drive_dir)
                     found = dev
+                    break
                 except (NonConfigDriveDir, util.MountFailedError):
                     pass
+                except BrokenConfigDriveDir:
+                    util.logexc(LOG, "broken config drive: %s", dev)
 
         if not found:
             return False
 
-        if 'dsconfig' in md:
-            self.cfg = md['dscfg']
-
+        md = results['metadata']
         md = util.mergedict(md, DEFAULT_METADATA)
 
-        # Update interfaces and ifup only on the local datasource
-        # this way the DataSourceConfigDriveNet doesn't do it also.
-        if 'network-interfaces' in md and self.dsmode == "local":
+        user_dsmode = results.get('dsmode', None)
+        if user_dsmode not in VALID_DSMODES + (None,):
+            LOG.warn("user specified invalid mode: %s" % user_dsmode)
+            user_dsmode = None
+
+        dsmode = get_ds_mode(cfgdrv_ver=results['cfgdrive_ver'],
+                             ds_cfg=self.ds_cfg.get('dsmode'),
+                             user=user_dsmode)
+
+        if dsmode == "disabled":
+            # most likely user specified
+            return False
+
+        # TODO(smoser): fix this, its dirty.
+        # we want to do some things (writing files and network config)
+        # only on first boot, and even then, we want to do so in the
+        # local datasource (so they happen earlier) even if the configured
+        # dsmode is 'net' or 'pass'. To do this, we check the previous
+        # instance-id
+        prev_iid = get_previous_iid(self.paths)
+        cur_iid = md['instance-id']
+
+        if ('network_config' in results and self.dsmode == "local" and
+            prev_iid != cur_iid):
             LOG.debug("Updating network interfaces from config drive (%s)",
-                      md['dsmode'])
-            self.distro.apply_network(md['network-interfaces'])
+                      dsmode)
+            self.distro.apply_network(results['network_config'])
 
-        self.seed = found
-        self.metadata = md
-        self.userdata_raw = ud
+        # file writing occurs in local mode (to be as early as possible)
+        if self.dsmode == "local" and prev_iid != cur_iid and results['files']:
+            LOG.debug("writing injected files")
+            try:
+                write_files(results['files'])
+            except:
+                util.logexc(LOG, "Failed writing files")
+
+        # dsmode != self.dsmode here if:
+        #  * dsmode = "pass", pass means it should only copy files and then
+        #    pass to another datasource
+        #  * dsmode = "net" and self.dsmode = "local"
+        #    so that user boothooks would be applied with network, the
+        #    local datasource just gets out of the way, and lets the net claim
+        if dsmode != self.dsmode:
+            LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
+            return False
 
-        if md['dsmode'] == self.dsmode:
-            return True
+        self.source = found
+        self.metadata = md
+        self.userdata_raw = results.get('userdata')
+        self.version = results['cfgdrive_ver']
 
-        LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
-        return False
+        return True
 
     def get_public_ssh_keys(self):
         if not 'public-keys' in self.metadata:
             return []
         return self.metadata['public-keys']
 
-    # The data sources' config_obj is a cloud-config formated
-    # object that came to it from ways other than cloud-config
-    # because cloud-config content would be handled elsewhere
-    def get_config_obj(self):
-        return self.cfg
-
 
 class DataSourceConfigDriveNet(DataSourceConfigDrive):
     def __init__(self, sys_cfg, distro, paths):
@@ -123,48 +154,146 @@ class NonConfigDriveDir(Exception):
     pass
 
 
-def find_cfg_drive_device():
-    """ Get the config drive device. Return a string like '/dev/vdb'
-    or None (if there is no non-root device attached). This does not
-    check the contents, only reports that if there *were* a config_drive
-    attached, it would be this device.
-    Note: per config_drive documentation, this is
-    "associated as the last available disk on the instance"
-    """
+class BrokenConfigDriveDir(Exception):
+    pass
 
-    # This seems to be for debugging??
-    if CFG_DRIVE_DEV_ENV in os.environ:
-        return os.environ[CFG_DRIVE_DEV_ENV]
 
-    # We are looking for a raw block device (sda, not sda1) with a vfat
-    # filesystem on it....
-    letters = "abcdefghijklmnopqrstuvwxyz"
-    devs = util.find_devs_with("TYPE=vfat")
+def find_candidate_devs():
+    """Return a list of devices that may contain the config drive.
 
-    # Filter out anything not ending in a letter (ignore partitions)
-    devs = [f for f in devs if f[-1] in letters]
+    The returned list is sorted by search order where the first item has
+    should be searched first (highest priority)
+
+    config drive v1:
+       Per documentation, this is "associated as the last available disk on the
+       instance", and should be VFAT.
+       Currently, we do not restrict search list to "last available disk"
+
+    config drive v2:
+       Disk should be:
+        * either vfat or iso9660 formated
+        * labeled with 'config-2'
+    """
 
-    # Sort them in reverse so "last" device is first
-    devs.sort(reverse=True)
+    by_fstype = (util.find_devs_with("TYPE=vfat") +
+                 util.find_devs_with("TYPE=iso9660"))
+    by_label = util.find_devs_with("LABEL=config-2")
 
-    if devs:
-        return devs[0]
+    # give preference to "last available disk" (vdb over vda)
+    # note, this is not a perfect rendition of that.
+    by_fstype.sort(reverse=True)
+    by_label.sort(reverse=True)
 
-    return None
+    # combine list of items by putting by-label items first
+    # followed by fstype items, but with dupes removed
+    combined = (by_label + [d for d in by_fstype if d not in by_label])
+
+    # We are looking for block device (sda, not sda1), ignore partitions
+    combined = [d for d in combined if d[-1] not in "0123456789"]
+
+    return combined
 
 
 def read_config_drive_dir(source_dir):
+    last_e = NonConfigDriveDir("Not found")
+    for finder in (read_config_drive_dir_v2, read_config_drive_dir_v1):
+        try:
+            data = finder(source_dir)
+            return data
+        except NonConfigDriveDir as exc:
+            last_e = exc
+    raise last_e
+
+
+def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
+
+    if (not os.path.isdir(os.path.join(source_dir, "openstack", version)) and
+        os.path.isdir(os.path.join(source_dir, "openstack", "latest"))):
+        LOG.warn("version '%s' not available, attempting to use 'latest'" %
+                 version)
+        version = "latest"
+
+    datafiles = (
+        ('metadata',
+         "openstack/%s/meta_data.json" % version, True, json.loads),
+        ('userdata', "openstack/%s/user_data" % version, False, None),
+        ('ec2-metadata', "ec2/latest/metadata.json", False, json.loads),
+    )
+
+    results = {'userdata': None}
+    for (name, path, required, process) in datafiles:
+        fpath = os.path.join(source_dir, path)
+        data = None
+        found = False
+        if os.path.isfile(fpath):
+            try:
+                with open(fpath) as fp:
+                    data = fp.read()
+            except Exception as exc:
+                raise BrokenConfigDriveDir("failed to read: %s" % fpath)
+            found = True
+        elif required:
+            raise NonConfigDriveDir("missing mandatory %s" % fpath)
+
+        if found and process:
+            try:
+                data = process(data)
+            except Exception as exc:
+                raise BrokenConfigDriveDir("failed to process: %s" % fpath)
+
+        if found:
+            results[name] = data
+
+    # instance-id is 'uuid' for openstack. just copy it to instance-id.
+    if 'instance-id' not in results['metadata']:
+        try:
+            results['metadata']['instance-id'] = results['metadata']['uuid']
+        except KeyError:
+            raise BrokenConfigDriveDir("No uuid entry in metadata")
+
+    def read_content_path(item):
+        # do not use os.path.join here, as content_path starts with /
+        cpath = os.path.sep.join((source_dir, "openstack",
+                                  "./%s" % item['content_path']))
+        with open(cpath) as fp:
+            return(fp.read())
+
+    files = {}
+    try:
+        for item in results['metadata'].get('files', {}):
+            files[item['path']] = read_content_path(item)
+
+        # the 'network_config' item in metadata is a content pointer
+        # to the network config that should be applied.
+        # in folsom, it is just a '/etc/network/interfaces' file.
+        item = results['metadata'].get("network_config", None)
+        if item:
+            results['network_config'] = read_content_path(item)
+    except Exception as exc:
+        raise BrokenConfigDriveDir("failed to read file %s: %s" % (item, exc))
+
+    # to openstack, user can specify meta ('nova boot --meta=key=value') and
+    # those will appear under metadata['meta'].
+    # if they specify 'dsmode' they're indicating the mode that they intend
+    # for this datasource to operate in.
+    try:
+        results['dsmode'] = results['metadata']['meta']['dsmode']
+    except KeyError:
+        pass
+
+    results['files'] = files
+    results['cfgdrive_ver'] = 2
+    return results
+
+
+def read_config_drive_dir_v1(source_dir):
     """
-    read_config_drive_dir(source_dir):
-       read source_dir, and return a tuple with metadata dict and user-data
-       string populated. If not a valid dir, raise a NonConfigDriveDir
+    read source_dir, and return a tuple with metadata dict, user-data,
+    files and version (1). If not a valid dir, raise a NonConfigDriveDir
     """
-    # TODO: fix this for other operating systems...
-    # Ie: this is where https://fedorahosted.org/netcf/ or similar should
-    # be hooked in... (or could be)
     found = {}
-    for af in CFG_DRIVE_FILES:
+    for af in CFG_DRIVE_FILES_V1:
         fn = os.path.join(source_dir, af)
         if os.path.isfile(fn):
             found[af] = fn
@@ -173,11 +302,10 @@ def read_config_drive_dir(source_dir):
         raise NonConfigDriveDir("%s: %s" % (source_dir, "no files found"))
 
     md = {}
-    ud = ""
     keydata = ""
 
     if "etc/network/interfaces" in found:
         fn = found["etc/network/interfaces"]
-        md['network-interfaces'] = util.load_file(fn)
+        md['network_config'] = util.load_file(fn)
 
     if "root/.ssh/authorized_keys" in found:
         fn = found["root/.ssh/authorized_keys"]
@@ -197,21 +325,77 @@ def read_config_drive_dir(source_dir):
                 (source_dir, "invalid json in meta.js", e))
         md['meta_js'] = content
 
-    # Key data override??
+    # keydata in meta_js is preferred over "injected"
    keydata = meta_js.get('public-keys', keydata)
    if keydata:
        lines = keydata.splitlines()
        md['public-keys'] = [l for l in lines
                             if len(l) and not l.startswith("#")]

-    for copy in ('dsmode', 'instance-id', 'dscfg'):
-        if copy in meta_js:
-            md[copy] = meta_js[copy]
+    # config-drive-v1 has no way for openstack to provide the instance-id
+    # so we copy that into metadata from the user input
+    if 'instance-id' in meta_js:
+        md['instance-id'] = meta_js['instance-id']
+
+    results = {'cfgdrive_ver': 1, 'metadata': md}
+
+    # allow the user to specify 'dsmode' in a meta tag
+    if 'dsmode' in meta_js:
+        results['dsmode'] = meta_js['dsmode']
+
+    # config-drive-v1 has no way of specifying user-data, so the user has
+    # to cheat and stuff it in a meta tag also.
+    results['userdata'] = meta_js.get('user-data')
 
-    if 'user-data' in meta_js:
-        ud = meta_js['user-data']
+    # this implementation does not support files
+    # (other than network/interfaces and authorized_keys)
+    results['files'] = []
 
-    return (md, ud)
+    return results
+
+
+def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
+    """Determine what mode should be used.
+    valid values are 'pass', 'disabled', 'local', 'net'
+    """
+    # user passed data trumps everything
+    if user is not None:
+        return user
+
+    if ds_cfg is not None:
+        return ds_cfg
+
+    # at config-drive version 1, the default behavior was pass. That
+    # meant to not use use it as primary data source, but expect a ec2 metadata
+    # source. for version 2, we default to 'net', which means
+    # the DataSourceConfigDriveNet, would be used.
+    #
+    # this could change in the future. If there was definitive metadata
+    # that indicated presense of an openstack metadata service, then
+    # we could change to 'pass' by default also. The motivation for that
+    # would be 'cloud-init query' as the web service could be more dynamic
+    if cfgdrv_ver == 1:
+        return "pass"
+    return "net"
+
+
+def get_previous_iid(paths):
+    # interestingly, for this purpose the "previous" instance-id is the current
+    # instance-id. cloud-init hasn't moved them over yet as this datasource
+    # hasn't declared itself found.
+    fname = os.path.join(paths.get_cpath('data'), 'instance-id')
+    try:
+        with open(fname) as fp:
+            return fp.read()
+    except IOError:
+        return None
+
+
+def write_files(files):
+    for (name, content) in files.iteritems():
+        if name[0] != os.sep:
+            name = os.sep + name
+        util.write_file(name, content, mode=0660)
 
 
 # Used to match classes to dependencies
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index d9eb8f17..c7ad6d54 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -40,7 +40,7 @@ DEF_MD_VERSION = '2009-04-04'
 # Default metadata urls that will be used if none are provided
 # They will be checked for 'resolveability' and some of the
 # following may be discarded if they do not resolve
-DEF_MD_URLS = [DEF_MD_URL, "http://instance-data:8773"]
+DEF_MD_URLS = [DEF_MD_URL, "http://instance-data.:8773"]
 
 
 class DataSourceEc2(sources.DataSource):
@@ -77,46 +77,18 @@ class DataSourceEc2(sources.DataSource):
                      self.metadata_address)
             return False
 
+    @property
+    def launch_index(self):
+        if not self.metadata:
+            return None
+        return self.metadata.get('ami-launch-index')
+
     def get_instance_id(self):
         return self.metadata['instance-id']
 
     def get_availability_zone(self):
         return self.metadata['placement']['availability-zone']
 
-    def get_local_mirror(self):
-        return self.get_mirror_from_availability_zone()
-
-    def get_mirror_from_availability_zone(self, availability_zone=None):
-        # Return type None indicates there is no cloud specific mirror
-        # Availability is like 'us-west-1b' or 'eu-west-1a'
-        if availability_zone is None:
-            availability_zone = self.get_availability_zone()
-
-        if self.is_vpc():
-            return None
-
-        if not availability_zone:
-            return None
-
-        mirror_tpl = self.distro.get_option('package_mirror_ec2_template',
-                                            None)
-
-        if mirror_tpl is None:
-            return None
-
-        # in EC2, the 'region' is 'us-east-1' if 'zone' is 'us-east-1a'
-        tpl_params = {
-            'zone': availability_zone.strip(),
-            'region': availability_zone[:-1]
-        }
-        mirror_url = mirror_tpl % (tpl_params)
-
-        found = util.search_for_mirror([mirror_url])
-        if found is not None:
-            return mirror_url
-
-        return None
-
     def _get_url_settings(self):
         mcfg = self.ds_cfg
         if not mcfg:
@@ -255,6 +227,12 @@ class DataSourceEc2(sources.DataSource):
                 return True
         return False
 
+    @property
+    def availability_zone(self):
+        try:
+            return self.metadata['placement']['availability-zone']
+        except KeyError:
+            return None
 
 
 # Used to match classes to dependencies
 datasources = [
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
new file mode 100644
index 00000000..c2125bee
--- /dev/null
+++ b/cloudinit/sources/DataSourceNone.py
@@ -0,0 +1,61 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class DataSourceNone(sources.DataSource):
+    def __init__(self, sys_cfg, distro, paths, ud_proc=None):
+        sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
+        self.metadata = {}
+        self.userdata_raw = ''
+
+    def get_data(self):
+        # If the datasource config has any provided 'fallback'
+        # userdata or metadata, use it...
+        if 'userdata_raw' in self.ds_cfg:
+            self.userdata_raw = self.ds_cfg['userdata_raw']
+        if 'metadata' in self.ds_cfg:
+            self.metadata = self.ds_cfg['metadata']
+        return True
+
+    def get_instance_id(self):
+        return 'iid-datasource-none'
+
+    def __str__(self):
+        return util.obj_name(self)
+
+    @property
+    def is_disconnected(self):
+        return True
+
+
+# Used to match classes to dependencies
+datasources = [
+    (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+    (DataSourceNone, []),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+    return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index b25724a5..6f126091 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -20,6 +20,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+from email.mime.multipart import MIMEMultipart
+
 import abc
 
 from cloudinit import importer
@@ -27,6 +29,8 @@ from cloudinit import log as logging
 from cloudinit import user_data as ud
 from cloudinit import util
 
+from cloudinit.filters import launch_index
+
 DEP_FILESYSTEM = "FILESYSTEM"
 DEP_NETWORK = "NETWORK"
 DS_PREFIX = 'DataSource'
@@ -59,12 +63,34 @@ class DataSource(object):
         else:
             self.ud_proc = ud_proc
 
-    def get_userdata(self):
+    def get_userdata(self, apply_filter=False):
         if self.userdata is None:
-            raw_data = self.get_userdata_raw()
-            self.userdata = self.ud_proc.process(raw_data)
+            self.userdata = self.ud_proc.process(self.get_userdata_raw())
+        if apply_filter:
+            return self._filter_userdata(self.userdata)
         return self.userdata
 
+    @property
+    def launch_index(self):
+        if not self.metadata:
+            return None
+        if 'launch-index' in self.metadata:
+            return self.metadata['launch-index']
+        return None
+
+    def _filter_userdata(self, processed_ud):
+        filters = [
+            launch_index.Filter(util.safe_int(self.launch_index)),
+        ]
+        new_ud = processed_ud
+        for f in filters:
+            new_ud = f.apply(new_ud)
+        return new_ud
+
+    @property
+    def is_disconnected(self):
+        return False
+
     def get_userdata_raw(self):
         return self.userdata_raw
 
@@ -113,9 +139,9 @@ class DataSource(object):
     def get_locale(self):
         return 'en_US.UTF-8'
 
-    def get_local_mirror(self):
-        # ??
-        return None
+    @property
+    def availability_zone(self):
+        return self.metadata.get('availability-zone')
 
     def get_instance_id(self):
         if not self.metadata or 'instance-id' not in self.metadata:
@@ -147,7 +173,7 @@ class DataSource(object):
             # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
             lhost = self.metadata['local-hostname']
             if util.is_ipv4(lhost):
-                toks = "ip-%s" % lhost.replace(".", "-")
+                toks = [ "ip-%s" % lhost.replace(".", "-") ]
             else:
                 toks = lhost.split(".")
 
@@ -162,6 +188,10 @@ class DataSource(object):
         else:
             return hostname
 
+    def get_package_mirror_info(self):
+        return self.distro.get_package_mirror_info(
+            availability_zone=self.availability_zone)
+
 
 def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
     ds_list = list_sources(cfg_list, ds_deps, pkg_list)
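
For readers checking the new config-drive dsmode precedence quickly: the get_ds_mode() helper added in DataSourceConfigDrive.py above can be exercised on its own. The sketch below restates its decision logic as it appears in this commit; the example calls and expected values are illustrative only and are not part of the change.

def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
    # a user-passed dsmode (e.g. 'nova boot --meta dsmode=...') trumps everything
    if user is not None:
        return user
    # next, a dsmode set in the datasource config
    if ds_cfg is not None:
        return ds_cfg
    # otherwise fall back to the per-version default described in the diff
    if cfgdrv_ver == 1:
        return "pass"
    return "net"

assert get_ds_mode(cfgdrv_ver=1) == "pass"        # v1 defers to an EC2-style metadata source
assert get_ds_mode(cfgdrv_ver=2) == "net"         # v2 lets DataSourceConfigDriveNet claim the instance
assert get_ds_mode(2, ds_cfg="local") == "local"  # datasource config overrides the version default
assert get_ds_mode(2, ds_cfg="local", user="disabled") == "disabled"  # user setting wins over all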