Diffstat (limited to 'cloudinit')
-rw-r--r-- | cloudinit/config/cc_apt_configure.py | 30
-rw-r--r-- | cloudinit/config/cc_disk_setup.py | 790
-rw-r--r-- | cloudinit/config/cc_final_message.py | 2
-rw-r--r-- | cloudinit/config/cc_growpart.py | 7
-rw-r--r-- | cloudinit/config/cc_mounts.py | 131
-rw-r--r-- | cloudinit/config/cc_power_state_change.py | 6
-rw-r--r-- | cloudinit/config/cc_seed_random.py | 61
-rw-r--r-- | cloudinit/config/cc_ssh_authkey_fingerprints.py | 2
-rw-r--r-- | cloudinit/helpers.py | 18
-rw-r--r-- | cloudinit/settings.py | 1
-rw-r--r-- | cloudinit/sources/DataSourceAzure.py | 34
-rw-r--r-- | cloudinit/sources/DataSourceConfigDrive.py | 44
-rw-r--r-- | cloudinit/sources/DataSourceOpenNebula.py | 442
-rw-r--r-- | cloudinit/sources/DataSourceSmartOS.py | 133
-rw-r--r-- | cloudinit/sources/__init__.py | 18
-rw-r--r-- | cloudinit/stages.py | 12
-rw-r--r-- | cloudinit/util.py | 64
-rw-r--r-- | cloudinit/version.py | 2
18 files changed, 1680 insertions, 117 deletions
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 5a407016..29c13a3d 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -20,6 +20,7 @@ import glob import os +import re from cloudinit import templater from cloudinit import util @@ -30,6 +31,9 @@ PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n" APT_CONFIG_FN = "/etc/apt/apt.conf.d/94cloud-init-config" APT_PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy" +# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar') +ADD_APT_REPO_MATCH = r"^[\w-]+:\w" + # A temporary shell program to get a given gpg key # from a given keyserver EXPORT_GPG_KEYID = """ @@ -78,7 +82,15 @@ def handle(name, cfg, cloud, log, _args): params = mirrors params['RELEASE'] = release params['MIRROR'] = mirror - errors = add_sources(cfg['apt_sources'], params) + + matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH) + if matchcfg: + matcher = re.compile(matchcfg).search + else: + matcher = lambda f: False + + errors = add_sources(cfg['apt_sources'], params, + aa_repo_match=matcher) for e in errors: log.warn("Add source error: %s", ':'.join(e)) @@ -147,7 +159,7 @@ def generate_sources_list(codename, mirrors, cloud, log): templater.render_to_file(template_fn, '/etc/apt/sources.list', params) -def add_sources(srclist, template_params=None): +def add_sources(srclist, template_params=None, aa_repo_match=None): """ add entries in /etc/apt/sources.list.d for each abbreviated sources.list entry in 'srclist'. When rendering template, also @@ -156,6 +168,9 @@ def add_sources(srclist, template_params=None): if template_params is None: template_params = {} + if aa_repo_match is None: + aa_repo_match = lambda f: False + errorlist = [] for ent in srclist: if 'source' not in ent: @@ -163,15 +178,16 @@ def add_sources(srclist, template_params=None): continue source = ent['source'] - if source.startswith("ppa:"): + source = templater.render_string(source, template_params) + + if aa_repo_match(source): try: util.subp(["add-apt-repository", source]) - except: - errorlist.append([source, "add-apt-repository failed"]) + except util.ProcessExecutionError as e: + errorlist.append([source, + ("add-apt-repository failed. " + str(e))]) continue - source = templater.render_string(source, template_params) - if 'filename' not in ent: ent['filename'] = 'cloud_config_sources.list' diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py new file mode 100644 index 00000000..0b970e4e --- /dev/null +++ b/cloudinit/config/cc_disk_setup.py @@ -0,0 +1,790 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2009-2010 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# +# Author: Ben Howard <ben.howard@canonical.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
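As a quick aside on the cc_apt_configure.py hunk above: the new ADD_APT_REPO_MATCH default decides which 'source' entries get handed to add-apt-repository instead of being written out as sources.list fragments. A minimal standalone sketch of its behaviour (the source strings are illustrative):

    import re

    # Default from the hunk: matches 'XXX:YYY' specs such as 'ppa:bar'
    # or 'cloud-archive:foo'.
    ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
    matcher = re.compile(ADD_APT_REPO_MATCH).search

    assert matcher("ppa:example/ppa")
    assert matcher("cloud-archive:folsom")
    # Plain deb lines fail the match and go down the template/file path.
    assert not matcher("deb http://archive.ubuntu.com/ubuntu precise main")
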
+from cloudinit.settings import PER_INSTANCE +from cloudinit import util +import logging +import os +import shlex + +frequency = PER_INSTANCE + +# Define the commands to use +UDEVADM_CMD = util.which('udevadm') +SFDISK_CMD = util.which("sfdisk") +LSBLK_CMD = util.which("lsblk") +BLKID_CMD = util.which("blkid") +BLKDEV_CMD = util.which("blockdev") +WIPEFS_CMD = util.which("wipefs") + +LOG = logging.getLogger(__name__) + + +def handle(_name, cfg, cloud, log, _args): + """ + See doc/examples/cloud-config_disk-setup.txt for documentation on the + format. + """ + disk_setup = cfg.get("disk_setup") + if isinstance(disk_setup, dict): + update_disk_setup_devices(disk_setup, cloud.device_name_to_device) + log.debug("Partitioning disks: %s", str(disk_setup)) + for disk, definition in disk_setup.items(): + if not isinstance(definition, dict): + log.warn("Invalid disk definition for %s" % disk) + continue + + try: + log.debug("Creating new partition table/disk") + util.log_time(logfunc=LOG.debug, + msg="Creating partition on %s" % disk, + func=mkpart, args=(disk, definition)) + except Exception as e: + util.logexc(LOG, "Failed partitioning operation\n%s" % e) + + fs_setup = cfg.get("fs_setup") + if isinstance(fs_setup, list): + log.debug("setting up filesystems: %s", str(fs_setup)) + update_fs_setup_devices(fs_setup, cloud.device_name_to_device) + for definition in fs_setup: + if not isinstance(definition, dict): + log.warn("Invalid file system definition: %s" % definition) + continue + + try: + log.debug("Creating new filesystem.") + device = definition.get('device') + util.log_time(logfunc=LOG.debug, + msg="Creating fs for %s" % device, + func=mkfs, args=(definition,)) + except Exception as e: + util.logexc(LOG, "Failed during filesystem operation\n%s" % e) + + +def update_disk_setup_devices(disk_setup, tformer): + # update 'disk_setup' dictionary anywhere were a device may occur + # update it with the response from 'tformer' + for origname in disk_setup.keys(): + transformed = tformer(origname) + if transformed is None or transformed == origname: + continue + if transformed in disk_setup: + LOG.info("Replacing %s in disk_setup for translation of %s", + origname, transformed) + del disk_setup[transformed] + + disk_setup[transformed] = disk_setup[origname] + disk_setup[transformed]['_origname'] = origname + del disk_setup[origname] + LOG.debug("updated disk_setup device entry '%s' to '%s'", + origname, transformed) + + +def update_fs_setup_devices(disk_setup, tformer): + # update 'fs_setup' dictionary anywhere were a device may occur + # update it with the response from 'tformer' + for definition in disk_setup: + if not isinstance(definition, dict): + LOG.warn("entry in disk_setup not a dict: %s", definition) + continue + + origname = definition.get('device') + + if origname is None: + continue + + (dev, part) = util.expand_dotted_devname(origname) + + tformed = tformer(dev) + if tformed is not None: + dev = tformed + LOG.debug("%s is mapped to disk=%s part=%s", + origname, tformed, part) + definition['_origname'] = origname + definition['device'] = tformed + + if part and 'partition' in definition: + definition['_partition'] = definition['partition'] + definition['partition'] = part + + +def value_splitter(values, start=None): + """ + Returns the key/value pairs of output sent as string + like: FOO='BAR' HOME='127.0.0.1' + """ + _values = shlex.split(values) + if start: + _values = _values[start:] + + for key, value in [x.split('=') for x in _values]: + yield key, value + + +def 
enumerate_disk(device, nodeps=False): + """ + Enumerate the elements of a child device. + + Parameters: + device: the kernel device name + nodeps <BOOL>: don't enumerate children devices + + Return a dict describing the disk: + type: the entry type, i.e disk or part + fstype: the filesystem type, if it exists + label: file system label, if it exists + name: the device name, i.e. sda + """ + + lsblk_cmd = [LSBLK_CMD, '--pairs', '--out', 'NAME,TYPE,FSTYPE,LABEL', + device] + + if nodeps: + lsblk_cmd.append('--nodeps') + + info = None + try: + info, _err = util.subp(lsblk_cmd) + except Exception as e: + raise Exception("Failed during disk check for %s\n%s" % (device, e)) + + parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0] + + for part in parts: + d = {'name': None, + 'type': None, + 'fstype': None, + 'label': None, + } + + for key, value in value_splitter(part): + d[key.lower()] = value + + yield d + + +def device_type(device): + """ + Return the device type of the device by calling lsblk. + """ + + for d in enumerate_disk(device, nodeps=True): + if "type" in d: + return d["type"].lower() + return None + + +def is_device_valid(name, partition=False): + """ + Check if the device is a valid device. + """ + d_type = "" + try: + d_type = device_type(name) + except: + LOG.warn("Query against device %s failed" % name) + return False + + if partition and d_type == 'part': + return True + elif not partition and d_type == 'disk': + return True + return False + + +def check_fs(device): + """ + Check if the device has a filesystem on it + + Output of blkid is generally something like: + /dev/sda: LABEL="Backup500G" UUID="..." TYPE="ext4" + + Return values are device, label, type, uuid + """ + out, label, fs_type, uuid = None, None, None, None + + blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device] + try: + out, _err = util.subp(blkid_cmd, rcs=[0, 2]) + except Exception as e: + raise Exception("Failed during disk check for %s\n%s" % (device, e)) + + if out: + if len(out.splitlines()) == 1: + for key, value in value_splitter(out, start=1): + if key.lower() == 'label': + label = value + elif key.lower() == 'type': + fs_type = value + elif key.lower() == 'uuid': + uuid = value + + return label, fs_type, uuid + + +def is_filesystem(device): + """ + Returns true if the device has a file system. + """ + _, fs_type, _ = check_fs(device) + return fs_type + + +def find_device_node(device, fs_type=None, label=None, valid_targets=None, + label_match=True, replace_fs=None): + """ + Find a device that is either matches the spec, or the first + + The return is value is (<device>, <bool>) where the device is the + device to use and the bool is whether the device matches the + fs_type and label. + + Note: This works with GPT partition tables! 
+ """ + # label of None is same as no label + if label is None: + label = "" + + if not valid_targets: + valid_targets = ['disk', 'part'] + + raw_device_used = False + for d in enumerate_disk(device): + + if d['fstype'] == replace_fs and label_match is False: + # We found a device where we want to replace the FS + return ('/dev/%s' % d['name'], False) + + if (d['fstype'] == fs_type and + ((label_match and d['label'] == label) or not label_match)): + # If we find a matching device, we return that + return ('/dev/%s' % d['name'], True) + + if d['type'] in valid_targets: + + if d['type'] != 'disk' or d['fstype']: + raw_device_used = True + + if d['type'] == 'disk': + # Skip the raw disk, its the default + pass + + elif not d['fstype']: + return ('/dev/%s' % d['name'], False) + + if not raw_device_used: + return (device, False) + + LOG.warn("Failed to find device during available device search.") + return (None, False) + + +def is_disk_used(device): + """ + Check if the device is currently used. Returns true if the device + has either a file system or a partition entry + is no filesystem found on the disk. + """ + + # If the child count is higher 1, then there are child nodes + # such as partition or device mapper nodes + use_count = [x for x in enumerate_disk(device)] + if len(use_count.splitlines()) > 1: + return True + + # If we see a file system, then its used + _, check_fstype, _ = check_fs(device) + if check_fstype: + return True + + return False + + +def get_hdd_size(device): + """ + Returns the hard disk size. + This works with any disk type, including GPT. + """ + + size_cmd = [SFDISK_CMD, '--show-size', device] + size = None + try: + size, _err = util.subp(size_cmd) + except Exception as e: + raise Exception("Failed to get %s size\n%s" % (device, e)) + + return int(size.strip()) + + +def get_dyn_func(*args): + """ + Call the appropriate function. + + The first value is the template for function name + The second value is the template replacement + The remain values are passed to the function + + For example: get_dyn_func("foo_%s", 'bar', 1, 2, 3,) + would call "foo_bar" with args of 1, 2, 3 + """ + if len(args) < 2: + raise Exception("Unable to determine dynamic funcation name") + + func_name = (args[0] % args[1]) + func_args = args[2:] + + try: + if func_args: + return globals()[func_name](*func_args) + else: + return globals()[func_name] + + except KeyError: + raise Exception("No such function %s to call!" % func_name) + + +def check_partition_mbr_layout(device, layout): + """ + Returns true if the partition layout matches the one on the disk + + Layout should be a list of values. At this time, this only + verifies that the number of partitions and their labels is correct. 
+ """ + + read_parttbl(device) + prt_cmd = [SFDISK_CMD, "-l", device] + try: + out, _err = util.subp(prt_cmd, data="%s\n" % layout) + except Exception as e: + raise Exception("Error running partition command on %s\n%s" % ( + device, e)) + + found_layout = [] + for line in out.splitlines(): + _line = line.split() + if len(_line) == 0: + continue + + if device in _line[0]: + # We don't understand extended partitions yet + if _line[-1].lower() in ['extended', 'empty']: + continue + + # Find the partition types + type_label = None + for x in sorted(range(1, len(_line)), reverse=True): + if _line[x].isdigit() and _line[x] != '/': + type_label = _line[x] + break + + found_layout.append(type_label) + + if isinstance(layout, bool): + # if we are using auto partitioning, or "True" be happy + # if a single partition exists. + if layout and len(found_layout) >= 1: + return True + return False + + else: + if len(found_layout) != len(layout): + return False + else: + # This just makes sure that the number of requested + # partitions and the type labels are right + for x in range(1, len(layout) + 1): + if isinstance(layout[x - 1], tuple): + _, part_type = layout[x] + if int(found_layout[x]) != int(part_type): + return False + return True + + return False + + +def check_partition_layout(table_type, device, layout): + """ + See if the partition lay out matches. + + This is future a future proofing function. In order + to add support for other disk layout schemes, add a + function called check_partition_%s_layout + """ + return get_dyn_func("check_partition_%s_layout", table_type, device, + layout) + + +def get_partition_mbr_layout(size, layout): + """ + Calculate the layout of the partition table. Partition sizes + are defined as percentage values or a tuple of percentage and + partition type. + + For example: + [ 33, [66: 82] ] + + Defines the first partition to be a size of 1/3 the disk, + while the remaining 2/3's will be of type Linux Swap. + """ + + if not isinstance(layout, list) and isinstance(layout, bool): + # Create a single partition + return "0," + + if ((len(layout) == 0 and isinstance(layout, list)) or + not isinstance(layout, list)): + raise Exception("Partition layout is invalid") + + last_part_num = len(layout) + if last_part_num > 4: + raise Exception("Only simply partitioning is allowed.") + + part_definition = [] + part_num = 0 + for part in layout: + part_type = 83 # Default to Linux + percent = part + part_num += 1 + + if isinstance(part, list): + if len(part) != 2: + raise Exception("Partition was incorrectly defined: %s" % part) + percent, part_type = part + + part_size = int((float(size) * (float(percent) / 100)) / 1024) + + if part_num == last_part_num: + part_definition.append(",,%s" % part_type) + else: + part_definition.append(",%s,%s" % (part_size, part_type)) + + sfdisk_definition = "\n".join(part_definition) + if len(part_definition) > 4: + raise Exception("Calculated partition definition is too big\n%s" % + sfdisk_definition) + + return sfdisk_definition + + +def purge_disk_ptable(device): + # wipe the first and last megabyte of a disk (or file) + # gpt stores partition table both at front and at end. 
+ null = '\0' # pylint: disable=W1401 + start_len = 1024 * 1024 + end_len = 1024 * 1024 + with open(device, "rb+") as fp: + fp.write(null * (start_len)) + fp.seek(-end_len, os.SEEK_END) + fp.write(null * end_len) + fp.flush() + + read_parttbl(device) + + +def purge_disk(device): + """ + Remove parition table entries + """ + + # wipe any file systems first + for d in enumerate_disk(device): + if d['type'] not in ["disk", "crypt"]: + wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']] + try: + LOG.info("Purging filesystem on /dev/%s" % d['name']) + util.subp(wipefs_cmd) + except Exception: + raise Exception("Failed FS purge of /dev/%s" % d['name']) + + purge_disk_ptable(device) + + +def get_partition_layout(table_type, size, layout): + """ + Call the appropriate function for creating the table + definition. Returns the table definition + + This is a future proofing function. To add support for + other layouts, simply add a "get_partition_%s_layout" + function. + """ + return get_dyn_func("get_partition_%s_layout", table_type, size, layout) + + +def read_parttbl(device): + """ + Use partprobe instead of 'udevadm'. Partprobe is the only + reliable way to probe the partition table. + """ + blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device] + udev_cmd = [UDEVADM_CMD, 'settle'] + try: + util.subp(udev_cmd) + util.subp(blkdev_cmd) + util.subp(udev_cmd) + except Exception as e: + util.logexc(LOG, "Failed reading the partition table %s" % e) + + +def exec_mkpart_mbr(device, layout): + """ + Break out of mbr partition to allow for future partition + types, i.e. gpt + """ + # Create the partitions + prt_cmd = [SFDISK_CMD, "--Linux", "-uM", device] + try: + util.subp(prt_cmd, data="%s\n" % layout) + except Exception as e: + raise Exception("Failed to partition device %s\n%s" % (device, e)) + + read_parttbl(device) + + +def exec_mkpart(table_type, device, layout): + """ + Fetches the function for creating the table type. + This allows to dynamically find which function to call. + + Paramaters: + table_type: type of partition table to use + device: the device to work on + layout: layout definition specific to partition table + """ + return get_dyn_func("exec_mkpart_%s", table_type, device, layout) + + +def mkpart(device, definition): + """ + Creates the partition table. + + Parameters: + definition: dictionary describing how to create the partition. + + The following are supported values in the dict: + overwrite: Should the partition table be created regardless + of any pre-exisiting data? + layout: the layout of the partition table + table_type: Which partition table to use, defaults to MBR + device: the device to work on. 
+ """ + + LOG.debug("Checking values for %s definition" % device) + overwrite = definition.get('overwrite', False) + layout = definition.get('layout', False) + table_type = definition.get('table_type', 'mbr') + + # Check if the default device is a partition or not + LOG.debug("Checking against default devices") + + if (isinstance(layout, bool) and not layout) or not layout: + LOG.debug("Device is not to be partitioned, skipping") + return # Device is not to be partitioned + + # This prevents you from overwriting the device + LOG.debug("Checking if device %s is a valid device", device) + if not is_device_valid(device): + raise Exception("Device %s is not a disk device!", device) + + # Remove the partition table entries + if isinstance(layout, str) and layout.lower() == "remove": + LOG.debug("Instructed to remove partition table entries") + purge_disk(device) + return + + LOG.debug("Checking if device layout matches") + if check_partition_layout(table_type, device, layout): + LOG.debug("Device partitioning layout matches") + return True + + LOG.debug("Checking if device is safe to partition") + if not overwrite and (is_disk_used(device) or is_filesystem(device)): + LOG.debug("Skipping partitioning on configured device %s" % device) + return + + LOG.debug("Checking for device size") + device_size = get_hdd_size(device) + + LOG.debug("Calculating partition layout") + part_definition = get_partition_layout(table_type, device_size, layout) + LOG.debug(" Layout is: %s" % part_definition) + + LOG.debug("Creating partition table on %s", device) + exec_mkpart(table_type, device, part_definition) + + LOG.debug("Partition table created for %s", device) + + +def lookup_force_flag(fs): + """ + A force flag might be -F or -F, this look it up + """ + flags = {'ext': '-F', + 'btrfs': '-f', + 'xfs': '-f', + 'reiserfs': '-f', + } + + if 'ext' in fs.lower(): + fs = 'ext' + + if fs.lower() in flags: + return flags[fs] + + LOG.warn("Force flag for %s is unknown." % fs) + return '' + + +def mkfs(fs_cfg): + """ + Create a file system on the device. + + label: defines the label to use on the device + fs_cfg: defines how the filesystem is to look + The following values are required generally: + device: which device or cloud defined default_device + filesystem: which file system type + overwrite: indiscriminately create the file system + partition: when device does not define a partition, + setting this to a number will mean + device + partition. When set to 'auto', the + first free device or the first device which + matches both label and type will be used. + + 'any' means the first filesystem that matches + on the device. + + When 'cmd' is provided then no other parameter is required. 
+ """ + label = fs_cfg.get('label') + device = fs_cfg.get('device') + partition = str(fs_cfg.get('partition', 'any')) + fs_type = fs_cfg.get('filesystem') + fs_cmd = fs_cfg.get('cmd', []) + fs_opts = fs_cfg.get('extra_opts', []) + fs_replace = fs_cfg.get('replace_fs', False) + overwrite = fs_cfg.get('overwrite', False) + + # This allows you to define the default ephemeral or swap + LOG.debug("Checking %s against default devices", device) + + if not partition or partition.isdigit(): + # Handle manual definition of partition + if partition.isdigit(): + device = "%s%s" % (device, partition) + LOG.debug("Manual request of partition %s for %s", + partition, device) + + # Check to see if the fs already exists + LOG.debug("Checking device %s", device) + check_label, check_fstype, _ = check_fs(device) + LOG.debug("Device %s has %s %s", device, check_label, check_fstype) + + if check_label == label and check_fstype == fs_type: + LOG.debug("Existing file system found at %s", device) + + if not overwrite: + LOG.debug("Device %s has required file system", device) + return + else: + LOG.warn("Destroying filesystem on %s", device) + + else: + LOG.debug("Device %s is cleared for formating", device) + + elif partition and str(partition).lower() in ('auto', 'any'): + # For auto devices, we match if the filesystem does exist + odevice = device + LOG.debug("Identifying device to create %s filesytem on", label) + + # any mean pick the first match on the device with matching fs_type + label_match = True + if partition.lower() == 'any': + label_match = False + + device, reuse = find_device_node(device, fs_type=fs_type, label=label, + label_match=label_match, + replace_fs=fs_replace) + LOG.debug("Automatic device for %s identified as %s", odevice, device) + + if reuse: + LOG.debug("Found filesystem match, skipping formating.") + return + + if not reuse and fs_replace and device: + LOG.debug("Replacing file system on %s as instructed." % device) + + if not device: + LOG.debug("No device aviable that matches request. " + "Skipping fs creation for %s", fs_cfg) + return + elif not partition or str(partition).lower() == 'none': + LOG.debug("Using the raw device to place filesystem %s on" % label) + + else: + LOG.debug("Error in device identification handling.") + return + + LOG.debug("File system %s will be created on %s", label, device) + + # Make sure the device is defined + if not device: + LOG.warn("Device is not known: %s", device) + return + + # Check that we can create the FS + if not (fs_type or fs_cmd): + raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd " + "must be set.", label) + + # Create the commands + if fs_cmd: + fs_cmd = fs_cfg['cmd'] % {'label': label, + 'filesystem': fs_type, + 'device': device, + } + else: + # Find the mkfs command + mkfs_cmd = util.which("mkfs.%s" % fs_type) + if not mkfs_cmd: + mkfs_cmd = util.which("mk%s" % fs_type) + + if not mkfs_cmd: + LOG.warn("Cannot create fstype '%s'. 
No mkfs.%s command", fs_type, + fs_type) + return + + fs_cmd = [mkfs_cmd, device] + + if label: + fs_cmd.extend(["-L", label]) + + # File systems that support the -F flag + if not fs_cmd and (overwrite or device_type(device) == "disk"): + fs_cmd.append(lookup_force_flag(fs_type)) + + # Add the extends FS options + if fs_opts: + fs_cmd.extend(fs_opts) + + LOG.debug("Creating file system %s on %s", label, device) + LOG.debug(" Using cmd: %s", " ".join(fs_cmd)) + try: + util.subp(fs_cmd) + except Exception as e: + raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index 6b864fda..e92cba4a 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -54,7 +54,7 @@ def handle(_name, cfg, cloud, log, args): 'datasource': str(cloud.datasource), } util.multi_log("%s\n" % (templater.render_string(msg_in, subs)), - console=False, stderr=True) + console=False, stderr=True, log=log) except Exception: util.logexc(log, "Failed to render final message template") diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 2d54aabf..0dd92a46 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -32,6 +32,7 @@ frequency = PER_ALWAYS DEFAULT_CONFIG = { 'mode': 'auto', 'devices': ['/'], + 'ignore_growroot_disabled': False, } @@ -251,6 +252,12 @@ def handle(_name, cfg, _cloud, log, _args): log.debug("growpart disabled: mode=%s" % mode) return + if util.is_false(mycfg.get('ignore_growroot_disabled', False)): + if os.path.isfile("/etc/growroot-disabled"): + log.debug("growpart disabled: /etc/growroot-disabled exists") + log.debug("use ignore_growroot_disabled to ignore") + return + devices = util.get_cfg_option_list(cfg, "devices", ["/"]) if not len(devices): log.debug("growpart: empty device list") diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 390ba711..80590118 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -20,6 +20,8 @@ from string import whitespace # pylint: disable=W0402 +import logging +import os.path import re from cloudinit import type_utils @@ -31,6 +33,8 @@ SHORTNAME = re.compile(SHORTNAME_FILTER) WS = re.compile("[%s]+" % (whitespace)) FSTAB_PATH = "/etc/fstab" +LOG = logging.getLogger(__name__) + def is_mdname(name): # return true if this is a metadata service name @@ -44,6 +48,33 @@ def is_mdname(name): return False +def sanitize_devname(startname, transformer, log): + log.debug("Attempting to determine the real name of %s", startname) + + # workaround, allow user to specify 'ephemeral' + # rather than more ec2 correct 'ephemeral0' + devname = startname + if devname == "ephemeral": + devname = "ephemeral0" + log.debug("Adjusted mount option from ephemeral to ephemeral0") + + (blockdev, part) = util.expand_dotted_devname(devname) + + if is_mdname(blockdev): + orig = blockdev + blockdev = transformer(blockdev) + if not blockdev: + return None + if not blockdev.startswith("/"): + blockdev = "/dev/%s" % blockdev + log.debug("Mapped metadata name %s to %s", orig, blockdev) + else: + if SHORTNAME.match(startname): + blockdev = "/dev/%s" % blockdev + + return devnode_for_dev_part(blockdev, part) + + def handle(_name, cfg, cloud, log, _args): # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno defvals = [None, None, "auto", "defaults,nobootwait", "0", "2"] @@ -64,32 +95,15 @@ def handle(_name, cfg, cloud, log, _args): (i + 1), 
type_utils.obj_name(cfgmnt[i])) continue - startname = str(cfgmnt[i][0]) - log.debug("Attempting to determine the real name of %s", startname) - - # workaround, allow user to specify 'ephemeral' - # rather than more ec2 correct 'ephemeral0' - if startname == "ephemeral": - cfgmnt[i][0] = "ephemeral0" - log.debug(("Adjusted mount option %s " - "name from ephemeral to ephemeral0"), (i + 1)) - - if is_mdname(startname): - newname = cloud.device_name_to_device(startname) - if not newname: - log.debug("Ignoring nonexistant named mount %s", startname) - cfgmnt[i][1] = None - else: - renamed = newname - if not newname.startswith("/"): - renamed = "/dev/%s" % newname - cfgmnt[i][0] = renamed - log.debug("Mapped metadata name %s to %s", startname, renamed) - else: - if SHORTNAME.match(startname): - renamed = "/dev/%s" % startname - log.debug("Mapped shortname name %s to %s", startname, renamed) - cfgmnt[i][0] = renamed + start = str(cfgmnt[i][0]) + sanitized = sanitize_devname(start, cloud.device_name_to_device, log) + if sanitized is None: + log.debug("Ignorming nonexistant named mount %s", start) + continue + + if sanitized != start: + log.debug("changed %s => %s" % (start, sanitized)) + cfgmnt[i][0] = sanitized # in case the user did not quote a field (likely fs-freq, fs_passno) # but do not convert None to 'None' (LP: #898365) @@ -118,17 +132,14 @@ def handle(_name, cfg, cloud, log, _args): # for each of the "default" mounts, add them only if no other # entry has the same device name for defmnt in defmnts: - startname = defmnt[0] - devname = cloud.device_name_to_device(startname) - if devname is None: - log.debug("Ignoring nonexistant named default mount %s", startname) + start = defmnt[0] + sanitized = sanitize_devname(start, cloud.device_name_to_device, log) + if sanitized is None: + log.debug("Ignoring nonexistant default named mount %s", start) continue - if devname.startswith("/"): - defmnt[0] = devname - else: - defmnt[0] = "/dev/%s" % devname - - log.debug("Mapped default device %s to %s", startname, defmnt[0]) + if sanitized != start: + log.debug("changed default device %s => %s" % (start, sanitized)) + defmnt[0] = sanitized cfgmnt_has = False for cfgm in cfgmnt: @@ -138,7 +149,7 @@ def handle(_name, cfg, cloud, log, _args): if cfgmnt_has: log.debug(("Not including %s, already" - " previously included"), startname) + " previously included"), start) continue cfgmnt.append(defmnt) @@ -198,3 +209,49 @@ def handle(_name, cfg, cloud, log, _args): util.subp(("mount", "-a")) except: util.logexc(log, "Activating mounts via 'mount -a' failed") + + +def devnode_for_dev_part(device, partition): + """ + Find the name of the partition. While this might seem rather + straight forward, its not since some devices are '<device><partition>' + while others are '<device>p<partition>'. For example, /dev/xvda3 on EC2 + will present as /dev/xvda3p1 for the first partition since /dev/xvda3 is + a block device. 
+ """ + if not os.path.exists(device): + return None + + short_name = os.path.basename(device) + sys_path = "/sys/block/%s" % short_name + + if not os.path.exists(sys_path): + LOG.debug("did not find entry for %s in /sys/block", short_name) + return None + + sys_long_path = sys_path + "/" + short_name + + if partition is not None: + partition = str(partition) + + if partition is None: + valid_mappings = [sys_long_path + "1", sys_long_path + "p1"] + elif partition != "0": + valid_mappings = [sys_long_path + "%s" % partition, + sys_long_path + "p%s" % partition] + else: + valid_mappings = [] + + for cdisk in valid_mappings: + if not os.path.exists(cdisk): + continue + + dev_path = "/dev/%s" % os.path.basename(cdisk) + if os.path.exists(dev_path): + return dev_path + + if partition is None or partition == "0": + return device + + LOG.debug("Did not fine partition %s for device %s", partition, device) + return None diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 188047e5..e3150808 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -75,6 +75,12 @@ def load_power_state(cfg): ','.join(opt_map.keys())) delay = pstate.get("delay", "now") + # convert integer 30 or string '30' to '+30' + try: + delay = "+%s" % int(delay) + except ValueError: + pass + if delay != "now" and not re.match(r"\+[0-9]+", delay): raise TypeError("power_state[delay] must be 'now' or '+m' (minutes).") diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py new file mode 100644 index 00000000..22a31f29 --- /dev/null +++ b/cloudinit/config/cc_seed_random.py @@ -0,0 +1,61 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2013 Yahoo! Inc. +# +# Author: Joshua Harlow <harlowja@yahoo-inc.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +import base64 +from StringIO import StringIO + +from cloudinit.settings import PER_INSTANCE +from cloudinit import util + +frequency = PER_INSTANCE + + +def _decode(data, encoding=None): + if not data: + return '' + if not encoding or encoding.lower() in ['raw']: + return data + elif encoding.lower() in ['base64', 'b64']: + return base64.b64decode(data) + elif encoding.lower() in ['gzip', 'gz']: + return util.decomp_gzip(data, quiet=False) + else: + raise IOError("Unknown random_seed encoding: %s" % (encoding)) + + +def handle(name, cfg, cloud, log, _args): + if not cfg or "random_seed" not in cfg: + log.debug(("Skipping module named %s, " + "no 'random_seed' configuration found"), name) + return + + my_cfg = cfg['random_seed'] + seed_path = my_cfg.get('file', '/dev/urandom') + seed_buf = StringIO() + seed_buf.write(_decode(my_cfg.get('data', ''), + encoding=my_cfg.get('encoding'))) + + metadata = cloud.datasource.metadata + if metadata and 'random_seed' in metadata: + seed_buf.write(metadata['random_seed']) + + seed_data = seed_buf.getvalue() + if len(seed_data): + log.debug("%s: adding %s bytes of random seed entrophy to %s", name, + len(seed_data), seed_path) + util.append_file(seed_path, seed_data) diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index c38bcea2..be8083db 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -63,7 +63,7 @@ def _is_printable_key(entry): def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', prefix='ci-info: '): if not key_entries: - message = ("%sno authorized ssh keys fingerprints found for user %s." + message = ("%sno authorized ssh keys fingerprints found for user %s.\n" % (prefix, user)) util.multi_log(message) return diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 1c46efde..e5eac6a7 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -292,11 +292,16 @@ class ContentHandlers(object): def is_registered(self, content_type): return content_type in self.registered - def register(self, mod, initialized=False): + def register(self, mod, initialized=False, overwrite=True): types = set() for t in mod.list_types(): + if overwrite: + types.add(t) + else: + if not self.is_registered(t): + types.add(t) + for t in types: self.registered[t] = mod - types.add(t) if initialized and mod not in self.initialized: self.initialized.append(mod) return types @@ -310,15 +315,6 @@ class ContentHandlers(object): def iteritems(self): return self.registered.iteritems() - def register_defaults(self, defs): - registered = set() - for mod in defs: - for t in mod.list_types(): - if not self.is_registered(t): - self.registered[t] = mod - registered.add(t) - return registered - class Paths(object): def __init__(self, path_cfgs, ds=None): diff --git a/cloudinit/settings.py b/cloudinit/settings.py index 9f6badae..5df7f557 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -31,6 +31,7 @@ CFG_BUILTIN = { 'datasource_list': [ 'NoCloud', 'ConfigDrive', + 'OpenNebula', 'Azure', 'AltCloud', 'OVF', diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 66d7728b..b18c57e7 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -44,8 +44,21 @@ BUILTIN_DS_CONFIG = { 'policy': True, 'command': BOUNCE_COMMAND, 'hostname_command': 'hostname', - } + }, + 'disk_aliases': {'ephemeral0': '/dev/sdb'}, } + +BUILTIN_CLOUD_CONFIG = { + 
'disk_setup': { + 'ephemeral0': {'table_type': 'mbr', + 'layout': True, + 'overwrite': False} + }, + 'fs_setup': [{'filesystem': 'ext4', + 'device': 'ephemeral0.1', + 'replace_fs': 'ntfs'}] +} + DS_CFG_PATH = ['datasource', DS_NAME] @@ -94,7 +107,7 @@ class DataSourceAzureNet(sources.DataSource): (md, self.userdata_raw, cfg, files) = ret self.seed = cdev self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) - self.cfg = cfg + self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG]) found = cdev LOG.debug("found datasource in %s", cdev) @@ -106,9 +119,14 @@ class DataSourceAzureNet(sources.DataSource): if found == ddir: LOG.debug("using files cached in %s", ddir) + # azure / hyper-v provides random data here + seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True) + if seed: + self.metadata['random_seed'] = seed + # now update ds_cfg to reflect contents pass in config - usercfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) - self.ds_cfg = util.mergemanydict([usercfg, self.ds_cfg]) + user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) + self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) mycfg = self.ds_cfg # walinux agent writes files world readable, but expects @@ -156,9 +174,11 @@ class DataSourceAzureNet(sources.DataSource): pubkeys = pubkeys_from_crt_files(fp_files) self.metadata['public-keys'] = pubkeys - return True + def device_name_to_device(self, name): + return self.ds_cfg['disk_aliases'].get(name) + def get_config_obj(self): return self.cfg @@ -344,7 +364,7 @@ def read_azure_ovf(contents): try: dom = minidom.parseString(contents) except Exception as e: - raise NonAzureDataSource("invalid xml: %s" % e) + raise BrokenAzureDataSource("invalid xml: %s" % e) results = find_child(dom.documentElement, lambda n: n.localName == "ProvisioningSection") @@ -390,7 +410,7 @@ def read_azure_ovf(contents): simple = True value = child.childNodes[0].wholeText - attrs = {k: v for k, v in child.attributes.items()} + attrs = dict([(k, v) for k, v in child.attributes.items()]) # we accept either UserData or CustomData. If both are present # then behavior is undefined. diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 835f2a9a..4f437244 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -18,6 +18,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
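Worth pausing on the merge direction in the DataSourceAzure.py hunk above: util.mergemanydict gives earlier list entries precedence, so configuration coming from the datasource shadows BUILTIN_CLOUD_CONFIG rather than the reverse. A rough, shallow approximation of that precedence (cloud-init's real mergers also handle nested structures; the ovf_cfg value here is hypothetical):

    BUILTIN_CLOUD_CONFIG = {
        'disk_setup': {'ephemeral0': {'table_type': 'mbr',
                                      'layout': True,
                                      'overwrite': False}},
        'fs_setup': [{'filesystem': 'ext4',
                      'device': 'ephemeral0.1',
                      'replace_fs': 'ntfs'}],
    }

    def merge_first_wins(first, second):
        # Shallow stand-in for util.mergemanydict([first, second]):
        # top-level keys present in 'first' win outright.
        merged = dict(second)
        merged.update(first)
        return merged

    # A cfg parsed from ovf-env.xml would replace the builtin fs_setup:
    ovf_cfg = {'fs_setup': [{'filesystem': 'ext3', 'device': 'ephemeral0.1'}]}
    print(merge_first_wins(ovf_cfg, BUILTIN_CLOUD_CONFIG)['fs_setup'])
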
+import base64 import json import os @@ -41,6 +42,25 @@ DEFAULT_METADATA = { VALID_DSMODES = ("local", "net", "pass", "disabled") +class ConfigDriveHelper(object): + def __init__(self, distro): + self.distro = distro + + def on_first_boot(self, data): + if not data: + data = {} + if 'network_config' in data: + LOG.debug("Updating network interfaces from config drive") + self.distro.apply_network(data['network_config']) + files = data.get('files') + if files: + LOG.debug("Writing %s injected files", len(files)) + try: + write_files(files) + except IOError: + util.logexc(LOG, "Failed writing files") + + class DataSourceConfigDrive(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -49,6 +69,7 @@ class DataSourceConfigDrive(sources.DataSource): self.seed_dir = os.path.join(paths.seed_dir, 'config_drive') self.version = None self.ec2_metadata = None + self.helper = ConfigDriveHelper(distro) def __str__(self): root = sources.DataSource.__str__(self) @@ -187,20 +208,8 @@ class DataSourceConfigDrive(sources.DataSource): # instance-id prev_iid = get_previous_iid(self.paths) cur_iid = md['instance-id'] - - if ('network_config' in results and self.dsmode == "local" and - prev_iid != cur_iid): - LOG.debug("Updating network interfaces from config drive (%s)", - dsmode) - self.distro.apply_network(results['network_config']) - - # file writing occurs in local mode (to be as early as possible) - if self.dsmode == "local" and prev_iid != cur_iid and results['files']: - LOG.debug("writing injected files") - try: - write_files(results['files']) - except: - util.logexc(LOG, "Failed writing files") + if prev_iid != cur_iid and self.dsmode == "local": + self.helper.on_first_boot(results) # dsmode != self.dsmode here if: # * dsmode = "pass", pass means it should only copy files and then @@ -338,6 +347,13 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"): except KeyError: raise BrokenConfigDriveDir("No uuid entry in metadata") + if 'random_seed' in results['metadata']: + random_seed = results['metadata']['random_seed'] + try: + results['metadata']['random_seed'] = base64.b64decode(random_seed) + except (ValueError, TypeError) as exc: + raise BrokenConfigDriveDir("Badly formatted random_seed: %s" % exc) + def read_content_path(item): # do not use os.path.join here, as content_path starts with / cpath = os.path.sep.join((source_dir, "openstack", diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py new file mode 100644 index 00000000..07dc25ff --- /dev/null +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -0,0 +1,442 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Canonical Ltd. +# Copyright (C) 2012 Yahoo! Inc. +# Copyright (C) 2012-2013 CERIT Scientific Cloud +# Copyright (C) 2012-2013 OpenNebula.org +# +# Author: Scott Moser <scott.moser@canonical.com> +# Author: Joshua Harlow <harlowja@yahoo-inc.com> +# Author: Vlastimil Holer <xholer@mail.muni.cz> +# Author: Javier Fontan <jfontan@opennebula.org> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import os +import pwd +import re +import string # pylint: disable=W0402 + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util + +LOG = logging.getLogger(__name__) + +DEFAULT_IID = "iid-dsopennebula" +DEFAULT_MODE = 'net' +DEFAULT_PARSEUSER = 'nobody' +CONTEXT_DISK_FILES = ["context.sh"] +VALID_DSMODES = ("local", "net", "disabled") + + +class DataSourceOpenNebula(sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.dsmode = 'local' + self.seed = None + self.seed_dir = os.path.join(paths.seed_dir, 'opennebula') + + def __str__(self): + root = sources.DataSource.__str__(self) + return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) + + def get_data(self): + defaults = {"instance-id": DEFAULT_IID} + results = None + seed = None + + # decide parseuser for context.sh shell reader + parseuser = DEFAULT_PARSEUSER + if 'parseuser' in self.ds_cfg: + parseuser = self.ds_cfg.get('parseuser') + + candidates = [self.seed_dir] + candidates.extend(find_candidate_devs()) + for cdev in candidates: + try: + if os.path.isdir(self.seed_dir): + results = read_context_disk_dir(cdev, asuser=parseuser) + elif cdev.startswith("/dev"): + results = util.mount_cb(cdev, read_context_disk_dir, + data=parseuser) + except NonContextDiskDir: + continue + except BrokenContextDiskDir as exc: + raise exc + except util.MountFailedError: + LOG.warn("%s was not mountable" % cdev) + + if results: + seed = cdev + LOG.debug("found datasource in %s", cdev) + break + + if not seed: + return False + + # merge fetched metadata with datasource defaults + md = results['metadata'] + md = util.mergemanydict([md, defaults]) + + # check for valid user specified dsmode + user_dsmode = results['metadata'].get('DSMODE', None) + if user_dsmode not in VALID_DSMODES + (None,): + LOG.warn("user specified invalid mode: %s", user_dsmode) + user_dsmode = None + + # decide dsmode + if user_dsmode: + dsmode = user_dsmode + elif self.ds_cfg.get('dsmode'): + dsmode = self.ds_cfg.get('dsmode') + else: + dsmode = DEFAULT_MODE + + if dsmode == "disabled": + # most likely user specified + return False + + # apply static network configuration only in 'local' dsmode + if ('network-interfaces' in results and self.dsmode == "local"): + LOG.debug("Updating network interfaces from %s", self) + self.distro.apply_network(results['network-interfaces']) + + if dsmode != self.dsmode: + LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode) + return False + + self.seed = seed + self.metadata = md + self.userdata_raw = results.get('userdata') + return True + + def get_hostname(self, fqdn=False, resolve_ip=None): + if resolve_ip is None: + if self.dsmode == 'net': + resolve_ip = True + else: + resolve_ip = False + return sources.DataSource.get_hostname(self, fqdn, resolve_ip) + + +class DataSourceOpenNebulaNet(DataSourceOpenNebula): + def __init__(self, sys_cfg, distro, paths): + DataSourceOpenNebula.__init__(self, sys_cfg, distro, paths) + self.dsmode = 'net' + + +class NonContextDiskDir(Exception): + pass + + +class BrokenContextDiskDir(Exception): + pass + + +class OpenNebulaNetwork(object): + REG_DEV_MAC = re.compile( + r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) 
?', + re.MULTILINE | re.DOTALL) + + def __init__(self, ip, context): + self.ip = ip + self.context = context + self.ifaces = self.get_ifaces() + + def get_ifaces(self): + return self.REG_DEV_MAC.findall(self.ip) + + def mac2ip(self, mac): + components = mac.split(':')[2:] + return [str(int(c, 16)) for c in components] + + def get_ip(self, dev, components): + var_name = dev.upper() + '_IP' + if var_name in self.context: + return self.context[var_name] + else: + return '.'.join(components) + + def get_mask(self, dev): + var_name = dev.upper() + '_MASK' + if var_name in self.context: + return self.context[var_name] + else: + return '255.255.255.0' + + def get_network(self, dev, components): + var_name = dev.upper() + '_NETWORK' + if var_name in self.context: + return self.context[var_name] + else: + return '.'.join(components[:-1]) + '.0' + + def get_gateway(self, dev): + var_name = dev.upper() + '_GATEWAY' + if var_name in self.context: + return self.context[var_name] + else: + return None + + def get_dns(self, dev): + var_name = dev.upper() + '_DNS' + if var_name in self.context: + return self.context[var_name] + else: + return None + + def get_domain(self, dev): + var_name = dev.upper() + '_DOMAIN' + if var_name in self.context: + return self.context[var_name] + else: + return None + + def gen_conf(self): + global_dns = [] + if 'DNS' in self.context: + global_dns.append(self.context['DNS']) + + conf = [] + conf.append('auto lo') + conf.append('iface lo inet loopback') + conf.append('') + + for i in self.ifaces: + dev = i[0] + mac = i[1] + ip_components = self.mac2ip(mac) + + conf.append('auto ' + dev) + conf.append('iface ' + dev + ' inet static') + conf.append(' address ' + self.get_ip(dev, ip_components)) + conf.append(' network ' + self.get_network(dev, ip_components)) + conf.append(' netmask ' + self.get_mask(dev)) + + gateway = self.get_gateway(dev) + if gateway: + conf.append(' gateway ' + gateway) + + domain = self.get_domain(dev) + if domain: + conf.append(' dns-search ' + domain) + + # add global DNS servers to all interfaces + dns = self.get_dns(dev) + if global_dns or dns: + all_dns = global_dns + if dns: + all_dns.append(dns) + conf.append(' dns-nameservers ' + ' '.join(all_dns)) + + conf.append('') + + return "\n".join(conf) + + +def find_candidate_devs(): + """ + Return a list of devices that may contain the context disk. 
+ """ + combined = [] + for f in ('LABEL=CONTEXT', 'LABEL=CDROM', 'TYPE=iso9660'): + devs = util.find_devs_with(f) + devs.sort() + for d in devs: + if d not in combined: + combined.append(d) + + return combined + + +def switch_user_cmd(user): + return ['sudo', '-u', user] + + +def parse_shell_config(content, keylist=None, bash=None, asuser=None, + switch_user_cb=None): + + if isinstance(bash, str): + bash = [bash] + elif bash is None: + bash = ['bash', '-e'] + + if switch_user_cb is None: + switch_user_cb = switch_user_cmd + + # allvars expands to all existing variables by using '${!x*}' notation + # where x is lower or upper case letters or '_' + allvars = ["${!%s*}" % x for x in string.letters + "_"] + + keylist_in = keylist + if keylist is None: + keylist = allvars + keylist_in = [] + + setup = '\n'.join(('__v="";', '',)) + + def varprinter(vlist): + # output '\0'.join(['_start_', key=value NULL for vars in vlist] + return '\n'.join(( + 'printf "%s\\0" _start_', + 'for __v in %s; do' % ' '.join(vlist), + ' printf "%s=%s\\0" "$__v" "${!__v}";', + 'done', + '' + )) + + # the rendered 'bcmd' is bash syntax that does + # setup: declare variables we use (so they show up in 'all') + # varprinter(allvars): print all variables known at beginning + # content: execute the provided content + # varprinter(keylist): print all variables known after content + # + # output is then a null terminated array of: + # literal '_start_' + # key=value (for each preset variable) + # literal '_start_' + # key=value (for each post set variable) + bcmd = ('unset IFS\n' + + setup + + varprinter(allvars) + + '{\n%s\n\n:\n} > /dev/null\n' % content + + 'unset IFS\n' + + varprinter(keylist) + "\n") + + cmd = [] + if asuser is not None: + cmd = switch_user_cb(asuser) + + cmd.extend(bash) + + (output, _error) = util.subp(cmd, data=bcmd) + + # exclude vars in bash that change on their own or that we used + excluded = ("RANDOM", "LINENO", "_", "__v") + preset = {} + ret = {} + target = None + output = output[0:-1] # remove trailing null + + # go through output. First _start_ is for 'preset', second for 'target'. + # Add to target only things were changed and not in volitile + for line in output.split("\x00"): + try: + (key, val) = line.split("=", 1) + if target is preset: + target[key] = val + elif (key not in excluded and + (key in keylist_in or preset.get(key) != val)): + ret[key] = val + except ValueError: + if line != "_start_": + raise + if target is None: + target = preset + elif target is preset: + target = ret + + return ret + + +def read_context_disk_dir(source_dir, asuser=None): + """ + read_context_disk_dir(source_dir): + read source_dir and return a tuple with metadata dict and user-data + string populated. 
If not a valid dir, raise a NonContextDiskDir + """ + found = {} + for af in CONTEXT_DISK_FILES: + fn = os.path.join(source_dir, af) + if os.path.isfile(fn): + found[af] = fn + + if not found: + raise NonContextDiskDir("%s: %s" % (source_dir, "no files found")) + + context = {} + results = {'userdata': None, 'metadata': {}} + + if "context.sh" in found: + if asuser is not None: + try: + pwd.getpwnam(asuser) + except KeyError as e: + raise BrokenContextDiskDir("configured user '%s' " + "does not exist", asuser) + try: + with open(os.path.join(source_dir, 'context.sh'), 'r') as f: + content = f.read().strip() + + context = parse_shell_config(content, asuser=asuser) + except util.ProcessExecutionError as e: + raise BrokenContextDiskDir("Error processing context.sh: %s" % (e)) + except IOError as e: + raise NonContextDiskDir("Error reading context.sh: %s" % (e)) + else: + raise NonContextDiskDir("Missing context.sh") + + if not context: + return results + + results['metadata'] = context + + # process single or multiple SSH keys + ssh_key_var = None + if "SSH_KEY" in context: + ssh_key_var = "SSH_KEY" + elif "SSH_PUBLIC_KEY" in context: + ssh_key_var = "SSH_PUBLIC_KEY" + + if ssh_key_var: + lines = context.get(ssh_key_var).splitlines() + results['metadata']['public-keys'] = [l for l in lines + if len(l) and not l.startswith("#")] + + # custom hostname -- try hostname or leave cloud-init + # itself create hostname from IP address later + for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'): + if k in context: + results['metadata']['local-hostname'] = context[k] + break + + # raw user data + if "USER_DATA" in context: + results['userdata'] = context["USER_DATA"] + elif "USERDATA" in context: + results['userdata'] = context["USERDATA"] + + # generate static /etc/network/interfaces + # only if there are any required context variables + # http://opennebula.org/documentation:rel3.8:cong#network_configuration + for k in context.keys(): + if re.match(r'^ETH\d+_IP$', k): + (out, _) = util.subp(['/sbin/ip', 'link']) + net = OpenNebulaNetwork(out, context) + results['network-interfaces'] = net.gen_conf() + break + + return results + + +# Used to match classes to dependencies +datasources = [ + (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )), + (DataSourceOpenNebulaNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 1ce20c10..551b20c4 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -27,6 +27,7 @@ # +import base64 from cloudinit import log as logging from cloudinit import sources from cloudinit import util @@ -35,8 +36,6 @@ import os.path import serial -DEF_TTY_LOC = '/dev/ttyS1' -DEF_TTY_TIMEOUT = 60 LOG = logging.getLogger(__name__) SMARTOS_ATTRIB_MAP = { @@ -47,17 +46,66 @@ SMARTOS_ATTRIB_MAP = { 'user-data': ('user-data', False), 'iptables_disable': ('iptables_disable', True), 'motd_sys_info': ('motd_sys_info', True), + 'availability_zone': ('datacenter_name', True), +} + +DS_NAME = 'SmartOS' +DS_CFG_PATH = ['datasource', DS_NAME] +# BUILT-IN DATASOURCE CONFIGURATION +# The following is the built-in configuration. 
If the values +# are not set via the system configuration, then these default +# will be used: +# serial_device: which serial device to use for the meta-data +# seed_timeout: how long to wait on the device +# no_base64_decode: values which are not base64 encoded and +# are fetched directly from SmartOS, not meta-data values +# base64_keys: meta-data keys that are delivered in base64 +# base64_all: with the exclusion of no_base64_decode values, +# treat all meta-data as base64 encoded +# disk_setup: describes how to partition the ephemeral drive +# fs_setup: describes how to format the ephemeral drive +# +BUILTIN_DS_CONFIG = { + 'serial_device': '/dev/ttyS1', + 'seed_timeout': 60, + 'no_base64_decode': ['root_authorized_keys', + 'motd_sys_info', + 'iptables_disable'], + 'base64_keys': [], + 'base64_all': False, + 'disk_aliases': {'ephemeral0': '/dev/vdb'}, +} + +BUILTIN_CLOUD_CONFIG = { + 'disk_setup': { + 'ephemeral0': {'table_type': 'mbr', + 'layout': False, + 'overwrite': False} + }, + 'fs_setup': [{'label': 'ephemeral0', + 'filesystem': 'ext3', + 'device': 'ephemeral0'}], } class DataSourceSmartOS(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) - self.seed_dir = os.path.join(paths.seed_dir, 'sdc') self.is_smartdc = None - self.seed = self.sys_cfg.get("serial_device", DEF_TTY_LOC) - self.seed_timeout = self.sys_cfg.get("serial_timeout", - DEF_TTY_TIMEOUT) + + self.ds_cfg = util.mergemanydict([ + self.ds_cfg, + util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), + BUILTIN_DS_CONFIG]) + + self.metadata = {} + self.cfg = BUILTIN_CLOUD_CONFIG + + self.seed = self.ds_cfg.get("serial_device") + self.seed_timeout = self.ds_cfg.get("serial_timeout") + self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode') + self.b64_keys = self.ds_cfg.get('base64_keys') + self.b64_all = self.ds_cfg.get('base64_all') def __str__(self): root = sources.DataSource.__str__(self) @@ -70,7 +118,6 @@ class DataSourceSmartOS(sources.DataSource): if not os.path.exists(self.seed): LOG.debug("Host does not appear to be on SmartOS") return False - self.seed = self.seed dmi_info = dmi_data() if dmi_info is False: @@ -79,35 +126,60 @@ class DataSourceSmartOS(sources.DataSource): system_uuid, system_type = dmi_info if 'smartdc' not in system_type.lower(): - LOG.debug("Host is not on SmartOS") + LOG.debug("Host is not on SmartOS. 
+                      system_type)
             return False
 
         self.is_smartdc = True
         md['instance-id'] = system_uuid
 
+        b64_keys = self.query('base64_keys', strip=True, b64=False)
+        if b64_keys is not None:
+            self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
+
+        b64_all = self.query('base64_all', strip=True, b64=False)
+        if b64_all is not None:
+            self.b64_all = util.is_true(b64_all)
+
         for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems():
             smartos_noun, strip = attribute
-            md[ci_noun] = query_data(smartos_noun, self.seed,
-                                     self.seed_timeout, strip=strip)
+            md[ci_noun] = self.query(smartos_noun, strip=strip)
 
         if not md['local-hostname']:
             md['local-hostname'] = system_uuid
 
+        ud = None
         if md['user-data']:
             ud = md['user-data']
-        else:
+        elif md['user-script']:
             ud = md['user-script']
 
-        self.metadata = md
+        self.metadata = util.mergemanydict([md, self.metadata])
         self.userdata_raw = ud
         return True
 
+    def device_name_to_device(self, name):
+        return self.ds_cfg['disk_aliases'].get(name)
+
+    def get_config_obj(self):
+        return self.cfg
+
     def get_instance_id(self):
         return self.metadata['instance-id']
 
+    def query(self, noun, strip=False, default=None, b64=None):
+        if b64 is None:
+            if noun in self.smartos_no_base64:
+                b64 = False
+            elif self.b64_all or noun in self.b64_keys:
+                b64 = True
+
+        return query_data(noun=noun, strip=strip, seed_device=self.seed,
+                          seed_timeout=self.seed_timeout, default=default,
+                          b64=b64)
+
 
 def get_serial(seed_device, seed_timeout):
     """This is replaced in unit testing, allowing us to replace
-        serial.Serial with a mocked class
+        serial.Serial with a mocked class.
 
     The timeout value of 60 seconds should never be hit. The value
     is taken from SmartOS's own provisioning tools. Since we are reading
@@ -124,12 +196,18 @@ def get_serial(seed_device, seed_timeout):
     return ser
 
 
-def query_data(noun, seed_device, seed_timeout, strip=False):
+def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
+               b64=None):
     """Makes a request via the serial console using "GET <NOUN>"
 
     In the response, the first line is the status, while subsequent
     lines are the value. A line containing only "." is used to
     indicate the end of the response.
+
+    If the response is expected to be base64 encoded, then set b64 to
+    true. Unfortunately, there is no way to know if something is 100%
+    encoded, so this method relies on being told if the data is base64 or
+    not.
     """
 
     if not noun:
@@ -143,7 +221,7 @@ def query_data(noun, seed_device, seed_timeout, strip=False):
 
     if 'SUCCESS' not in status:
         ser.close()
-        return None
+        return default
 
     while not eom_found:
         m = ser.readline()
@@ -153,12 +231,27 @@ def query_data(noun, seed_device, seed_timeout, strip=False):
             response.append(m)
 
     ser.close()
-    if not strip:
-        return "".join(response)
+
+    if b64 is None:
+        b64 = query_data('b64-%s' % noun, seed_device=seed_device,
+                         seed_timeout=seed_timeout, b64=False,
+                         default=False, strip=True)
+        b64 = util.is_true(b64)
+
+    resp = None
+    if b64 or strip:
+        resp = "".join(response).rstrip()
     else:
-        return "".join(response).rstrip()
+        resp = "".join(response)
+
+    if b64:
+        try:
+            return base64.b64decode(resp)
+        except TypeError:
+            LOG.warn("Failed base64 decoding key '%s'", noun)
+            return resp
 
-    return None
+    return resp
 
 
 def dmi_data():
@@ -181,7 +274,7 @@ def dmi_data():
     except Exception as e:
         util.logexc(LOG, "Failed to get system UUID", e)
 
-    return sys_uuid.lower(), sys_type
+    return (sys_uuid.lower().strip(), sys_type.strip())
 
 
 # Used to match classes to dependencies
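The "GET <NOUN>" exchange that query_data() parses is simple enough to show with a mock transcript (illustrative only; the value is invented and no serial device is involved):

    # Mock transcript of one metadata request, parsed the same way as above.
    transcript = iter([
        "SUCCESS\n",     # first line: status
        "us-east-1\n",   # following lines: the value
        ".\n",           # a line of just "." ends the response
    ])

    status = next(transcript)
    assert 'SUCCESS' in status

    response = []
    for m in transcript:
        if m.rstrip() == ".":
            break
        response.append(m)

    value = "".join(response).rstrip()   # what strip=True returns
    assert value == "us-east-1"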
""" if not noun: @@ -143,7 +221,7 @@ def query_data(noun, seed_device, seed_timeout, strip=False): if 'SUCCESS' not in status: ser.close() - return None + return default while not eom_found: m = ser.readline() @@ -153,12 +231,27 @@ def query_data(noun, seed_device, seed_timeout, strip=False): response.append(m) ser.close() - if not strip: - return "".join(response) + + if b64 is None: + b64 = query_data('b64-%s' % noun, seed_device=seed_device, + seed_timeout=seed_timeout, b64=False, + default=False, strip=True) + b64 = util.is_true(b64) + + resp = None + if b64 or strip: + resp = "".join(response).rstrip() else: - return "".join(response).rstrip() + resp = "".join(response) + + if b64: + try: + return base64.b64decode(resp) + except TypeError: + LOG.warn("Failed base64 decoding key '%s'", noun) + return resp - return None + return resp def dmi_data(): @@ -181,7 +274,7 @@ def dmi_data(): except Exception as e: util.logexc(LOG, "Failed to get system UUID", e) - return sys_uuid.lower(), sys_type + return (sys_uuid.lower().strip(), sys_type.strip()) # Used to match classes to dependencies diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 974c0407..7dc1fbde 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -53,9 +53,16 @@ class DataSource(object): self.userdata = None self.metadata = None self.userdata_raw = None + + # find the datasource config name. + # remove 'DataSource' from classname on front, and remove 'Net' on end. + # Both Foo and FooNet sources expect config in cfg['sources']['Foo'] name = type_utils.obj_name(self) if name.startswith(DS_PREFIX): name = name[len(DS_PREFIX):] + if name.endswith('Net'): + name = name[0:-3] + self.ds_cfg = util.get_cfg_by_path(self.sys_cfg, ("datasource", name), {}) if not ud_proc: @@ -144,7 +151,7 @@ class DataSource(object): return "iid-datasource" return str(self.metadata['instance-id']) - def get_hostname(self, fqdn=False): + def get_hostname(self, fqdn=False, resolve_ip=False): defdomain = "localdomain" defhost = "localhost" domain = defdomain @@ -168,7 +175,14 @@ class DataSource(object): # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx lhost = self.metadata['local-hostname'] if util.is_ipv4(lhost): - toks = ["ip-%s" % lhost.replace(".", "-")] + toks = [] + if resolve_ip: + toks = util.gethostbyaddr(lhost) + + if toks: + toks = str(toks).split('.') + else: + toks = ["ip-%s" % lhost.replace(".", "-")] else: toks = lhost.split(".") diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 3e49e8c5..07c55802 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -375,7 +375,9 @@ class Init(object): mod = importer.import_module(mod_locs[0]) mod = handlers.fixup_handler(mod) types = c_handlers.register(mod) - LOG.debug("Added handler for %s from %s", types, fname) + if types: + LOG.debug("Added custom handler for %s from %s", + types, fname) except Exception: util.logexc(LOG, "Failed to register handler from %s", fname) @@ -386,10 +388,10 @@ class Init(object): # Register any other handlers that come from the default set. This # is done after the cloud-dir handlers so that the cdir modules can # take over the default user-data handler content-types. 
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 3e49e8c5..07c55802 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -375,7 +375,9 @@ class Init(object):
                 mod = importer.import_module(mod_locs[0])
                 mod = handlers.fixup_handler(mod)
                 types = c_handlers.register(mod)
-                LOG.debug("Added handler for %s from %s", types, fname)
+                if types:
+                    LOG.debug("Added custom handler for %s from %s",
+                              types, fname)
             except Exception:
                 util.logexc(LOG, "Failed to register handler from %s", fname)
@@ -386,10 +388,10 @@ class Init(object):
         # Register any other handlers that come from the default set. This
         # is done after the cloud-dir handlers so that the cdir modules can
         # take over the default user-data handler content-types.
-        def_handlers = self._default_userdata_handlers()
-        applied_def_handlers = c_handlers.register_defaults(def_handlers)
-        if applied_def_handlers:
-            LOG.debug("Registered default handlers: %s", applied_def_handlers)
+        for mod in self._default_userdata_handlers():
+            types = c_handlers.register(mod, overwrite=False)
+            if types:
+                LOG.debug("Added default handler for %s from %s", types, mod)
 
         # Form our cloud interface
         data = self.cloudify()
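The effect of overwrite=False above can be sketched with a toy registry (the register() below is a stand-in for illustration, not the real ContentHandlers API):

    registered = {}

    def register(handler, content_types, overwrite=True):
        added = []
        for t in content_types:
            if t in registered and not overwrite:
                continue  # keep the already-registered (custom) handler
            registered[t] = handler
            added.append(t)
        return added

    register('custom', ['text/cloud-config'])                    # cloud-dir handler
    register('default', ['text/cloud-config'], overwrite=False)  # default loses
    assert registered['text/cloud-config'] == 'custom'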
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 4a74ba57..a8ddb390 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -32,6 +32,7 @@ import grp
 import gzip
 import hashlib
 import os
+import os.path
 import platform
 import pwd
 import random
@@ -161,13 +162,13 @@ class SeLinuxGuard(object):
         self.recursive = recursive
 
     def __enter__(self):
-        if self.selinux:
+        if self.selinux and self.selinux.is_selinux_enabled():
             return True
         else:
             return False
 
     def __exit__(self, excp_type, excp_value, excp_traceback):
-        if self.selinux:
+        if self.selinux and self.selinux.is_selinux_enabled():
             path = os.path.realpath(os.path.expanduser(self.path))
             do_restore = False
             try:
@@ -360,11 +361,21 @@ def multi_log(text, console=True, stderr=True,
     if stderr:
         sys.stderr.write(text)
     if console:
-        # Don't use the write_file since
-        # this might be 'sensitive' info (not debug worthy?)
-        with open('/dev/console', 'wb') as wfh:
-            wfh.write(text)
-            wfh.flush()
+        conpath = "/dev/console"
+        if os.path.exists(conpath):
+            with open(conpath, 'wb') as wfh:
+                wfh.write(text)
+                wfh.flush()
+        else:
+            # A container may lack /dev/console (arguably a container bug).
+            # If it does not exist, then write output to stdout. This will
+            # result in duplicate stderr and stdout messages if stderr was
+            # True.
+            #
+            # Even though upstart or systemd might have set up output to go
+            # to /dev/console, the user may have configured elsewhere via
+            # cloud-config 'output'. If there is a /dev/console, messages
+            # will still get there.
+            sys.stdout.write(text)
     if log:
         if text[-1] == "\n":
             log.log(log_level, text[:-1])
@@ -955,6 +966,13 @@ def get_hostname():
     return hostname
 
 
+def gethostbyaddr(ip):
+    try:
+        return socket.gethostbyaddr(ip)[0]
+    except socket.herror:
+        return None
+
+
 def is_resolvable_url(url):
     """determine if this url is resolvable (existing or ip)."""
     return (is_resolvable(urlparse.urlparse(url).hostname))
@@ -1719,6 +1737,15 @@ def parse_mount_info(path, mountinfo_lines, log=LOG):
     return None
 
 
+def parse_mtab(path):
+    """On older kernels there's no /proc/$$/mountinfo, so use mtab."""
+    for line in load_file("/etc/mtab").splitlines():
+        devpth, mount_point, fs_type = line.split()[:3]
+        if mount_point == path:
+            return devpth, fs_type, mount_point
+    return None
+
+
 def get_mount_info(path, log=LOG):
     # Use /proc/$$/mountinfo to find the device where path is mounted.
     # This is done because with a btrfs filesystem using os.stat(path)
@@ -1749,8 +1776,11 @@ def get_mount_info(path, log=LOG):
     # So use /proc/$$/mountinfo to find the device underlying the
     # input path.
     mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
-    lines = load_file(mountinfo_path).splitlines()
-    return parse_mount_info(path, lines, log)
+    if os.path.exists(mountinfo_path):
+        lines = load_file(mountinfo_path).splitlines()
+        return parse_mount_info(path, lines, log)
+    else:
+        return parse_mtab(path)
 
 
 def which(program):
@@ -1791,17 +1821,29 @@ def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
         ret = func(*args, **kwargs)
     finally:
         delta = time.time() - start
+        udelta = None
         if ustart is not None:
             try:
                 udelta = float(uptime()) - ustart
             except ValueError:
-                udelta = "N/A"
+                pass
 
         tmsg = " took %0.3f seconds" % delta
         if get_uptime:
-            tmsg += "(%0.2f)" % udelta
+            if isinstance(udelta, float):
+                tmsg += " (%0.2f)" % udelta
+            else:
+                tmsg += " (N/A)"
         try:
             logfunc(msg + tmsg)
         except:
             pass
     return ret
+
+
+def expand_dotted_devname(dotted):
+    toks = dotted.rsplit(".", 1)
+    if len(toks) > 1:
+        return toks
+    else:
+        return (dotted, None)
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 4b29a587..3db57235 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -20,7 +20,7 @@ from distutils import version as vr
 
 
 def version():
-    return vr.StrictVersion("0.7.3")
+    return vr.StrictVersion("0.7.5")
 
 
 def version_string():
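The new expand_dotted_devname() helper has no caller in this commit, so a short usage sketch (values invented; presumably for 'device.vlan'-style names) may help:

    from cloudinit.util import expand_dotted_devname

    dev, vlan = expand_dotted_devname("eth0.100")  # dev == "eth0", vlan == "100"
    dev, vlan = expand_dotted_devname("eth0")      # dev == "eth0", vlan is None
    # Note the two branches return a list and a tuple respectively;
    # both unpack the same way.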