author     Scott Moser <smoser@ubuntu.com>   2013-09-20 20:39:55 -0400
committer  Scott Moser <smoser@ubuntu.com>   2013-09-20 20:39:55 -0400
commit     35469c11703b1df66b3b14ad06b583d672f39ff5 (patch)
tree       e04e01cefb6b1ca3c39a4d782a5236114d81d5e9
parent     266d12c5777d5fba97c374c33cb4f31d50e2d347 (diff)
parent     fc97491fef9780a03cca6b78b477cbf75856f46c (diff)
add support for partitioning and creating filesystems
If the disks attached at boot do not have a filesystem on them, this module can be used to set one up. LP: #1218506
-rw-r--r--  ChangeLog                                        |   2
-rw-r--r--  cloudinit/config/cc_disk_setup.py                | 693
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py           |  72
-rw-r--r--  config/cloud.cfg                                 |   1
-rw-r--r--  doc/examples/cloud-config-disk-setup.txt         | 210
-rw-r--r--  doc/sources/smartos/README.rst                   |  27
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py  |  35
7 files changed, 1019 insertions(+), 21 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index a53a6b6f..366292d3 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -18,6 +18,8 @@
- support random data seed from config drive or azure, and a module
'seed_random' to read that and write it to /dev/urandom.
- add OpenNebula Datasource [Vlastimil Holer]
+ - add 'cc_disk_setup' config module for partitioning disks and creating
+ filesystems. Useful if attached disks are not formatted (LP: #1218506)
0.7.2:
- add a debian watch file
- add 'sudo' entry to ubuntu's default user (LP: #1080717)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
new file mode 100644
index 00000000..fb404c5d
--- /dev/null
+++ b/cloudinit/config/cc_disk_setup.py
@@ -0,0 +1,693 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
+import logging
+import shlex
+
+frequency = PER_INSTANCE
+
+# Define the commands to use
+UDEVADM_CMD = util.which('udevadm')
+SFDISK_CMD = util.which("sfdisk")
+LSBLK_CMD = util.which("lsblk")
+BLKID_CMD = util.which("blkid")
+BLKDEV_CMD = util.which("blockdev")
+
+LOG = logging.getLogger(__name__)
+
+
+def handle(_name, cfg, cloud, log, _args):
+ """
+    Handle the 'disk_setup' and 'fs_setup' cloud-config sections.
+    See doc/examples/cloud-config-disk-setup.txt for documentation on
+    the format.
+ """
+ disk_setup = cfg.get("disk_setup")
+ if isinstance(disk_setup, dict):
+ log.info("Partitioning disks.")
+ for disk, definition in disk_setup.items():
+ if not isinstance(definition, dict):
+ log.warn("Invalid disk definition for %s" % disk)
+ continue
+
+ try:
+ log.debug("Creating new partition table/disk")
+ util.log_time(logfunc=LOG.debug,
+ msg="Creating partition on %s" % disk,
+ func=mkpart, args=(disk, cloud, definition))
+ except Exception as e:
+ util.logexc(LOG, "Failed partitioning operation\n%s" % e)
+
+ fs_setup = cfg.get("fs_setup")
+ if isinstance(fs_setup, list):
+ log.info("Creating file systems.")
+ for definition in fs_setup:
+ if not isinstance(definition, dict):
+ log.warn("Invalid file system definition: %s" % definition)
+ continue
+
+ try:
+ log.debug("Creating new filesystem.")
+ device = definition.get('device')
+ util.log_time(logfunc=LOG.debug,
+ msg="Creating fs for %s" % device,
+ func=mkfs, args=(cloud, definition))
+ except Exception as e:
+ util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
+
+
+def is_default_device(name, cloud, fallback=None):
+ """
+ Ask the cloud datasource if the 'name' maps to a default
+    device. If so, return that value; otherwise return 'fallback' if it
+    is defined, else return 'name'.
+ """
+
+ _dev = None
+ try:
+ _dev = cloud.device_name_to_device(name)
+ except Exception as e:
+        util.logexc(LOG, "Failed to find mapping for %s" % name)
+
+ if _dev:
+ return _dev
+
+ if fallback:
+ return fallback
+
+ return name
+
+
+def value_splitter(values, start=None):
+ """
+    Returns the key/value pairs from a string of output formatted
+    like: FOO='BAR' HOME='127.0.0.1'
+ """
+ _values = shlex.split(values)
+ if start:
+ _values = _values[start:]
+
+ for key, value in [x.split('=') for x in _values]:
+ yield key, value
+
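+# Example: for a line of 'lsblk --pairs' output such as
+#     NAME="vdb" TYPE="disk"
+# value_splitter() yields ('NAME', 'vdb') and then ('TYPE', 'disk').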
+
+def device_type(device):
+ """
+ Return the device type of the device by calling lsblk.
+ """
+
+ lsblk_cmd = [LSBLK_CMD, '--pairs', '--nodeps', '--out', 'NAME,TYPE',
+ device]
+ info = None
+ try:
+ info, _err = util.subp(lsblk_cmd)
+ except Exception as e:
+ raise Exception("Failed during disk check for %s\n%s" % (device, e))
+
+ for key, value in value_splitter(info):
+ if key.lower() == "type":
+ return value.lower()
+
+ return None
+
+
+def is_device_valid(name, partition=False):
+ """
+ Check if the device is a valid device.
+ """
+ d_type = ""
+ try:
+ d_type = device_type(name)
+ except:
+ LOG.warn("Query against device %s failed" % name)
+ return False
+
+ if partition and d_type == 'part':
+ return True
+ elif not partition and d_type == 'disk':
+ return True
+ return False
+
+
+def check_fs(device):
+ """
+ Check if the device has a filesystem on it
+
+ Output of blkid is generally something like:
+ /dev/sda: LABEL="Backup500G" UUID="..." TYPE="ext4"
+
+ Return values are device, label, type, uuid
+ """
+ out, label, fs_type, uuid = None, None, None, None
+
+ blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
+ try:
+ out, _err = util.subp(blkid_cmd, rcs=[0, 2])
+ except Exception as e:
+ raise Exception("Failed during disk check for %s\n%s" % (device, e))
+
+ if out:
+ if len(out.splitlines()) == 1:
+ for key, value in value_splitter(out, start=1):
+ if key.lower() == 'label':
+ label = value
+ elif key.lower() == 'type':
+ fs_type = value
+ elif key.lower() == 'uuid':
+ uuid = value
+
+ return label, fs_type, uuid
+
+
+def is_filesystem(device):
+ """
+ Returns true if the device has a file system.
+ """
+ _, fs_type, _ = check_fs(device)
+ return fs_type
+
+
+def find_device_node(device, fs_type=None, label=None, valid_targets=None,
+ label_match=True):
+ """
+    Find a device that either matches the spec or, failing that, the
+    first suitable unused device.
+
+    The return value is (<device>, <bool>) where the device is the
+    device to use and the bool is whether the device matches the
+    fs_type and label.
+
+ Note: This works with GPT partition tables!
+ """
+ if not valid_targets:
+ valid_targets = ['disk', 'part']
+
+ lsblk_cmd = [LSBLK_CMD, '--pairs', '--out', 'NAME,TYPE,FSTYPE,LABEL',
+ device]
+ info = None
+ try:
+ info, _err = util.subp(lsblk_cmd)
+ except Exception as e:
+ raise Exception("Failed during disk check for %s\n%s" % (device, e))
+
+ raw_device_used = False
+ parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
+
+ for part in parts:
+ d = {'name': None,
+ 'type': None,
+ 'fstype': None,
+ 'label': None,
+ }
+
+ for key, value in value_splitter(part):
+ d[key.lower()] = value
+
+ if d['fstype'] == fs_type and \
+ ((label_match and d['label'] == label) or not label_match):
+ # If we find a matching device, we return that
+ return ('/dev/%s' % d['name'], True)
+
+ if d['type'] in valid_targets:
+
+ if d['type'] != 'disk' or d['fstype']:
+ raw_device_used = True
+
+ if d['type'] == 'disk':
+                # Skip the raw disk, it's the default
+ pass
+
+ elif not d['fstype']:
+ return ('/dev/%s' % d['name'], False)
+
+ if not raw_device_used:
+ return (device, False)
+
+ LOG.warn("Failed to find device during available device search.")
+ return (None, False)
+
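+# Example of find_device_node() behaviour, given 'lsblk --pairs' output of:
+#     NAME="vdb" TYPE="disk" FSTYPE="" LABEL=""
+#     NAME="vdb1" TYPE="part" FSTYPE="ext4" LABEL="ephemeral0"
+# a call with fs_type='ext4' and label='ephemeral0' returns
+# ('/dev/vdb1', True); if the partition had no FSTYPE, it would instead be
+# returned as ('/dev/vdb1', False), i.e. usable but not matching.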
+
+def is_disk_used(device):
+ """
+    Check if the device is currently used. Returns True if lsblk
+    reports partitions on the device (or if the check fails), and
+    False otherwise.
+ """
+ lsblk_cmd = [LSBLK_CMD, '--pairs', '--out', 'NAME,TYPE',
+ device]
+ info = None
+ try:
+ info, _err = util.subp(lsblk_cmd)
+ except Exception as e:
+ # if we error out, we can't use the device
+ util.logexc(LOG,
+ "Error checking for filesystem on %s\n%s" % (device, e))
+ return True
+
+ # If there is any output, then the device has something
+ if len(info.splitlines()) > 1:
+ return True
+
+ return False
+
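+# Note on is_disk_used(): 'lsblk --pairs' prints one line per block device,
+# so more than one line of output for a disk means at least one partition
+# exists and the disk is treated as used.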
+
+def get_hdd_size(device):
+ """
+ Returns the hard disk size.
+ This works with any disk type, including GPT.
+ """
+
+ size_cmd = [SFDISK_CMD, '--show-size', device]
+ size = None
+ try:
+ size, _err = util.subp(size_cmd)
+ except Exception as e:
+ raise Exception("Failed to get %s size\n%s" % (device, e))
+
+ return int(size.strip())
+
+
+def get_dyn_func(*args):
+ """
+ Call the appropriate function.
+
+ The first value is the template for function name
+ The second value is the template replacement
+ The remain values are passed to the function
+
+ For example: get_dyn_func("foo_%s", 'bar', 1, 2, 3,)
+ would call "foo_bar" with args of 1, 2, 3
+ """
+ if len(args) < 2:
+        raise Exception("Unable to determine dynamic function name")
+
+ func_name = (args[0] % args[1])
+ func_args = args[2:]
+
+ try:
+ if func_args:
+ return globals()[func_name](*func_args)
+ else:
+ return globals()[func_name]
+
+ except KeyError:
+ raise Exception("No such function %s to call!" % func_name)
+
+
+def check_partition_mbr_layout(device, layout):
+ """
+ Returns true if the partition layout matches the one on the disk
+
+ Layout should be a list of values. At this time, this only
+    verifies that the number of partitions and their type labels are correct.
+ """
+
+ read_parttbl(device)
+ prt_cmd = [SFDISK_CMD, "-l", device]
+ try:
+ out, _err = util.subp(prt_cmd, data="%s\n" % layout)
+ except Exception as e:
+ raise Exception("Error running partition command on %s\n%s" % (
+ device, e))
+
+ found_layout = []
+ for line in out.splitlines():
+ _line = line.split()
+ if len(_line) == 0:
+ continue
+
+ if device in _line[0]:
+ # We don't understand extended partitions yet
+ if _line[-1].lower() in ['extended', 'empty']:
+ continue
+
+ # Find the partition types
+ type_label = None
+ for x in sorted(range(1, len(_line)), reverse=True):
+ if _line[x].isdigit() and _line[x] != '/':
+ type_label = _line[x]
+ break
+
+ found_layout.append(type_label)
+
+ if isinstance(layout, bool):
+        # If auto partitioning was requested ('layout: True'), be happy
+        # as long as a single partition exists.
+ if layout and len(found_layout) >= 1:
+ return True
+ return False
+
+ else:
+ if len(found_layout) != len(layout):
+ return False
+ else:
+ # This just makes sure that the number of requested
+ # partitions and the type labels are right
+ for x in range(1, len(layout) + 1):
+                    if isinstance(layout[x - 1], (tuple, list)):
+                        _, part_type = layout[x - 1]
+                        if int(found_layout[x - 1]) != int(part_type):
+ return False
+ return True
+
+ return False
+
+
+def check_partition_layout(table_type, device, layout):
+ """
+    See if the partition layout matches.
+
+    This is a future-proofing function. In order to add support for
+    other disk layout schemes, add a function called
+    check_partition_<type>_layout.
+ """
+ return get_dyn_func("check_partition_%s_layout", table_type, device,
+ layout)
+
+
+def get_partition_mbr_layout(size, layout):
+ """
+ Calculate the layout of the partition table. Partition sizes
+ are defined as percentage values or a tuple of percentage and
+ partition type.
+
+ For example:
+        [ 33, [66, 82] ]
+
+    Defines the first partition to be 1/3 of the disk, while the
+    remaining 2/3 will be of type Linux Swap (type 82).
+ """
+
+ if not isinstance(layout, list) and isinstance(layout, bool):
+ # Create a single partition
+ return "0,"
+
+ if (len(layout) == 0 and isinstance(layout, list)) or \
+ not isinstance(layout, list):
+ raise Exception("Partition layout is invalid")
+
+ last_part_num = len(layout)
+ if last_part_num > 4:
+        raise Exception("Only simple partitioning (at most 4 partitions) "
+                        "is allowed.")
+
+ part_definition = []
+ part_num = 0
+ for part in layout:
+ part_type = 83 # Default to Linux
+ percent = part
+ part_num += 1
+
+ if isinstance(part, list):
+ if len(part) != 2:
+ raise Exception("Partition was incorrectly defined: %s" % \
+ part)
+ percent, part_type = part
+
+ part_size = int((float(size) * (float(percent) / 100)) / 1024)
+
+ if part_num == last_part_num:
+ part_definition.append(",,%s" % part_type)
+ else:
+ part_definition.append(",%s,%s" % (part_size, part_type))
+
+ sfdisk_definition = "\n".join(part_definition)
+ if len(part_definition) > 4:
+ raise Exception("Calculated partition definition is too big\n%s" %
+ sfdisk_definition)
+
+ return sfdisk_definition
+
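+# Worked example: for a disk whose size, as returned by get_hdd_size() in
+# 1024-byte blocks ('sfdisk --show-size'), is 1048576 (1 GiB),
+# get_partition_mbr_layout(1048576, [33, [66, 82]]) computes
+# int(1048576 * 0.33 / 1024) == 337 for the first partition and emits:
+#     ",337,83\n,,82"
+# i.e. a ~337 MiB Linux (83) partition followed by the rest as swap (82).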
+
+def get_partition_layout(table_type, size, layout):
+ """
+ Call the appropriate function for creating the table
+ definition. Returns the table definition
+
+ This is a future proofing function. To add support for
+ other layouts, simply add a "get_partition_%s_layout"
+ function.
+ """
+ return get_dyn_func("get_partition_%s_layout", table_type, size, layout)
+
+
+def read_parttbl(device):
+ """
+    Re-read the partition table, using 'blockdev --rereadpt' bracketed
+    by 'udevadm settle' so the kernel and udev pick up the new layout.
+ """
+ blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
+ udev_cmd = [UDEVADM_CMD, 'settle']
+ try:
+ util.subp(udev_cmd)
+ util.subp(blkdev_cmd)
+ util.subp(udev_cmd)
+ except Exception as e:
+ util.logexc(LOG, "Failed reading the partition table %s" % e)
+
+
+def exec_mkpart_mbr(device, layout):
+ """
+    Create the MBR partitions. This is broken out into its own function
+    so that other partition table types (e.g. GPT) can be added later.
+ """
+ # Create the partitions
+ prt_cmd = [SFDISK_CMD, "--Linux", "-uM", device]
+ try:
+ util.subp(prt_cmd, data="%s\n" % layout)
+ except Exception as e:
+ raise Exception("Failed to partition device %s\n%s" % (device, e))
+
+ read_parttbl(device)
+
+
+def exec_mkpart(table_type, device, layout):
+ """
+ Fetches the function for creating the table type.
+    This allows the correct function to be found dynamically.
+
+    Parameters:
+ table_type: type of partition table to use
+ device: the device to work on
+ layout: layout definition specific to partition table
+ """
+ return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
+
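+# Example: exec_mkpart('mbr', '/dev/vdb', layout) dispatches, via
+# get_dyn_func, to exec_mkpart_mbr('/dev/vdb', layout).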
+
+def mkpart(device, cloud, definition):
+ """
+ Creates the partition table.
+
+ Parameters:
+ cloud: the cloud object
+ definition: dictionary describing how to create the partition.
+
+ The following are supported values in the dict:
+ overwrite: Should the partition table be created regardless
+                   of any pre-existing data?
+ layout: the layout of the partition table
+ table_type: Which partition table to use, defaults to MBR
+ device: the device to work on.
+ """
+
+ LOG.debug("Checking values for %s definition" % device)
+ overwrite = definition.get('overwrite', False)
+ layout = definition.get('layout', False)
+ table_type = definition.get('table_type', 'mbr')
+ _device = is_default_device(device, cloud)
+
+ # Check if the default device is a partition or not
+ LOG.debug("Checking against default devices")
+ if _device and (_device != device):
+ if not is_device_valid(_device):
+ _device = _device[:-1]
+
+ if not is_device_valid(_device):
+ raise Exception("Unable to find backing block device for %s" % \
+ device)
+ else:
+ LOG.debug("Mapped %s to physical device %s" % (device, _device))
+ device = _device
+
+ if (isinstance(layout, bool) and not layout) or not layout:
+ LOG.debug("Device is not to be partitioned, skipping")
+ return # Device is not to be partitioned
+
+ # This prevents you from overwriting the device
+ LOG.debug("Checking if device %s is a valid device" % device)
+ if not is_device_valid(device):
+ raise Exception("Device %s is not a disk device!" % device)
+
+ LOG.debug("Checking if device layout matches")
+ if check_partition_layout(table_type, device, layout):
+ LOG.debug("Device partitioning layout matches")
+ return True
+
+ LOG.debug("Checking if device is safe to partition")
+ if not overwrite and (is_disk_used(device) or is_filesystem(device)):
+ LOG.debug("Skipping partitioning on configured device %s" % device)
+ return
+
+ LOG.debug("Checking for device size")
+ device_size = get_hdd_size(device)
+
+ LOG.debug("Calculating partition layout")
+ part_definition = get_partition_layout(table_type, device_size, layout)
+ LOG.debug(" Layout is: %s" % part_definition)
+
+ LOG.debug("Creating partition table on %s" % device)
+ exec_mkpart(table_type, device, part_definition)
+
+ LOG.debug("Partition table created for %s" % device)
+
+
+def mkfs(cloud, fs_cfg):
+ """
+ Create a file system on the device.
+
+    fs_cfg: defines how the filesystem is to look.
+        The following values are generally required:
+            label: the file system label to use
+            device: which device, or a cloud-defined default device
+ filesystem: which file system type
+ overwrite: indiscriminately create the file system
+ partition: when device does not define a partition,
+ setting this to a number will mean
+ device + partition. When set to 'auto', the
+ first free device or the first device which
+ matches both label and type will be used.
+
+ 'any' means the first filesystem that matches
+ on the device.
+
+    When 'cmd' is provided, it is used as given (with the label,
+    filesystem and device values interpolated into it) and the
+    mkfs.<type> lookup is skipped.
+ """
+    fs_cfg.setdefault('partition', 'any')
+ label = fs_cfg.get('label')
+ device = fs_cfg.get('device')
+ partition = str(fs_cfg.get('partition'))
+ fs_type = fs_cfg.get('filesystem')
+ fs_cmd = fs_cfg.get('cmd', [])
+ fs_opts = fs_cfg.get('extra_opts', [])
+ overwrite = fs_cfg.get('overwrite', False)
+
+    # This allows the cloud's default 'ephemeral' or 'swap' device to be
+    # referenced by label
+ LOG.debug("Checking %s against default devices" % device)
+ _device = is_default_device(label, cloud, fallback=device)
+ if _device and (_device != device):
+ if not is_device_valid(_device):
+ raise Exception("Unable to find backing block device for %s" % \
+ device)
+ else:
+ LOG.debug("Mapped %s to physical device %s" % (device, _device))
+ device = _device
+
+ if not partition or partition.isdigit():
+ # Handle manual definition of partition
+ if partition.isdigit():
+ device = "%s%s" % (device, partition)
+ LOG.debug("Manual request of partition %s for %s" % (
+ partition, device))
+
+ # Check to see if the fs already exists
+ LOG.debug("Checking device %s" % device)
+ check_label, check_fstype, _ = check_fs(device)
+ LOG.debug("Device %s has %s %s" % (device, check_label, check_fstype))
+
+ if check_label == label and check_fstype == fs_type:
+ LOG.debug("Existing file system found at %s" % device)
+
+ if not overwrite:
+ LOG.warn("Device %s has required file system" % device)
+ return
+ else:
+ LOG.warn("Destroying filesystem on %s" % device)
+
+ else:
+        LOG.debug("Device %s is cleared for formatting" % device)
+
+ elif partition and str(partition).lower() in ('auto', 'any'):
+        # For 'auto'/'any' requests, reuse an existing matching filesystem
+        odevice = device
+        LOG.debug("Identifying device to create %s filesystem on" % label)
+
+        # 'any' means pick the first partition on the device with a matching fs_type
+ label_match = True
+ if partition.lower() == 'any':
+ label_match = False
+
+ device, reuse = find_device_node(device, fs_type=fs_type, label=label,
+ label_match=label_match)
+ LOG.debug("Automatic device for %s identified as %s" % (
+ odevice, device))
+
+ if reuse:
+            LOG.debug("Found filesystem match, skipping formatting.")
+ return
+
+ if not device:
+            LOG.debug("No device available that matches the request.")
+ LOG.debug("Skipping fs creation for %s" % fs_cfg)
+ return
+
+ else:
+ LOG.debug("Error in device identification handling.")
+ return
+
+ LOG.debug("File system %s will be created on %s" % (label, device))
+
+ # Make sure the device is defined
+ if not device:
+ LOG.critical("Device is not known: %s" % fs_cfg)
+ return
+
+ # Check that we can create the FS
+ if not label or not fs_type:
+        LOG.debug("Filesystem definition for %s is missing a label or "
+                  "filesystem type." % label)
+
+ # Create the commands
+ if fs_cmd:
+ fs_cmd = fs_cfg['cmd'] % {'label': label,
+ 'filesystem': fs_type,
+ 'device': device,
+ }
+ else:
+ # Find the mkfs command
+ mkfs_cmd = util.which("mkfs.%s" % fs_type)
+ if not mkfs_cmd:
+ mkfs_cmd = util.which("mk%s" % fs_type)
+
+ if not mkfs_cmd:
+ LOG.critical("Unable to locate command to create filesystem.")
+ return
+
+ fs_cmd = [mkfs_cmd, device]
+
+ if label:
+ fs_cmd.extend(["-L", label])
+
+    # Add any extra FS options
+ if fs_opts:
+ fs_cmd.extend(fs_opts)
+
+ LOG.debug("Creating file system %s on %s" % (label, device))
+ LOG.debug(" Using cmd: %s" % "".join(fs_cmd))
+ try:
+ util.subp(fs_cmd)
+ except Exception as e:
+        raise Exception("Failed to exec '%s':\n%s" % (fs_cmd, e))
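
For orientation, here is a minimal sketch of the already-parsed configuration
that handle() above consumes via cfg.get("disk_setup") and cfg.get("fs_setup").
It mirrors the cloud-config example added in doc/examples/cloud-config-disk-setup.txt
below; the device names and sizes are illustrative only, not module defaults:

    # Parsed Python equivalent of a cloud-config disk_setup/fs_setup section.
    # Device names and values here are examples, not defaults.
    cfg = {
        'disk_setup': {
            '/dev/xvdh': {
                'table_type': 'mbr',        # only 'mbr' is supported
                'layout': [33, [66, 82]],   # 1/3 Linux (83), 2/3 swap (82)
                'overwrite': False,
            },
        },
        'fs_setup': [
            {'label': 'ephemeral0', 'filesystem': 'ext3',
             'device': '/dev/xvdh', 'partition': 'auto'},
        ],
    }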
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index d348d20b..da1eec79 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -35,8 +35,7 @@ import os
import os.path
import serial
-DEF_TTY_LOC = '/dev/ttyS1'
-DEF_TTY_TIMEOUT = 60
+
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
@@ -49,24 +48,61 @@ SMARTOS_ATTRIB_MAP = {
'motd_sys_info': ('motd_sys_info', True),
}
-# These are values which will never be base64 encoded.
-# They come from the cloud platform, not user
-SMARTOS_NO_BASE64 = ['root_authorized_keys', 'motd_sys_info',
- 'iptables_disable']
+DS_NAME = 'SmartOS'
+DS_CFG_PATH = ['datasource', DS_NAME]
+# BUILT-IN DATASOURCE CONFIGURATION
+# The following is the built-in configuration. If the values
+# are not set via the system configuration, then these defaults
+# will be used:
+# serial_device: which serial device to use for the meta-data
+# serial_timeout: how long to wait on the device
+# no_base64_decode: values which are not base64 encoded and
+# are fetched directly from SmartOS, not meta-data values
+# base64_keys: meta-data keys that are delivered in base64
+# base64_all: with the exclusion of no_base64_decode values,
+# treat all meta-data as base64 encoded
+# disk_setup: describes how to partition the ephemeral drive
+# fs_setup: describes how to format the ephemeral drive
+#
+BUILTIN_DS_CONFIG = {
+ 'serial_device': '/dev/ttyS1',
+    'serial_timeout': 60,
+ 'no_base64_decode': ['root_authorized_keys',
+ 'motd_sys_info',
+ 'iptables_disable'],
+ 'base64_keys': [],
+ 'base64_all': False,
+ 'ephemeral_disk': '/dev/vdb',
+ 'disk_setup': {
+ 'ephemeral0': {'table_type': 'mbr',
+ 'layout': True,
+ 'overwrite': False}
+ },
+ 'fs_setup': [{'label': 'ephemeral0', 'filesystem': 'ext3',
+ 'device': '/dev/xvdb', 'partition': 'auto'}],
+}
class DataSourceSmartOS(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'sdc')
self.is_smartdc = None
- self.seed = self.ds_cfg.get("serial_device", DEF_TTY_LOC)
- self.seed_timeout = self.ds_cfg.get("serial_timeout", DEF_TTY_TIMEOUT)
- self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode',
- SMARTOS_NO_BASE64)
- self.b64_keys = self.ds_cfg.get('base64_keys', [])
- self.b64_all = self.ds_cfg.get('base64_all', False)
+ self.ds_cfg = util.mergemanydict([
+ self.ds_cfg,
+ util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
+ BUILTIN_DS_CONFIG])
+
+ self.metadata = {}
+ self.cfg = {}
+ self.cfg['disk_setup'] = self.ds_cfg.get('disk_setup')
+ self.cfg['fs_setup'] = self.ds_cfg.get('fs_setup')
+
+ self.seed = self.ds_cfg.get("serial_device")
+ self.seed_timeout = self.ds_cfg.get("serial_timeout")
+ self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
+ self.b64_keys = self.ds_cfg.get('base64_keys')
+ self.b64_all = self.ds_cfg.get('base64_all')
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -79,7 +115,6 @@ class DataSourceSmartOS(sources.DataSource):
if not os.path.exists(self.seed):
LOG.debug("Host does not appear to be on SmartOS")
return False
- self.seed = self.seed
dmi_info = dmi_data()
if dmi_info is False:
@@ -114,10 +149,17 @@ class DataSourceSmartOS(sources.DataSource):
elif md['user-script']:
ud = md['user-script']
- self.metadata = md
+ self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
return True
+ def device_name_to_device(self, name):
+ if 'ephemeral0' in name:
+ return self.ds_cfg['ephemeral_disk']
+
+ def get_config_obj(self):
+ return self.cfg
+
def get_instance_id(self):
return self.metadata['instance-id']
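
As a rough sketch (values illustrative only), the built-in defaults above can be
overridden through the system configuration: whatever appears under the
'datasource'/'SmartOS' path of sys_cfg is merged with BUILTIN_DS_CONFIG, with
the built-in values acting as the fallback. A hypothetical override might look
like:

    # Hypothetical contents of sys_cfg['datasource']['SmartOS'], e.g. supplied
    # from a file under /etc/cloud/cloud.cfg.d/.  All values are examples.
    smartos_ds_override = {
        'serial_device': '/dev/ttyS2',
        'ephemeral_disk': '/dev/vdc',
        'base64_all': True,
    }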
diff --git a/config/cloud.cfg b/config/cloud.cfg
index cce1f376..382cfaf8 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -42,6 +42,7 @@ cloud_config_modules:
# Emit the cloud config ready event
# this can be used by upstart jobs for 'start on cloud-config'.
- emit_upstart
+ - disk_setup
- mounts
- ssh-import-id
- locale
diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt
new file mode 100644
index 00000000..db2c52a7
--- /dev/null
+++ b/doc/examples/cloud-config-disk-setup.txt
@@ -0,0 +1,210 @@
+Cloud-init supports the creation of simple partition tables and file systems
+on devices.
+
+Default disk definitions for AWS
+--------------------------------
+(Not implemented yet, but provided for future documentation)
+
+ disk_setup:
+   ephemeral0:
+ type: 'mbr'
+ layout: True
+ overwrite: False
+
+ fs_setup:
+  - label: None
+ filesystem: ext3
+ device: ephemeral0
+ partition: auto
+
+Default disk definitions for Windows Azure
+------------------------------------------
+(Not implemented yet due to conflict with WALinuxAgent in Ubuntu)
+
+disk_setup:
+ /dev/sdb:
+ type: mbr
+ layout: True
+ overwrite: False
+
+fs_setup:
+ - label: ephemeral0
+ filesystem: ext3
+ device: ephemeral0
+ partition: any
+
+
+Default disk definitions for SmartOS
+------------------------------------
+
+ephemeral_disk: /dev/vdb
+disk_setup:
+ /dev/vdb:
+ type: mbr
+ layout: True
+ overwrite: False
+
+fs_setup:
+ - label: ephemeral0
+ filesystem: ext3
+ device: /dev/vdb
+ partition: 1
+
+Caveat for SmartOS: if the ephemeral disk is not defined, then the disk will
+ not be automatically added to the mounts.
+
+
+The default definition is used to make sure that the ephemeral storage is
+set up properly.
+
+"disk_setup": disk partitioning
+--------------------------------
+
+The disk_setup directive instructs Cloud-init to partition a disk. The format is:
+
+ disk_setup:
+   ephemeral0:
+ type: 'mbr'
+ layout: 'auto'
+ /dev/xvdh:
+ type: 'mbr'
+ layout:
+ - 33
+ - [33, 82]
+ - 33
+ overwrite: True
+
+The format is a dict of dicts: each key names a device, and its value defines
+how to create and lay out the partition table for that device.
+
+The general format is:
+ disk_setup:
+ <DEVICE>:
+ type: 'mbr'
+ layout: <LAYOUT|BOOL>
+ overwrite: <BOOL>
+
+Where:
+ <DEVICE>: The name of the device. 'ephemeralX' and 'swap' are special
+ values which are specific to the cloud. For these devices
+ Cloud-init will look up what the real devices is and then
+               Cloud-init will look up what the real device is and then
+
+ For other devices, the kernel device name is used. At this
+               time only simple kernel devices are supported, meaning
+ that device mapper and other targets may not work.
+
+ Note: At this time, there is no handling or setup of
+ device mapper targets.
+
+ type=<TYPE>: Currently the following are supported:
+           'mbr': the default; sets up an MS-DOS partition table
+
+ Note: At this time only 'mbr' partition tables are allowed.
+             It is anticipated that GPT will be added as an option in
+             the future, or even "RAID" to create an mdadm RAID.
+
+ layout={...}: The device layout. This is a list of values, with the
+            percentage of the disk each partition will take.
+ Valid options are:
+                [<SIZE>, [<SIZE>, <PART_TYPE>]]
+
+ Where <SIZE> is the _percentage_ of the disk to use, while
+ <PART_TYPE> is the numerical value of the partition type.
+
+            The following sets up two partitions: the first uses the swap
+            partition type (82) and takes 1/3 of the disk space, and the
+            remainder is used as the second partition.
+              '/dev/xvdh':
+ type: 'mbr'
+ layout:
+ - [33,82]
+ - 66
+ overwrite: True
+
+             When layout is "true" it means create a single partition
+             spanning the entire device.
+
+ When layout is "false" it means don't partition or ignore
+ existing partitioning.
+
+ If layout is set to "true" and overwrite is set to "false",
+ it will skip partitioning the device without a failure.
+
+   overwrite=<BOOL>: This describes whether to ride with the safeties on and
+ everything holstered.
+
+ 'false' is the default, which means that:
+ 1. The device will be checked for a partition table
+ 2. The device will be checked for a file system
+            3. If either a partition or file system is found, then
+ the operation will be _skipped_.
+
+ 'true' is cowboy mode. There are no checks and things are
+            done blindly. Use with caution; you can do things you
+ really, really don't want to do.
+
+
+fs_setup: Setup the file system
+-------------------------------
+
+fs_setup describes how the file systems are supposed to look.
+
+ fs_setup:
+ - label: ephemeral0
+ filesystem: 'ext3'
+ device: 'ephemeral0'
+ partition: 'auto'
+ - label: mylabl2
+ filesystem: 'ext4'
+ device: '/dev/xvda1'
+ - special:
+       cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s
+ filesystem: 'btrfs'
+ device: '/dev/xvdh'
+
+The general format is:
+ fs_setup:
+ - label: <LABEL>
+ filesystem: <FS_TYPE>
+ device: <DEVICE>
+ partition: <PART_VALUE>
+ overwrite: <OVERWRITE>
+
+Where:
+ <LABEL>: The file system label to be used. If set to None, no label is
+ used.
+
+    <FS_TYPE>: The file system type. It is assumed that there
+       will be a "mkfs.<FS_TYPE>" that behaves like "mkfs". On a standard
+ Ubuntu Cloud Image, this means that you have the option of ext{2,3,4},
+ and vfat by default.
+
+ <DEVICE>: The device name. Special names of 'ephemeralX' or 'swap'
+ are allowed and the actual device is acquired from the cloud datasource.
+ When using 'ephemeralX' (i.e. ephemeral0), make sure to leave the
+ label as 'ephemeralX' otherwise there may be issues with the mounting
+ of the ephemeral storage layer.
+
+ <PART_VALUE>: The valid options are:
+       "auto": auto is special in the sense that you are telling cloud-init
+          not to care whether there is a partition or not. Auto will use the
+          first partition that does not already contain a file system. In
+          the absence of a partition table, it will put the file system
+          directly on the disk.
+
+ "none": Put the partition directly on the disk.
+
+ <NUM>: where NUM is the actual partition number.
+
+ <OVERWRITE>: Defines whether or not to overwrite any existing
+ filesystem.
+
+ "true": Indiscriminately destroy any pre-existing file system. Use at
+ your own peril.
+
+  "false": If a file system already exists, skip the creation.
+
+Behavior Caveat: The default behavior is to _check_ if the file system exists.
+ If a file system matches the specification, then the operation is a no-op.
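
For the 'cmd' form shown above, the module applies plain Python string
interpolation with lowercase keys; a minimal sketch of what happens to a
user-supplied command (the label, filesystem and device values are examples):

    # Sketch of the substitution cc_disk_setup performs on a custom 'cmd'.
    cmd = "mkfs -t %(filesystem)s -L %(label)s %(device)s"
    print(cmd % {'label': 'data', 'filesystem': 'btrfs', 'device': '/dev/xvdh'})
    # -> mkfs -t btrfs -L data /dev/xvdh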
diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst
index fd4e496d..e2d3312e 100644
--- a/doc/sources/smartos/README.rst
+++ b/doc/sources/smartos/README.rst
@@ -5,11 +5,13 @@ SmartOS Datasource
This datasource finds metadata and user-data from the SmartOS virtualization
platform (i.e. Joyent).
+Please see http://smartos.org/ for information about SmartOS.
+
SmartOS Platform
----------------
-The SmartOS virtualization platform meta-data to the instance via the second
-serial console. On Linux, this is /dev/ttyS1. The data is a provided via a
-simple protocol, where something queries for the userdata, where the console
+The SmartOS virtualization platform provides meta-data to the instance via the
+second serial console. On Linux, this is /dev/ttyS1. The data is provided
+via a simple protocol: something queries for the data, and the console
responds with the status and, on "SUCCESS", returns the data until a single ".\n".
New versions of the SmartOS tooling will include support for base64 encoded data.
@@ -18,7 +20,7 @@ Userdata
--------
In SmartOS parlance, user-data is actually meta-data. This userdata can be
-provided a key-value pairs.
+provided as key-value pairs.
Cloud-init supports reading the traditional meta-data fields supported by the
SmartOS tools. These are:
@@ -36,13 +38,13 @@ user-script
SmartOS traditionally supports sending over a user-script for execution at the
rc.local level. Cloud-init supports running user-scripts as if they were
cloud-init user-data. In this sense, anything with a shell interpreter
-directive will run
+directive will run.
user-data and user-script
-------------------------
In the event that a user defines the meta-data key of "user-data" it will
-always supercede any user-script data. This is for consistency.
+always supersede any user-script data. This is for consistency.
base64
------
@@ -70,3 +72,16 @@ or not to base64 decode something:
* no_base64_decode: This is a configuration setting
(i.e. /etc/cloud/cloud.cfg.d) that sets which values should not be
base64 decoded.
+
+ephemeral_disk:
+---------------
+
+The 'ephemeral_disk' setting instructs Cloud-init which disk to auto-mount.
+By default, SmartOS only supports a single ephemeral disk.
+
+Cloud-init's default SmartOS configuration will prepare the ephemeral disk and
+format it for you; the SmartOS platform itself does not prepare the ephemeral
+disk.
+
+If you change ephemeral_disk, you should also consider changing
+the default disk formatting parameters. See
+doc/examples/cloud-config-disk-setup.txt for information on using this.
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index f53715b0..56fe811e 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -261,6 +261,41 @@ class TestSmartOSDataSource(MockerTestCase):
self.assertEquals(MOCK_RETURNS['enable_motd_sys_info'],
dsrc.metadata['motd_sys_info'])
+ def test_default_ephemeral(self):
+ # Test to make sure that the builtin config has the ephemeral
+ # configuration.
+ dsrc = self._get_ds()
+ cfg = dsrc.get_config_obj()
+
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ assert 'disk_setup' in cfg
+ assert 'fs_setup' in cfg
+ self.assertIsInstance(cfg['disk_setup'], dict)
+ self.assertIsInstance(cfg['fs_setup'], list)
+
+ def test_override_builtin_ds(self):
+        # Test to make sure that the built-in DS is overridden
+ data = {}
+ data['disk_setup'] = {'test_dev': {}}
+ data['fs_setup'] = [{'label': 'test_dev'}]
+ data['serial_device'] = '/dev/ttyS2'
+ dsrc = self._get_ds(ds_cfg=data)
+ cfg = dsrc.get_config_obj()
+
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ assert 'disk_setup' in cfg
+ assert 'fs_setup' in cfg
+ self.assertIsInstance(cfg['disk_setup'], dict)
+ self.assertIsInstance(cfg['fs_setup'], list)
+ assert 'test_dev' in cfg['disk_setup']
+ assert 'test_dev' in cfg['fs_setup'][0]['label']
+
+ self.assertEquals(data['serial_device'], dsrc.seed)
+
def apply_patches(patches):
ret = []