-rw-r--r--  ChangeLog                                               |   6
-rwxr-xr-x  bin/cloud-init                                          |   6
-rw-r--r--  cloudinit/config/cc_growpart.py                         |  28
-rw-r--r--  cloudinit/config/cc_scripts_vendor.py                   |  43
-rw-r--r--  cloudinit/handlers/__init__.py                          |   4
-rw-r--r--  cloudinit/handlers/cloud_config.py                      |   2
-rw-r--r--  cloudinit/handlers/shell_script.py                      |   2
-rw-r--r--  cloudinit/helpers.py                                    |  28
-rw-r--r--  cloudinit/settings.py                                   |   1
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py               |   2
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py                  |   2
-rw-r--r--  cloudinit/sources/__init__.py                           |  14
-rw-r--r--  cloudinit/stages.py                                     | 118
-rw-r--r--  cloudinit/user_data.py                                  |   6
-rw-r--r--  cloudinit/util.py                                       |  14
-rw-r--r--  config/cloud.cfg                                        |   1
-rw-r--r--  config/cloud.cfg.d/05_logging.cfg                       |   5
-rw-r--r--  doc/examples/cloud-config-growpart.txt                  |   4
-rw-r--r--  doc/examples/cloud-config-vendor-data.txt               |  16
-rw-r--r--  doc/vendordata.txt                                      |  53
-rw-r--r--  tests/unittests/test_data.py (renamed from tests/unittests/test_userdata.py) | 176
-rw-r--r--  tests/unittests/test_datasource/test_opennebula.py      |   8
-rw-r--r--  tests/unittests/test_ec2_util.py                        |   5
-rw-r--r--  tests/unittests/test_handler/test_handler_growpart.py   |  55
-rw-r--r--  tests/unittests/test_runs/test_merge_run.py             |   4
-rw-r--r--  tests/unittests/test_runs/test_simple_run.py            |   4
26 files changed, 478 insertions(+), 129 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 8029f9af..e2bf08b0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -9,6 +9,12 @@
(LP: #1260072).
- Azure: minor changes in logging output. ensure filenames are strings (not
unicode).
+ - config/cloud.cfg.d/05_logging.cfg: provide a default 'output' setting, to
+   redirect cloud-init stderr and stdout to /var/log/cloud-init-output.log.
+ - drop support for resizing partitions with parted entirely (LP: #1212492).
+   This support was already broken anyway.
+ - add support for vendordata.
+ - drop dependency on boto for crawling ec2 metadata service.
0.7.4:
- fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a
partitioned block device with target filesystem on ephemeral0.1.
diff --git a/bin/cloud-init b/bin/cloud-init
index b4f9fd07..80a1df05 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -261,8 +261,8 @@ def main_init(name, args):
# Attempt to consume the data per instance.
# This may run user-data handlers and/or perform
# url downloads and such as needed.
- (ran, _results) = init.cloudify().run('consume_userdata',
- init.consume_userdata,
+ (ran, _results) = init.cloudify().run('consume_data',
+ init.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
if not ran:
@@ -271,7 +271,7 @@ def main_init(name, args):
#
# See: https://bugs.launchpad.net/bugs/819507 for a little
# reason behind this...
- init.consume_userdata(PER_ALWAYS)
+ init.consume_data(PER_ALWAYS)
except Exception:
util.logexc(LOG, "Consuming user data failed!")
return 1
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 0dd92a46..6bddf847 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -80,30 +80,6 @@ class ResizeFailedException(Exception):
pass
-class ResizeParted(object):
- def available(self):
- myenv = os.environ.copy()
- myenv['LANG'] = 'C'
-
- try:
- (out, _err) = util.subp(["parted", "--help"], env=myenv)
- if re.search(r"COMMAND.*resizepart\s+", out, re.DOTALL):
- return True
-
- except util.ProcessExecutionError:
- pass
- return False
-
- def resize(self, diskdev, partnum, partdev):
- before = get_size(partdev)
- try:
- util.subp(["parted", diskdev, "resizepart", partnum])
- except util.ProcessExecutionError as e:
- raise ResizeFailedException(e)
-
- return (before, get_size(partdev))
-
-
class ResizeGrowPart(object):
def available(self):
myenv = os.environ.copy()
@@ -279,6 +255,4 @@ def handle(_name, cfg, _cloud, log, _args):
else:
log.debug("'%s' %s: %s" % (entry, action, msg))
-# LP: 1212444 FIXME re-order and favor ResizeParted
-#RESIZERS = (('growpart', ResizeGrowPart),)
-RESIZERS = (('growpart', ResizeGrowPart), ('parted', ResizeParted))
+RESIZERS = (('growpart', ResizeGrowPart),)
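With parted removed, RESIZERS is now a single-entry table. As a rough sketch of
how a factory such as cc_growpart.resizer_factory consumes it (the exact loop
shape here is an assumption, not the verbatim function):

    # Sketch: return the first resizer whose available() probe passes.
    from cloudinit.config.cc_growpart import RESIZERS

    def pick_resizer(mode='auto'):
        for name, resizer_cls in RESIZERS:
            if mode not in ('auto', name):
                continue  # an explicit mode only matches its own entry
            resizer = resizer_cls()
            if resizer.available():
                return resizer
        raise ValueError("no resizers available for mode %r" % mode)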
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
new file mode 100644
index 00000000..0c9e504e
--- /dev/null
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -0,0 +1,43 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2014 Canonical Ltd.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit import util
+
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+
+SCRIPT_SUBDIR = 'vendor'
+
+
+def handle(name, cfg, cloud, log, _args):
+    # This directory is written to by the vendor-data handlers;
+    # any vendor-data shell scripts get placed in runparts_path.
+ runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts',
+ SCRIPT_SUBDIR)
+
+ prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
+
+ try:
+ util.runparts(runparts_path, exe_prefix=prefix)
+ except:
+ log.warn("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
+ raise
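
The prefix fed to runparts comes straight out of the merged cloud-config. A
small illustration of the lookup's behavior (util.get_cfg_by_path is
cloud-init's nested-dict accessor with a default):

    from cloudinit import util

    cfg = {'vendor_data': {'enabled': True, 'prefix': '/usr/bin/ltrace'}}
    util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
    # -> '/usr/bin/ltrace'

    util.get_cfg_by_path({}, ('vendor_data', 'prefix'), [])
    # -> [] (the default), so runparts runs the scripts with no prefix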
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 2ddc75f4..059d7495 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -187,6 +187,10 @@ def _escape_string(text):
def walker_callback(data, filename, payload, headers):
content_type = headers['Content-Type']
+ if content_type in data.get('excluded'):
+ LOG.debug('content_type "%s" is excluded', content_type)
+ return
+
if content_type in PART_CONTENT_TYPES:
walker_handle_handler(data, content_type, filename, payload)
return
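
One subtlety of the new guard: data.get('excluded') returns None when the key
is absent, and `x in None` raises TypeError, so every walker has to seed the
key; the stages.py changes below do exactly that via 'excluded' in part_data.
A toy check:

    data = {'excluded': ['text/part-handler']}    # as seeded by _do_handlers
    'text/part-handler' in data.get('excluded')   # -> True, part is skipped
    'text/x-shellscript' in data.get('excluded')  # -> False, part is handled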
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 34a73115..4232700f 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -66,6 +66,8 @@ class CloudConfigPartHandler(handlers.Handler):
handlers.Handler.__init__(self, PER_ALWAYS, version=3)
self.cloud_buf = None
self.cloud_fn = paths.get_ipath("cloud_config")
+ if 'cloud_config_path' in _kwargs:
+ self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"])
self.file_names = []
def list_types(self):
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index 62289d98..30c1ed89 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -36,6 +36,8 @@ class ShellScriptPartHandler(handlers.Handler):
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
self.script_dir = paths.get_ipath_cur('scripts')
+ if 'script_path' in _kwargs:
+ self.script_dir = paths.get_ipath_cur(_kwargs['script_path'])
def list_types(self):
return [
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index e5eac6a7..e701126e 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -200,11 +200,13 @@ class Runners(object):
class ConfigMerger(object):
def __init__(self, paths=None, datasource=None,
- additional_fns=None, base_cfg=None):
+ additional_fns=None, base_cfg=None,
+ include_vendor=True):
self._paths = paths
self._ds = datasource
self._fns = additional_fns
self._base_cfg = base_cfg
+ self._include_vendor = include_vendor
# Created on first use
self._cfg = None
@@ -237,13 +239,19 @@ class ConfigMerger(object):
# a configuration file to use when running...
if not self._paths:
return i_cfgs
- cc_fn = self._paths.get_ipath_cur('cloud_config')
- if cc_fn and os.path.isfile(cc_fn):
- try:
- i_cfgs.append(util.read_conf(cc_fn))
- except:
- util.logexc(LOG, 'Failed loading of cloud-config from %s',
- cc_fn)
+
+ cc_paths = ['cloud_config']
+ if self._include_vendor:
+ cc_paths.append('vendor_cloud_config')
+
+ for cc_p in cc_paths:
+ cc_fn = self._paths.get_ipath_cur(cc_p)
+ if cc_fn and os.path.isfile(cc_fn):
+ try:
+ i_cfgs.append(util.read_conf(cc_fn))
+ except:
+ util.logexc(LOG, 'Failed loading of cloud-config from %s',
+ cc_fn)
return i_cfgs
def _read_cfg(self):
@@ -331,13 +339,17 @@ class Paths(object):
self.lookups = {
"handlers": "handlers",
"scripts": "scripts",
+ "vendor_scripts": "scripts/vendor",
"sem": "sem",
"boothooks": "boothooks",
"userdata_raw": "user-data.txt",
"userdata": "user-data.txt.i",
"obj_pkl": "obj.pkl",
"cloud_config": "cloud-config.txt",
+ "vendor_cloud_config": "vendor-cloud-config.txt",
"data": "data",
+ "vendordata_raw": "vendor-data.txt",
+ "vendordata": "vendor-data.txt.i",
}
# Set when a datasource becomes active
self.datasource = ds
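
For reference, the new lookup keys resolve under the current-instance link;
the shapes below assume the stock /var/lib/cloud layout:

    # paths.get_ipath_cur('vendordata_raw')
    #   -> /var/lib/cloud/instance/vendor-data.txt
    # paths.get_ipath_cur('vendordata')
    #   -> /var/lib/cloud/instance/vendor-data.txt.i
    # paths.get_ipath_cur('vendor_scripts')
    #   -> /var/lib/cloud/instance/scripts/vendor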
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 5df7f557..7be2199a 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -52,6 +52,7 @@ CFG_BUILTIN = {
},
'distro': 'ubuntu',
},
+ 'vendor_data': {'enabled': True, 'prefix': []},
}
# Valid frequencies of handlers/modules
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 07dc25ff..b0464cbb 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -323,7 +323,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
(output, _error) = util.subp(cmd, data=bcmd)
# exclude vars in bash that change on their own or that we used
- excluded = ("RANDOM", "LINENO", "_", "__v")
+ excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v")
preset = {}
ret = {}
target = None
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 551b20c4..6593ce6e 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -47,6 +47,7 @@ SMARTOS_ATTRIB_MAP = {
'iptables_disable': ('iptables_disable', True),
'motd_sys_info': ('motd_sys_info', True),
'availability_zone': ('datacenter_name', True),
+ 'vendordata': ('sdc:operator-script', False),
}
DS_NAME = 'SmartOS'
@@ -154,6 +155,7 @@ class DataSourceSmartOS(sources.DataSource):
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
+ self.vendordata_raw = md['vendordata']
return True
def device_name_to_device(self, name):
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 7dc1fbde..4b3bf62f 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -53,6 +53,8 @@ class DataSource(object):
self.userdata = None
self.metadata = None
self.userdata_raw = None
+ self.vendordata = None
+ self.vendordata_raw = None
# find the datasource config name.
# remove 'DataSource' from classname on front, and remove 'Net' on end.
@@ -77,9 +79,14 @@ class DataSource(object):
if self.userdata is None:
self.userdata = self.ud_proc.process(self.get_userdata_raw())
if apply_filter:
- return self._filter_userdata(self.userdata)
+ return self._filter_xdata(self.userdata)
return self.userdata
+ def get_vendordata(self):
+ if self.vendordata is None:
+ self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
+ return self.vendordata
+
@property
def launch_index(self):
if not self.metadata:
@@ -88,7 +95,7 @@ class DataSource(object):
return self.metadata['launch-index']
return None
- def _filter_userdata(self, processed_ud):
+ def _filter_xdata(self, processed_ud):
filters = [
launch_index.Filter(util.safe_int(self.launch_index)),
]
@@ -104,6 +111,9 @@ class DataSource(object):
def get_userdata_raw(self):
return self.userdata_raw
+ def get_vendordata_raw(self):
+ return self.vendordata_raw
+
    # the data sources' config_obj is a cloud-config formatted
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 07c55802..593b72a2 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -123,6 +123,7 @@ class Init(object):
os.path.join(c_dir, 'scripts', 'per-instance'),
os.path.join(c_dir, 'scripts', 'per-once'),
os.path.join(c_dir, 'scripts', 'per-boot'),
+ os.path.join(c_dir, 'scripts', 'vendor'),
os.path.join(c_dir, 'seed'),
os.path.join(c_dir, 'instances'),
os.path.join(c_dir, 'handlers'),
@@ -319,6 +320,7 @@ class Init(object):
if not self._write_to_cache():
return
self._store_userdata()
+ self._store_vendordata()
def _store_userdata(self):
raw_ud = "%s" % (self.datasource.get_userdata_raw())
@@ -326,11 +328,20 @@ class Init(object):
processed_ud = "%s" % (self.datasource.get_userdata())
util.write_file(self._get_ipath('userdata'), processed_ud, 0600)
- def _default_userdata_handlers(self):
- opts = {
+ def _store_vendordata(self):
+ raw_vd = "%s" % (self.datasource.get_vendordata_raw())
+ util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0600)
+ processed_vd = "%s" % (self.datasource.get_vendordata())
+ util.write_file(self._get_ipath('vendordata'), processed_vd, 0600)
+
+ def _default_handlers(self, opts=None):
+ if opts is None:
+ opts = {}
+
+ opts.update({
'paths': self.paths,
'datasource': self.datasource,
- }
+ })
# TODO(harlowja) Hmmm, should we dynamically import these??
def_handlers = [
cc_part.CloudConfigPartHandler(**opts),
@@ -340,7 +351,23 @@ class Init(object):
]
return def_handlers
- def consume_userdata(self, frequency=PER_INSTANCE):
+ def _default_userdata_handlers(self):
+ return self._default_handlers()
+
+ def _default_vendordata_handlers(self):
+ return self._default_handlers(
+ opts={'script_path': 'vendor_scripts',
+ 'cloud_config_path': 'vendor_cloud_config'})
+
+ def _do_handlers(self, data_msg, c_handlers_list, frequency,
+ excluded=None):
+ """
+ Generalized handlers suitable for use with either vendordata
+ or userdata
+ """
+ if excluded is None:
+ excluded = []
+
cdir = self.paths.get_cpath("handlers")
idir = self._get_ipath("handlers")
@@ -352,12 +379,6 @@ class Init(object):
if d and d not in sys.path:
sys.path.insert(0, d)
- # Ensure datasource fetched before activation (just incase)
- user_data_msg = self.datasource.get_userdata(True)
-
- # This keeps track of all the active handlers
- c_handlers = helpers.ContentHandlers()
-
def register_handlers_in_dir(path):
# Attempts to register any handler modules under the given path.
if not path or not os.path.isdir(path):
@@ -382,13 +403,16 @@ class Init(object):
util.logexc(LOG, "Failed to register handler from %s",
fname)
+ # This keeps track of all the active handlers
+ c_handlers = helpers.ContentHandlers()
+
# Add any handlers in the cloud-dir
register_handlers_in_dir(cdir)
# Register any other handlers that come from the default set. This
# is done after the cloud-dir handlers so that the cdir modules can
# take over the default user-data handler content-types.
- for mod in self._default_userdata_handlers():
+ for mod in c_handlers_list:
types = c_handlers.register(mod, overwrite=False)
if types:
LOG.debug("Added default handler for %s from %s", types, mod)
@@ -406,7 +430,7 @@ class Init(object):
handlers.call_begin(mod, data, frequency)
c_handlers.initialized.append(mod)
- def walk_handlers():
+ def walk_handlers(excluded):
# Walk the user data
part_data = {
'handlers': c_handlers,
@@ -419,9 +443,9 @@ class Init(object):
            # to help write their contents to files with numbered
# names...
'handlercount': 0,
+ 'excluded': excluded,
}
- handlers.walk(user_data_msg, handlers.walker_callback,
- data=part_data)
+ handlers.walk(data_msg, handlers.walker_callback, data=part_data)
def finalize_handlers():
# Give callbacks opportunity to finalize
@@ -438,10 +462,16 @@ class Init(object):
try:
init_handlers()
- walk_handlers()
+ walk_handlers(excluded)
finally:
finalize_handlers()
+ def consume_data(self, frequency=PER_INSTANCE):
+        # Consume the userdata first, because we want to let the part
+        # handlers run first (for merging purposes)
+ self._consume_userdata(frequency)
+ self._consume_vendordata(frequency)
+
# Perform post-consumption adjustments so that
# modules that run during the init stage reflect
# this consumed set.
@@ -453,6 +483,64 @@ class Init(object):
# objects before the load of the userdata happened,
# this is expected.
+ def _consume_vendordata(self, frequency=PER_INSTANCE):
+ """
+ Consume the vendordata and run the part handlers on it
+ """
+ # User-data should have been consumed first.
+ # So we merge the other available cloud-configs (everything except
+ # vendor provided), and check whether or not we should consume
+        # vendor data at all. That gives the user or system a chance to override.
+ if not self.datasource.get_vendordata_raw():
+ LOG.debug("no vendordata from datasource")
+ return
+
+ _cc_merger = helpers.ConfigMerger(paths=self._paths,
+ datasource=self.datasource,
+ additional_fns=[],
+ base_cfg=self.cfg,
+ include_vendor=False)
+ vdcfg = _cc_merger.cfg.get('vendor_data', {})
+
+ if not isinstance(vdcfg, dict):
+ vdcfg = {'enabled': False}
+ LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg)
+
+ enabled = vdcfg.get('enabled')
+ no_handlers = vdcfg.get('disabled_handlers', None)
+
+ if not util.is_true(enabled):
+ LOG.debug("vendordata consumption is disabled.")
+ return
+
+ LOG.debug("vendor data will be consumed. disabled_handlers=%s",
+ no_handlers)
+
+        # Ensure vendordata is fetched before activation (just in case)
+ vendor_data_msg = self.datasource.get_vendordata()
+
+ # This keeps track of all the active handlers, while excluding what the
+        # user doesn't want run, e.g. boot_hook, cloud_config, shell_script
+ c_handlers_list = self._default_vendordata_handlers()
+
+ # Run the handlers
+ self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
+ excluded=no_handlers)
+
+ def _consume_userdata(self, frequency=PER_INSTANCE):
+ """
+ Consume the userdata and run the part handlers
+ """
+
+        # Ensure datasource is fetched before activation (just in case)
+ user_data_msg = self.datasource.get_userdata(True)
+
+ # This keeps track of all the active handlers
+ c_handlers_list = self._default_handlers()
+
+ # Run the handlers
+ self._do_handlers(user_data_msg, c_handlers_list, frequency)
+
class Modules(object):
def __init__(self, init, cfg_files=None):
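
Condensed, the new consumption path in stages.py looks roughly like this (a
sketch of the code above, not verbatim):

    def consume_data(self, frequency=PER_INSTANCE):
        self._consume_userdata(frequency)    # user parts run first, so...
        self._consume_vendordata(frequency)  # ...user config can veto vendor data

    # _consume_vendordata short-circuits unless the merged, vendor-free
    # config enables it:
    #   vdcfg = ConfigMerger(..., include_vendor=False).cfg.get('vendor_data', {})
    #   if not util.is_true(vdcfg.get('enabled')):
    #       return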
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index d49ea094..3032ef70 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -88,7 +88,11 @@ class UserDataProcessor(object):
def process(self, blob):
accumulating_msg = MIMEMultipart()
- self._process_msg(convert_string(blob), accumulating_msg)
+ if isinstance(blob, list):
+ for b in blob:
+ self._process_msg(convert_string(b), accumulating_msg)
+ else:
+ self._process_msg(convert_string(blob), accumulating_msg)
return accumulating_msg
def _process_msg(self, base_msg, append_msg):
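
The new list branch lets a caller hand process() several raw blobs and get
back one accumulating multipart message. A sketch of the resulting shape
(proc stands for a UserDataProcessor already wired up by Init; constructing
one directly needs a real Paths object):

    msg = proc.process(['#cloud-config\na: b\n',
                        '#!/bin/sh\necho hi\n'])
    # msg is a single MIMEMultipart containing a cloud-config part and a
    # shell-script part, as if both blobs arrived in one multipart payload.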
diff --git a/cloudinit/util.py b/cloudinit/util.py
index a37172dc..3ce54f28 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -608,18 +608,28 @@ def del_dir(path):
shutil.rmtree(path)
-def runparts(dirp, skip_no_exist=True):
+def runparts(dirp, skip_no_exist=True, exe_prefix=None):
if skip_no_exist and not os.path.isdir(dirp):
return
failed = []
attempted = []
+
+ if exe_prefix is None:
+ prefix = []
+ elif isinstance(exe_prefix, str):
+ prefix = [str(exe_prefix)]
+ elif isinstance(exe_prefix, list):
+ prefix = exe_prefix
+ else:
+ raise TypeError("exe_prefix must be None, str, or list")
+
for exe_name in sorted(os.listdir(dirp)):
exe_path = os.path.join(dirp, exe_name)
if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
attempted.append(exe_path)
try:
- subp([exe_path], capture=False)
+ subp(prefix + [exe_path], capture=False)
except ProcessExecutionError as e:
logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
failed.append(e)
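
With the normalization above, exe_prefix may be None, a single string, or an
argv-style list, and each executable runs as prefix + [exe_path]. For example:

    from cloudinit import util

    # Run every vendor script under ltrace (the directory is the
    # 'vendor_scripts' path from the helpers.py lookups table).
    util.runparts('/var/lib/cloud/instance/scripts/vendor',
                  exe_prefix='/usr/bin/ltrace')
    # a str prefix becomes ['/usr/bin/ltrace', '<script>']; a list such
    # as ['env', 'FOO=1'] is used as-is; None means no prefix at all.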
diff --git a/config/cloud.cfg b/config/cloud.cfg
index a07cd3b0..b746e3db 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -64,6 +64,7 @@ cloud_config_modules:
# The modules that run in the 'final' stage
cloud_final_modules:
- rightscale_userdata
+ - scripts-vendor
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
diff --git a/config/cloud.cfg.d/05_logging.cfg b/config/cloud.cfg.d/05_logging.cfg
index 410a0650..2e180730 100644
--- a/config/cloud.cfg.d/05_logging.cfg
+++ b/config/cloud.cfg.d/05_logging.cfg
@@ -59,3 +59,8 @@ log_cfgs:
- [ *log_base, *log_file ]
# A file path can also be used
# - /etc/log.conf
+
+# this tells cloud-init to redirect its stdout and stderr to
+# 'tee -a /var/log/cloud-init-output.log' so the user can see output
+# there without needing to look on the console.
+output: {all: '| tee -a /var/log/cloud-init-output.log'}
diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt
index a459573d..393d5164 100644
--- a/doc/examples/cloud-config-growpart.txt
+++ b/doc/examples/cloud-config-growpart.txt
@@ -5,12 +5,10 @@
#
# mode:
# values:
-# * auto: use any option possible (growpart or parted)
+#          * auto: use any available option
# if none are available, do not warn, but debug.
# * growpart: use growpart to grow partitions
# if growpart is not available, this is an error.
-# * parted: use parted (parted resizepart) to resize partitions
-# if parted is not available, this is an error.
# * off, false
#
# devices:
diff --git a/doc/examples/cloud-config-vendor-data.txt b/doc/examples/cloud-config-vendor-data.txt
new file mode 100644
index 00000000..7f90847b
--- /dev/null
+++ b/doc/examples/cloud-config-vendor-data.txt
@@ -0,0 +1,16 @@
+#cloud-config
+#
+# This explains how to control vendordata via a cloud-config
+#
+# On select datasources, vendors have a special channel, called
+# vendordata, for supplying all supported user-data types. Users of
+# the end system are given ultimate control.
+#
+vendor_data:
+ enabled: True
+ prefix: /usr/bin/ltrace
+
+# enabled: whether it is enabled or not
+# prefix: the command to run before any vendor scripts.
+# Note: this is a fairly weak method of containment. It should
+#         be used to profile a script, not to prevent it from running
diff --git a/doc/vendordata.txt b/doc/vendordata.txt
new file mode 100644
index 00000000..9acbe41c
--- /dev/null
+++ b/doc/vendordata.txt
@@ -0,0 +1,53 @@
+=== Overview ===
+Vendordata is data provided by the entity that launches an instance
+(for example, the cloud provider). This data can be used to
+customize the image to fit into the particular environment it is
+being run in.
+
+Vendordata follows the same rules as user-data, with the following
+caveats:
+ 1. Users have ultimate control over vendordata. They can disable its
+ execution or disable handling of specific parts of multipart input.
+ 2. By default it only runs on first boot.
+ 3. Vendordata can be disabled by the user. If the use of vendordata is
+ required for the instance to run, then vendordata should not be
+ used.
+ 4. User-supplied cloud-config is merged over cloud-config from
+ vendordata.
+
+Users providing cloud-config data can use the '#cloud-config-jsonp' method
+to more finely control their modifications to the vendor supplied
+cloud-config. For example, if both vendor and user have provided
+'runcmd', then the default merge handler will cause the user's runcmd to
+override the one provided by the vendor. To append to 'runcmd', the user
+could instead provide multipart input with a cloud-config-jsonp part like:
+ #cloud-config-jsonp
+ [{ "op": "add", "path": "/runcmd", "value": ["my", "command", "here"]}]
+
+Further, we strongly advise vendors to not 'be evil'. By evil, we
+mean any action that could compromise a system. Since users trust
+you, please take care to make sure that any vendordata is safe,
+atomic, idempotent and does not put your users at risk.
+
+=== Input Formats ===
+cloud-init will download and cache to the filesystem any vendor-data it
+finds. Vendordata is handled exactly like user-data. That means that
+the vendor can supply multipart input and have those parts acted on
+in the same way as user-data.
+
+The only differences are:
+ * vendor-scripts are stored in a different location than user-scripts (to
+ avoid namespace collision)
+ * the user can disable part handlers via cloud-config settings.
+ For example, to disable handling of 'part-handlers' in vendor-data,
+ the user could provide user-data like this:
+ #cloud-config
+   vendor_data: {disabled_handlers: ['text/part-handler']}
+
+=== Examples ===
+There are examples in the examples subdirectory.
+Additionally, the 'tools' directory contains 'write-mime-multipart',
+which can be used to easily generate mime-multi-part files from a list
+of input files. That data can then be given to an instance.
+
+See 'write-mime-multipart --help' for usage.
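
As an alternative to write-mime-multipart, the same payload can be assembled
with the Python standard library; a minimal sketch (file names here are
placeholders):

    import sys
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    combined = MIMEMultipart()
    for fname, subtype in [('config.yaml', 'cloud-config'),
                           ('setup.sh', 'x-shellscript')]:
        with open(fname) as f:
            part = MIMEText(f.read(), subtype)  # -> text/<subtype>
        part.add_header('Content-Disposition',
                        'attachment; filename="%s"' % fname)
        combined.attach(part)
    sys.stdout.write(combined.as_string())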
diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_data.py
index 5ffe8f0a..68729c57 100644
--- a/tests/unittests/test_userdata.py
+++ b/tests/unittests/test_data.py
@@ -13,6 +13,7 @@ from email.mime.multipart import MIMEMultipart
from cloudinit import handlers
from cloudinit import helpers as c_helpers
from cloudinit import log
+from cloudinit.settings import (PER_INSTANCE)
from cloudinit import sources
from cloudinit import stages
from cloudinit import util
@@ -24,10 +25,11 @@ from tests.unittests import helpers
class FakeDataSource(sources.DataSource):
- def __init__(self, userdata):
+ def __init__(self, userdata=None, vendordata=None):
sources.DataSource.__init__(self, {}, None, None)
self.metadata = {'instance-id': INSTANCE_ID}
self.userdata_raw = userdata
+ self.vendordata_raw = vendordata
# FIXME: these tests shouldn't be checking log output??
@@ -45,6 +47,11 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
if self._log_handler and self._log:
self._log.removeHandler(self._log_handler)
+ def _patchIn(self, root):
+ self.restore()
+ self.patchOS(root)
+ self.patchUtils(root)
+
def capture_log(self, lvl=logging.DEBUG):
log_file = StringIO.StringIO()
self._log_handler = logging.StreamHandler(log_file)
@@ -68,13 +75,89 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEquals(2, len(cc))
self.assertEquals('qux', cc['baz'])
self.assertEquals('qux2', cc['bar'])
+ def test_simple_jsonp_vendor_and_user(self):
+ # test that user-data wins over vendor
+ user_blob = '''
+#cloud-config-jsonp
+[
+ { "op": "add", "path": "/baz", "value": "qux" },
+ { "op": "add", "path": "/bar", "value": "qux2" }
+]
+'''
+ vendor_blob = '''
+#cloud-config-jsonp
+[
+ { "op": "add", "path": "/baz", "value": "quxA" },
+ { "op": "add", "path": "/bar", "value": "quxB" },
+ { "op": "add", "path": "/foo", "value": "quxC" }
+]
+'''
+ new_root = self.makeDir()
+ self._patchIn(new_root)
+ initer = stages.Init()
+ initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.read_cfg()
+ initer.initialize()
+ initer.fetch()
+ _iid = initer.instancify()
+ initer.update()
+ initer.cloudify().run('consume_data',
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE)
+ mods = stages.Modules(initer)
+ (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ cfg = mods.cfg
+ self.assertIn('vendor_data', cfg)
+ self.assertEquals('qux', cfg['baz'])
+ self.assertEquals('qux2', cfg['bar'])
+ self.assertEquals('quxC', cfg['foo'])
+
+ def test_simple_jsonp_no_vendor_consumed(self):
+ # make sure that vendor data is not consumed
+ user_blob = '''
+#cloud-config-jsonp
+[
+ { "op": "add", "path": "/baz", "value": "qux" },
+ { "op": "add", "path": "/bar", "value": "qux2" },
+ { "op": "add", "path": "/vendor_data", "value": {"enabled": "false"}}
+]
+'''
+ vendor_blob = '''
+#cloud-config-jsonp
+[
+ { "op": "add", "path": "/baz", "value": "quxA" },
+ { "op": "add", "path": "/bar", "value": "quxB" },
+ { "op": "add", "path": "/foo", "value": "quxC" }
+]
+'''
+ new_root = self.makeDir()
+ self._patchIn(new_root)
+ initer = stages.Init()
+ initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.read_cfg()
+ initer.initialize()
+ initer.fetch()
+ _iid = initer.instancify()
+ initer.update()
+ initer.cloudify().run('consume_data',
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE)
+ mods = stages.Modules(initer)
+ (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ cfg = mods.cfg
+ self.assertEquals('qux', cfg['baz'])
+ self.assertEquals('qux2', cfg['bar'])
+ self.assertNotIn('foo', cfg)
+
def test_mixed_cloud_config(self):
blob_cc = '''
#cloud-config
@@ -105,12 +188,87 @@ c: d
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEquals(1, len(cc))
self.assertEquals('c', cc['a'])
+ def test_vendor_user_yaml_cloud_config(self):
+ vendor_blob = '''
+#cloud-config
+a: b
+name: vendor
+run:
+ - x
+ - y
+'''
+
+ user_blob = '''
+#cloud-config
+a: c
+vendor_data:
+ enabled: True
+ prefix: /bin/true
+name: user
+run:
+ - z
+'''
+ new_root = self.makeDir()
+ self._patchIn(new_root)
+ initer = stages.Init()
+ initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.read_cfg()
+ initer.initialize()
+ initer.fetch()
+ _iid = initer.instancify()
+ initer.update()
+ initer.cloudify().run('consume_data',
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE)
+ mods = stages.Modules(initer)
+ (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ cfg = mods.cfg
+ self.assertIn('vendor_data', cfg)
+ self.assertEquals('c', cfg['a'])
+ self.assertEquals('user', cfg['name'])
+ self.assertNotIn('x', cfg['run'])
+ self.assertNotIn('y', cfg['run'])
+ self.assertIn('z', cfg['run'])
+
+ def test_vendordata_script(self):
+ vendor_blob = '''
+#!/bin/bash
+echo "test"
+'''
+
+ user_blob = '''
+#cloud-config
+vendor_data:
+ enabled: True
+ prefix: /bin/true
+'''
+ new_root = self.makeDir()
+ self._patchIn(new_root)
+ initer = stages.Init()
+ initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.read_cfg()
+ initer.initialize()
+ initer.fetch()
+ _iid = initer.instancify()
+ initer.update()
+ initer.cloudify().run('consume_data',
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE)
+ mods = stages.Modules(initer)
+ (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ _cfg = mods.cfg
+ vendor_script = initer.paths.get_ipath_cur('vendor_scripts')
+ vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script)
+ self.assertTrue(os.path.exists(vendor_script_fns))
+
def test_merging_cloud_config(self):
blob = '''
#cloud-config
@@ -185,7 +343,7 @@ p: 1
log_file = self.capture_log(logging.WARNING)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
self.assertIn(
"Unhandled non-multipart (text/x-not-multipart) userdata:",
log_file.getvalue())
@@ -221,7 +379,7 @@ c: 4
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
contents = util.load_file(ci.paths.get_ipath("cloud_config"))
contents = util.load_yaml(contents)
self.assertTrue(isinstance(contents, dict))
@@ -244,7 +402,7 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
self.assertIn(
"Unhandled unknown content-type (text/plain)",
log_file.getvalue())
@@ -264,7 +422,7 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
self.assertEqual("", log_file.getvalue())
def test_mime_text_x_shellscript(self):
@@ -284,7 +442,7 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
self.assertEqual("", log_file.getvalue())
def test_mime_text_plain_shell(self):
@@ -304,5 +462,5 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
self.assertEqual("", log_file.getvalue())
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index e1812a88..6fc5b2ac 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -258,6 +258,14 @@ iface eth0 inet static
''')
+class TestParseShellConfig(MockerTestCase):
+ def test_no_seconds(self):
+ cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"])
+ # we could test 'sleep 2', but that would make the test run slower.
+ ret = ds.parse_shell_config(cfg)
+ self.assertEqual(ret, {"foo": "bar", "xx": "foo"})
+
+
def populate_context_dir(path, variables):
data = "# Context variables generated by OpenNebula\n"
for (k, v) in variables.iteritems():
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py
index 6d25dcf9..dd588aca 100644
--- a/tests/unittests/test_ec2_util.py
+++ b/tests/unittests/test_ec2_util.py
@@ -25,7 +25,6 @@ class TestEc2Util(helpers.TestCase):
userdata = eu.get_instance_userdata(self.VERSION, retries=0)
self.assertEquals('', userdata)
-
@hp.activate
def test_userdata_fetch_fail_server_dead(self):
hp.register_uri(hp.GET,
@@ -65,8 +64,6 @@ class TestEc2Util(helpers.TestCase):
status=200, body='123')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'),
status=200, body='0=my-public-key')
- hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'),
- status=200, body='0=my-public-key')
hp.register_uri(hp.GET,
eu.combine_url(base_url, 'public-keys/0/openssh-key'),
status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
@@ -76,7 +73,7 @@ class TestEc2Util(helpers.TestCase):
self.assertEquals(1, len(md['public-keys']))
@hp.activate
- def test_metadata_fetch_key(self):
+ def test_metadata_fetch_with_2_keys(self):
base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
body="\n".join(['hostname',
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
index c0497e08..996526d3 100644
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ b/tests/unittests/test_handler/test_handler_growpart.py
@@ -12,50 +12,9 @@ import re
import unittest
# growpart:
-# mode: auto # off, on, auto, 'growpart', 'parted'
+# mode: auto # off, on, auto, 'growpart'
# devices: ['root']
-HELP_PARTED_NO_RESIZE = """
-Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...]
-Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in
-interactive mode.
-
-OPTIONs:
-<SNIP>
-
-COMMANDs:
-<SNIP>
- quit exit program
- rescue START END rescue a lost partition near START
- and END
- resize NUMBER START END resize partition NUMBER and its file
- system
- rm NUMBER delete partition NUMBER
-<SNIP>
-Report bugs to bug-parted@gnu.org
-"""
-
-HELP_PARTED_RESIZE = """
-Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...]
-Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in
-interactive mode.
-
-OPTIONs:
-<SNIP>
-
-COMMANDs:
-<SNIP>
- quit exit program
- rescue START END rescue a lost partition near START
- and END
- resize NUMBER START END resize partition NUMBER and its file
- system
- resizepart NUMBER END resize partition NUMBER
- rm NUMBER delete partition NUMBER
-<SNIP>
-Report bugs to bug-parted@gnu.org
-"""
-
HELP_GROWPART_RESIZE = """
growpart disk partition
rewrite partition table so that partition takes up all the space it can
@@ -122,11 +81,8 @@ class TestConfig(MockerTestCase):
# Order must be correct
self.mocker.order()
- @unittest.skip("until LP: #1212444 fixed")
def test_no_resizers_auto_is_fine(self):
subp = self.mocker.replace(util.subp, passthrough=False)
- subp(['parted', '--help'], env={'LANG': 'C'})
- self.mocker.result((HELP_PARTED_NO_RESIZE, ""))
subp(['growpart', '--help'], env={'LANG': 'C'})
self.mocker.result((HELP_GROWPART_NO_RESIZE, ""))
self.mocker.replay()
@@ -144,15 +100,14 @@ class TestConfig(MockerTestCase):
self.assertRaises(ValueError, self.handle, self.name, config,
self.cloud_init, self.log, self.args)
- @unittest.skip("until LP: #1212444 fixed")
- def test_mode_auto_prefers_parted(self):
+ def test_mode_auto_prefers_growpart(self):
subp = self.mocker.replace(util.subp, passthrough=False)
- subp(['parted', '--help'], env={'LANG': 'C'})
- self.mocker.result((HELP_PARTED_RESIZE, ""))
+ subp(['growpart', '--help'], env={'LANG': 'C'})
+ self.mocker.result((HELP_GROWPART_RESIZE, ""))
self.mocker.replay()
ret = cc_growpart.resizer_factory(mode="auto")
- self.assertTrue(isinstance(ret, cc_growpart.ResizeParted))
+ self.assertTrue(isinstance(ret, cc_growpart.ResizeGrowPart))
def test_handle_with_no_growpart_entry(self):
#if no 'growpart' entry in config, then mode=auto should be used
diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py
index d9c3a455..5ffe95a2 100644
--- a/tests/unittests/test_runs/test_merge_run.py
+++ b/tests/unittests/test_runs/test_merge_run.py
@@ -35,8 +35,8 @@ class TestMergeRun(helpers.FilesystemMockingTestCase):
initer.datasource.userdata_raw = ud
_iid = initer.instancify()
initer.update()
- initer.cloudify().run('consume_userdata',
- initer.consume_userdata,
+ initer.cloudify().run('consume_data',
+ initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mirrors = initer.distro.get_option('package_mirrors')
diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py
index 60ef812a..9a7178d1 100644
--- a/tests/unittests/test_runs/test_simple_run.py
+++ b/tests/unittests/test_runs/test_simple_run.py
@@ -66,8 +66,8 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.update()
self.assertTrue(os.path.islink("var/lib/cloud/instance"))
- initer.cloudify().run('consume_userdata',
- initer.consume_userdata,
+ initer.cloudify().run('consume_data',
+ initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)