author     Scott Moser <smoser@ubuntu.com>  2014-01-17 15:30:04 -0500
committer  Scott Moser <smoser@ubuntu.com>  2014-01-17 15:30:04 -0500
commit     605335f4732246ef89b94dcc542e73f13fdef1c9 (patch)
tree       7e0ac9e33408102a04342db5b54ee0a520ef64ec /cloudinit
parent     98fd17c55b637f4e1d136c954567c1d9b23e6c20 (diff)
parent     1729b161c7569ec60ac6102a046e0b8c22457b7c (diff)
initial vendordata support
This adds the ability for a datasource to provide "vendordata". The difference is that vendordata comes from the vendor (the cloud provider), whereas user-data comes from the user. By enabling this channel, the vendor can influence how the instance is set up without modifying, or needing to understand, the user-data. Vendordata is generally consumed exactly like user-data, but the user can disable its consumption. The only datasource supporting this at the moment is SmartOS.
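For orientation, the opt-out surfaces to an operator through a 'vendor_data' dictionary in the merged system/user cloud-config (added to CFG_BUILTIN in settings.py and read in stages.py below). A minimal Python sketch of that setting as the code sees it; the values are illustrative, and the equivalent user-data YAML would be "vendor_data: {enabled: false}".

    # Illustrative shape of the 'vendor_data' setting the new code reads from
    # the merged cloud-config (values are examples, not defaults).
    vendor_data_setting = {
        'vendor_data': {
            'enabled': True,                   # set to False to skip vendordata entirely
            'prefix': ['/bin/sh', '-x'],       # interpreter prefix for vendor scripts
            'disabled_handlers': ['text/x-shellscript'],  # part content-types to exclude
        },
    }
    print(vendor_data_setting['vendor_data'].get('enabled'))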
Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/config/cc_scripts_vendor.py   |  43
-rw-r--r--  cloudinit/handlers/__init__.py          |   4
-rw-r--r--  cloudinit/handlers/cloud_config.py      |   2
-rw-r--r--  cloudinit/handlers/shell_script.py      |   2
-rw-r--r--  cloudinit/helpers.py                    |  28
-rw-r--r--  cloudinit/settings.py                   |   1
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py  |   2
-rw-r--r--  cloudinit/sources/__init__.py           |  14
-rw-r--r--  cloudinit/stages.py                     | 118
-rw-r--r--  cloudinit/user_data.py                  |   6
-rw-r--r--  cloudinit/util.py                       |  14
11 files changed, 206 insertions(+), 28 deletions(-)
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
new file mode 100644
index 00000000..0c9e504e
--- /dev/null
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -0,0 +1,43 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2014 Canonical Ltd.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit import util
+
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+
+SCRIPT_SUBDIR = 'vendor'
+
+
+def handle(name, cfg, cloud, log, _args):
+    # This is written to by the vendor data handlers;
+    # any vendor data shell scripts get placed in runparts_path
+    runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts',
+                                 SCRIPT_SUBDIR)
+
+    prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
+
+    try:
+        util.runparts(runparts_path, exe_prefix=prefix)
+    except:
+        log.warn("Failed to run module %s (%s in %s)",
+                 name, SCRIPT_SUBDIR, runparts_path)
+        raise
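The prefix lookup above relies on util.get_cfg_by_path, which walks a key path through the merged config and falls back to a default. A rough, self-contained approximation of that lookup (the helper below is a stand-in, not the cloudinit.util implementation):

    def get_by_path(cfg, path, default):
        # Walk nested dict keys; return default as soon as one is missing.
        cur = cfg
        for key in path:
            if not isinstance(cur, dict) or key not in cur:
                return default
            cur = cur[key]
        return cur

    print(get_by_path({'vendor_data': {'prefix': ['/bin/sh', '-x']}},
                      ('vendor_data', 'prefix'), []))    # ['/bin/sh', '-x']
    print(get_by_path({}, ('vendor_data', 'prefix'), []))  # []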
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 2ddc75f4..059d7495 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -187,6 +187,10 @@ def _escape_string(text):
def walker_callback(data, filename, payload, headers):
content_type = headers['Content-Type']
+    if content_type in data.get('excluded'):
+        LOG.debug('content_type "%s" is excluded', content_type)
+        return
+
if content_type in PART_CONTENT_TYPES:
walker_handle_handler(data, content_type, filename, payload)
return
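A small self-contained sketch of the new exclusion check: when the walker is handed an 'excluded' list (stages.py passes the vendor 'disabled_handlers' value through here), any MIME part whose Content-Type is on that list is skipped before a handler is looked up. The function and part data below are stand-ins, not the real handler registry.

    def walker_callback_sketch(data, filename, payload, headers):
        content_type = headers['Content-Type']
        if content_type in data.get('excluded', []):
            print('skipping excluded part %s (%s)' % (filename, content_type))
            return
        print('handling part %s (%s)' % (filename, content_type))

    part_data = {'excluded': ['text/x-shellscript']}
    walker_callback_sketch(part_data, 'part-001', '#!/bin/sh\necho hi\n',
                           {'Content-Type': 'text/x-shellscript'})
    walker_callback_sketch(part_data, 'part-002', '#cloud-config\n{}\n',
                           {'Content-Type': 'text/cloud-config'})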
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 34a73115..4232700f 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -66,6 +66,8 @@ class CloudConfigPartHandler(handlers.Handler):
handlers.Handler.__init__(self, PER_ALWAYS, version=3)
self.cloud_buf = None
self.cloud_fn = paths.get_ipath("cloud_config")
+        if 'cloud_config_path' in _kwargs:
+            self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"])
self.file_names = []
def list_types(self):
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index 62289d98..30c1ed89 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -36,6 +36,8 @@ class ShellScriptPartHandler(handlers.Handler):
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
self.script_dir = paths.get_ipath_cur('scripts')
+        if 'script_path' in _kwargs:
+            self.script_dir = paths.get_ipath_cur(_kwargs['script_path'])
def list_types(self):
return [
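Both handler changes follow the same pattern: the default output location stays as before, but a keyword argument can redirect it to a vendor-specific path name. A sketch of that selection using a stand-in Paths object (the real lookup table is extended in helpers.py below; paths are illustrative):

    class FakePaths(object):
        # Stand-in for cloudinit.helpers.Paths.
        _lookups = {'scripts': 'scripts', 'vendor_scripts': 'scripts/vendor'}

        def get_ipath_cur(self, name=None):
            return '/var/lib/cloud/instance/' + self._lookups[name]

    def pick_script_dir(paths, **_kwargs):
        script_dir = paths.get_ipath_cur('scripts')
        if 'script_path' in _kwargs:
            script_dir = paths.get_ipath_cur(_kwargs['script_path'])
        return script_dir

    paths = FakePaths()
    print(pick_script_dir(paths))                                # user-data scripts
    print(pick_script_dir(paths, script_path='vendor_scripts'))  # vendor scripts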
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index e5eac6a7..e701126e 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -200,11 +200,13 @@ class Runners(object):
class ConfigMerger(object):
def __init__(self, paths=None, datasource=None,
-                 additional_fns=None, base_cfg=None):
+                 additional_fns=None, base_cfg=None,
+                 include_vendor=True):
        self._paths = paths
        self._ds = datasource
        self._fns = additional_fns
        self._base_cfg = base_cfg
+        self._include_vendor = include_vendor
# Created on first use
self._cfg = None
@@ -237,13 +239,19 @@ class ConfigMerger(object):
# a configuration file to use when running...
if not self._paths:
return i_cfgs
-        cc_fn = self._paths.get_ipath_cur('cloud_config')
-        if cc_fn and os.path.isfile(cc_fn):
-            try:
-                i_cfgs.append(util.read_conf(cc_fn))
-            except:
-                util.logexc(LOG, 'Failed loading of cloud-config from %s',
-                            cc_fn)
+
+        cc_paths = ['cloud_config']
+        if self._include_vendor:
+            cc_paths.append('vendor_cloud_config')
+
+        for cc_p in cc_paths:
+            cc_fn = self._paths.get_ipath_cur(cc_p)
+            if cc_fn and os.path.isfile(cc_fn):
+                try:
+                    i_cfgs.append(util.read_conf(cc_fn))
+                except:
+                    util.logexc(LOG, 'Failed loading of cloud-config from %s',
+                                cc_fn)
return i_cfgs
def _read_cfg(self):
@@ -331,13 +339,17 @@ class Paths(object):
self.lookups = {
"handlers": "handlers",
"scripts": "scripts",
+ "vendor_scripts": "scripts/vendor",
"sem": "sem",
"boothooks": "boothooks",
"userdata_raw": "user-data.txt",
"userdata": "user-data.txt.i",
"obj_pkl": "obj.pkl",
"cloud_config": "cloud-config.txt",
+ "vendor_cloud_config": "vendor-cloud-config.txt",
"data": "data",
+ "vendordata_raw": "vendor-data.txt",
+ "vendordata": "vendor-data.txt.i",
}
# Set when a datasource becomes active
self.datasource = ds
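For reference, the new lookup keys resolve to files under the instance directory (typically /var/lib/cloud/instance on an installed system; the path below is an assumption used for illustration). The include_vendor=False option exists so that _consume_vendordata in stages.py can build a merged config from everything except the vendor-supplied cloud-config, which prevents vendordata from enabling itself.

    import os

    instance_dir = '/var/lib/cloud/instance'   # illustrative instance dir
    new_lookups = {
        'vendor_scripts': 'scripts/vendor',
        'vendor_cloud_config': 'vendor-cloud-config.txt',
        'vendordata_raw': 'vendor-data.txt',
        'vendordata': 'vendor-data.txt.i',
    }
    for key, rel in sorted(new_lookups.items()):
        print('%-20s -> %s' % (key, os.path.join(instance_dir, rel)))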
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 5df7f557..7be2199a 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -52,6 +52,7 @@ CFG_BUILTIN = {
},
'distro': 'ubuntu',
},
+ 'vendor_data': {'enabled': True, 'prefix': []},
}
# Valid frequencies of handlers/modules
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 551b20c4..6593ce6e 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -47,6 +47,7 @@ SMARTOS_ATTRIB_MAP = {
'iptables_disable': ('iptables_disable', True),
'motd_sys_info': ('motd_sys_info', True),
'availability_zone': ('datacenter_name', True),
+ 'vendordata': ('sdc:operator-script', False),
}
DS_NAME = 'SmartOS'
@@ -154,6 +155,7 @@ class DataSourceSmartOS(sources.DataSource):
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
+ self.vendordata_raw = md['vendordata']
return True
def device_name_to_device(self, name):
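On SmartOS the vendordata channel is fed from the 'sdc:operator-script' metadata key: the attribute map copies it into md['vendordata'], which then becomes vendordata_raw. A minimal sketch of that mapping with made-up metadata values; only the vendordata entry from SMARTOS_ATTRIB_MAP is shown.

    attrib_map = {'vendordata': ('sdc:operator-script', False)}
    fetched_metadata = {'sdc:operator-script': '#!/bin/sh\necho "vendor boot hook"\n'}

    md = {}
    for ci_name, (smartos_key, _strip) in attrib_map.items():
        md[ci_name] = fetched_metadata.get(smartos_key)

    vendordata_raw = md['vendordata']
    print(vendordata_raw)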
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 7dc1fbde..4b3bf62f 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -53,6 +53,8 @@ class DataSource(object):
self.userdata = None
self.metadata = None
self.userdata_raw = None
+ self.vendordata = None
+ self.vendordata_raw = None
# find the datasource config name.
# remove 'DataSource' from classname on front, and remove 'Net' on end.
@@ -77,9 +79,14 @@ class DataSource(object):
if self.userdata is None:
self.userdata = self.ud_proc.process(self.get_userdata_raw())
if apply_filter:
- return self._filter_userdata(self.userdata)
+ return self._filter_xdata(self.userdata)
return self.userdata
+    def get_vendordata(self):
+        if self.vendordata is None:
+            self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
+        return self.vendordata
+
@property
def launch_index(self):
if not self.metadata:
@@ -88,7 +95,7 @@ class DataSource(object):
return self.metadata['launch-index']
return None
- def _filter_userdata(self, processed_ud):
+ def _filter_xdata(self, processed_ud):
filters = [
launch_index.Filter(util.safe_int(self.launch_index)),
]
@@ -104,6 +111,9 @@ class DataSource(object):
def get_userdata_raw(self):
return self.userdata_raw
+    def get_vendordata_raw(self):
+        return self.vendordata_raw
+
# the data sources' config_obj is a cloud-config formated
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
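get_vendordata mirrors get_userdata: the raw blob is processed once through the UserDataProcessor and cached. One deliberate difference visible above is that vendordata is not passed through the launch-index filter (_filter_xdata); only user-data is filtered. A minimal stand-in sketch of the caching behaviour (FakeProcessor and FakeDataSource are not real cloud-init classes):

    class FakeProcessor(object):
        def process(self, blob):
            # The real UserDataProcessor builds a MIME multipart message.
            return 'processed(%r)' % (blob,)

    class FakeDataSource(object):
        def __init__(self, ud_proc):
            self.ud_proc = ud_proc
            self.vendordata = None
            self.vendordata_raw = None

        def get_vendordata_raw(self):
            return self.vendordata_raw

        def get_vendordata(self):
            if self.vendordata is None:
                self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
            return self.vendordata

    ds = FakeDataSource(FakeProcessor())
    ds.vendordata_raw = '#!/bin/sh\necho "configured by vendor"\n'
    print(ds.get_vendordata())   # processed once, then cached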
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 07c55802..593b72a2 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -123,6 +123,7 @@ class Init(object):
os.path.join(c_dir, 'scripts', 'per-instance'),
os.path.join(c_dir, 'scripts', 'per-once'),
os.path.join(c_dir, 'scripts', 'per-boot'),
+ os.path.join(c_dir, 'scripts', 'vendor'),
os.path.join(c_dir, 'seed'),
os.path.join(c_dir, 'instances'),
os.path.join(c_dir, 'handlers'),
@@ -319,6 +320,7 @@ class Init(object):
if not self._write_to_cache():
return
self._store_userdata()
+ self._store_vendordata()
def _store_userdata(self):
raw_ud = "%s" % (self.datasource.get_userdata_raw())
@@ -326,11 +328,20 @@ class Init(object):
processed_ud = "%s" % (self.datasource.get_userdata())
util.write_file(self._get_ipath('userdata'), processed_ud, 0600)
-    def _default_userdata_handlers(self):
-        opts = {
+    def _store_vendordata(self):
+        raw_vd = "%s" % (self.datasource.get_vendordata_raw())
+        util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0600)
+        processed_vd = "%s" % (self.datasource.get_vendordata())
+        util.write_file(self._get_ipath('vendordata'), processed_vd, 0600)
+
+    def _default_handlers(self, opts=None):
+        if opts is None:
+            opts = {}
+
+        opts.update({
            'paths': self.paths,
            'datasource': self.datasource,
-        }
+        })
# TODO(harlowja) Hmmm, should we dynamically import these??
def_handlers = [
cc_part.CloudConfigPartHandler(**opts),
@@ -340,7 +351,23 @@ class Init(object):
]
return def_handlers
-    def consume_userdata(self, frequency=PER_INSTANCE):
+    def _default_userdata_handlers(self):
+        return self._default_handlers()
+
+    def _default_vendordata_handlers(self):
+        return self._default_handlers(
+            opts={'script_path': 'vendor_scripts',
+                  'cloud_config_path': 'vendor_cloud_config'})
+
+    def _do_handlers(self, data_msg, c_handlers_list, frequency,
+                     excluded=None):
+        """
+        Generalized handlers suitable for use with either vendordata
+        or userdata.
+        """
+        if excluded is None:
+            excluded = []
+
cdir = self.paths.get_cpath("handlers")
idir = self._get_ipath("handlers")
@@ -352,12 +379,6 @@ class Init(object):
if d and d not in sys.path:
sys.path.insert(0, d)
- # Ensure datasource fetched before activation (just incase)
- user_data_msg = self.datasource.get_userdata(True)
-
- # This keeps track of all the active handlers
- c_handlers = helpers.ContentHandlers()
-
def register_handlers_in_dir(path):
# Attempts to register any handler modules under the given path.
if not path or not os.path.isdir(path):
@@ -382,13 +403,16 @@ class Init(object):
util.logexc(LOG, "Failed to register handler from %s",
fname)
+ # This keeps track of all the active handlers
+ c_handlers = helpers.ContentHandlers()
+
# Add any handlers in the cloud-dir
register_handlers_in_dir(cdir)
# Register any other handlers that come from the default set. This
# is done after the cloud-dir handlers so that the cdir modules can
# take over the default user-data handler content-types.
- for mod in self._default_userdata_handlers():
+ for mod in c_handlers_list:
types = c_handlers.register(mod, overwrite=False)
if types:
LOG.debug("Added default handler for %s from %s", types, mod)
@@ -406,7 +430,7 @@ class Init(object):
handlers.call_begin(mod, data, frequency)
c_handlers.initialized.append(mod)
- def walk_handlers():
+ def walk_handlers(excluded):
# Walk the user data
part_data = {
'handlers': c_handlers,
@@ -419,9 +443,9 @@ class Init(object):
# to help write there contents to files with numbered
# names...
'handlercount': 0,
+ 'excluded': excluded,
}
- handlers.walk(user_data_msg, handlers.walker_callback,
- data=part_data)
+ handlers.walk(data_msg, handlers.walker_callback, data=part_data)
def finalize_handlers():
# Give callbacks opportunity to finalize
@@ -438,10 +462,16 @@ class Init(object):
try:
init_handlers()
- walk_handlers()
+ walk_handlers(excluded)
finally:
finalize_handlers()
+    def consume_data(self, frequency=PER_INSTANCE):
+        # Consume the userdata first, because we want the part
+        # handlers to run first (for merging purposes)
+        self._consume_userdata(frequency)
+        self._consume_vendordata(frequency)
+
# Perform post-consumption adjustments so that
# modules that run during the init stage reflect
# this consumed set.
@@ -453,6 +483,64 @@ class Init(object):
# objects before the load of the userdata happened,
# this is expected.
+    def _consume_vendordata(self, frequency=PER_INSTANCE):
+        """
+        Consume the vendordata and run the part handlers on it
+        """
+        # User-data should have been consumed first.
+        # So we merge the other available cloud-configs (everything except
+        # the vendor-provided one), and check whether or not we should
+        # consume vendor data at all. That gives the user a chance to override.
+        if not self.datasource.get_vendordata_raw():
+            LOG.debug("no vendordata from datasource")
+            return
+
+        _cc_merger = helpers.ConfigMerger(paths=self._paths,
+                                          datasource=self.datasource,
+                                          additional_fns=[],
+                                          base_cfg=self.cfg,
+                                          include_vendor=False)
+        vdcfg = _cc_merger.cfg.get('vendor_data', {})
+
+        if not isinstance(vdcfg, dict):
+            vdcfg = {'enabled': False}
+            LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg)
+
+        enabled = vdcfg.get('enabled')
+        no_handlers = vdcfg.get('disabled_handlers', None)
+
+        if not util.is_true(enabled):
+            LOG.debug("vendordata consumption is disabled.")
+            return
+
+        LOG.debug("vendor data will be consumed. disabled_handlers=%s",
+                  no_handlers)
+
+        # Ensure vendordata source fetched before activation (just in case)
+        vendor_data_msg = self.datasource.get_vendordata()
+
+        # This keeps track of all the active handlers, while excluding what
+        # the user doesn't want run (e.g. boot_hook, cloud_config, shell_script)
+        c_handlers_list = self._default_vendordata_handlers()
+
+        # Run the handlers
+        self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
+                          excluded=no_handlers)
+
+    def _consume_userdata(self, frequency=PER_INSTANCE):
+        """
+        Consume the userdata and run the part handlers
+        """
+
+        # Ensure datasource fetched before activation (just in case)
+        user_data_msg = self.datasource.get_userdata(True)
+
+        # This keeps track of all the active handlers
+        c_handlers_list = self._default_handlers()
+
+        # Run the handlers
+        self._do_handlers(user_data_msg, c_handlers_list, frequency)
+
class Modules(object):
def __init__(self, init, cfg_files=None):
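The ordering in consume_data matters: user-data is consumed first so that, by the time _consume_vendordata builds its ConfigMerger (with include_vendor=False), any user-supplied 'vendor_data' override is already on disk. A stand-in sketch of that interaction; the functions below only model the control flow, not the real handler machinery.

    state = {'user_cloud_config': None}

    def consume_userdata_sketch():
        # Pretend the user's cloud-config disabled vendordata.
        state['user_cloud_config'] = {'vendor_data': {'enabled': False}}

    def consume_vendordata_sketch():
        merged = state['user_cloud_config'] or {}
        vdcfg = merged.get('vendor_data', {'enabled': True})
        if not vdcfg.get('enabled', True):
            print('vendordata consumption is disabled by user config')
            return
        print('consuming vendordata')

    consume_userdata_sketch()
    consume_vendordata_sketch()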
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index d49ea094..3032ef70 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -88,7 +88,11 @@ class UserDataProcessor(object):
def process(self, blob):
accumulating_msg = MIMEMultipart()
-        self._process_msg(convert_string(blob), accumulating_msg)
+        if isinstance(blob, list):
+            for b in blob:
+                self._process_msg(convert_string(b), accumulating_msg)
+        else:
+            self._process_msg(convert_string(blob), accumulating_msg)
return accumulating_msg
def _process_msg(self, base_msg, append_msg):
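The list support lets a datasource hand the processor several blobs and receive one combined multipart message, which is how vendordata made up of multiple pieces can flow through the same pipeline as user-data. A usage sketch, assuming cloud-init is importable; the blobs and the bare Paths({}) construction are illustrative:

    from cloudinit import helpers
    from cloudinit.user_data import UserDataProcessor

    processor = UserDataProcessor(helpers.Paths({}))
    blobs = [
        "#cloud-config\nruncmd: ['echo one']\n",
        "#!/bin/sh\necho two\n",
    ]
    combined = processor.process(blobs)   # each blob is processed into one message
    print(combined.get_content_type())    # multipart/mixed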
diff --git a/cloudinit/util.py b/cloudinit/util.py
index a37172dc..3ce54f28 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -608,18 +608,28 @@ def del_dir(path):
shutil.rmtree(path)
-def runparts(dirp, skip_no_exist=True):
+def runparts(dirp, skip_no_exist=True, exe_prefix=None):
if skip_no_exist and not os.path.isdir(dirp):
return
failed = []
attempted = []
+
+    if exe_prefix is None:
+        prefix = []
+    elif isinstance(exe_prefix, str):
+        prefix = [str(exe_prefix)]
+    elif isinstance(exe_prefix, list):
+        prefix = exe_prefix
+    else:
+        raise TypeError("exe_prefix must be None, str, or list")
+
for exe_name in sorted(os.listdir(dirp)):
exe_path = os.path.join(dirp, exe_name)
if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
attempted.append(exe_path)
try:
- subp([exe_path], capture=False)
+ subp(prefix + [exe_path], capture=False)
except ProcessExecutionError as e:
logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
failed.append(e)
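Usage of the extended runparts is straightforward: with no prefix, behaviour is unchanged; with a prefix, each executable in the directory is invoked as prefix + [script]. A sketch, assuming cloud-init is importable; the directory and prefix values are illustrative and mirror what cc_scripts_vendor passes through from vendor_data['prefix']:

    from cloudinit import util

    vendor_script_dir = '/var/lib/cloud/instance/scripts/vendor'

    # Unchanged default behaviour: run each executable file, sorted by name.
    util.runparts(vendor_script_dir)

    # With a prefix, each script runs as ['/bin/sh', '-x', <script>].
    util.runparts(vendor_script_dir, exe_prefix=['/bin/sh', '-x'])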