From 47d15924ea38f249a2c54d33dd24c3c284c4eb72 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 9 Oct 2013 12:22:06 -0700 Subject: Log message around import failure In certain cases import failure is expected and in certain cases it is not expected, in either case it is useful to at least log the failure. --- cloudinit/importer.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/importer.py b/cloudinit/importer.py index 71cf2726..a094141a 100644 --- a/cloudinit/importer.py +++ b/cloudinit/importer.py @@ -36,6 +36,7 @@ def find_module(base_name, search_paths, required_attrs=None): found_places = [] if not required_attrs: required_attrs = [] + # NOTE(harlowja): translate the search paths to include the base name. real_paths = [] for path in search_paths: real_path = [] @@ -50,8 +51,9 @@ def find_module(base_name, search_paths, required_attrs=None): mod = None try: mod = import_module(full_path) - except ImportError: - pass + except ImportError as e: + LOG.debug("Failed at attempted import of '%s' due to: %s", + full_path, e) if not mod: continue found_attrs = 0 -- cgit v1.2.3 From 9c762cc3fa13782397a15bd3c68e9c62a3cba689 Mon Sep 17 00:00:00 2001 From: Shraddha Pandhe Date: Fri, 6 Dec 2013 11:16:17 -0800 Subject: This is a new debug module thats supposed to print out some information about the instance being created. The module can be included at any stage of the process - init/config/final LP: #1258619 --- cloudinit/config/cc_debug.py | 82 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 cloudinit/config/cc_debug.py (limited to 'cloudinit') diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py new file mode 100644 index 00000000..86c61d68 --- /dev/null +++ b/cloudinit/config/cc_debug.py @@ -0,0 +1,82 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2013 Yahoo! Inc. 
+# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from StringIO import StringIO + +from cloudinit import util + +import copy + +import yaml + + +def _format_yaml(obj): + try: + formatted = yaml.safe_dump(obj, + line_break="\n", + indent=4, + explicit_start=True, + explicit_end=True, + default_flow_style=False) + return formatted.strip() + except: + return "???" + + +def _make_header(text): + header = StringIO() + header.write("-" * 80) + header.write("\n") + header.write(text.center(80, ' ')) + header.write("\n") + header.write("-" * 80) + header.write("\n") + return header.getvalue() + + +def handle(name, cfg, cloud, log, _args): + verbose = util.get_cfg_option_bool(cfg, 'verbose', default=True) + if not verbose: + log.debug(("Skipping module named %s," + " verbose printing disabled"), name) + return + # Clean out some keys that we just don't care about showing... + dump_cfg = copy.deepcopy(cfg) + for k in ['log_cfgs']: + dump_cfg.pop(k, None) + all_keys = list(dump_cfg.keys()) + for k in all_keys: + if k.startswith("_"): + dump_cfg.pop(k, None) + # Now dump it... 
+ to_print = StringIO() + to_print.write(_make_header("Config")) + to_print.write(_format_yaml(dump_cfg)) + to_print.write("\n") + to_print.write(_make_header("MetaData")) + to_print.write(_format_yaml(cloud.datasource.metadata)) + to_print.write("\n") + to_print.write(_make_header("Misc")) + to_print.write("Datasource: %s\n" % (util.obj_name(cloud.datasource))) + to_print.write("Distro: %s\n" % (util.obj_name(cloud.distro))) + to_print.write("Hostname: %s\n" % (cloud.get_hostname(True))) + to_print.write("Instance ID: %s\n" % (cloud.get_instance_id())) + to_print.write("Locale: %s\n" % (cloud.get_locale())) + to_print.write("Launch IDX: %s\n" % (cloud.launch_index)) + contents = to_print.getvalue() + for line in contents.splitlines(): + line = "ci-info: %s\n" % (line) + util.multi_log(line, console=True, stderr=False) -- cgit v1.2.3 From c0d263968d93e44fbaaa98d284690ac93a6ce024 Mon Sep 17 00:00:00 2001 From: Shraddha Pandhe Date: Wed, 11 Dec 2013 14:21:18 -0800 Subject: bug: 1258619 added namespace for config options, output file, commandline argument for output file --- cloudinit/config/cc_debug.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 86c61d68..85dc5c58 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -15,11 +15,9 @@ # along with this program. If not, see . 
from StringIO import StringIO - from cloudinit import util - +from cloudinit import type_utils import copy - import yaml @@ -47,12 +45,17 @@ def _make_header(text): return header.getvalue() -def handle(name, cfg, cloud, log, _args): - verbose = util.get_cfg_option_bool(cfg, 'verbose', default=True) +def handle(name, cfg, cloud, log, args): + verbose = util.get_cfg_by_path(cfg, ('debug','verbose'), default=True) if not verbose: log.debug(("Skipping module named %s," " verbose printing disabled"), name) return + out_file = None + if args: + out_file = args[0] + else: + out_file = util.get_cfg_by_path(cfg, ('debug','output')) # Clean out some keys that we just don't care about showing... dump_cfg = copy.deepcopy(cfg) for k in ['log_cfgs']: @@ -70,8 +73,8 @@ def handle(name, cfg, cloud, log, _args): to_print.write(_format_yaml(cloud.datasource.metadata)) to_print.write("\n") to_print.write(_make_header("Misc")) - to_print.write("Datasource: %s\n" % (util.obj_name(cloud.datasource))) - to_print.write("Distro: %s\n" % (util.obj_name(cloud.distro))) + to_print.write("Datasource: %s\n" % (type_utils.obj_name(cloud.datasource))) + to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro))) to_print.write("Hostname: %s\n" % (cloud.get_hostname(True))) to_print.write("Instance ID: %s\n" % (cloud.get_instance_id())) to_print.write("Locale: %s\n" % (cloud.get_locale())) @@ -79,4 +82,7 @@ def handle(name, cfg, cloud, log, _args): contents = to_print.getvalue() for line in contents.splitlines(): line = "ci-info: %s\n" % (line) - util.multi_log(line, console=True, stderr=False) + if out_file: + util.write_file(out_file, line, 0644, "a") + else: + util.multi_log(line, console=True, stderr=False) -- cgit v1.2.3 From 7c6f2de4f7f66874d6c9131c04cb84637955e5ce Mon Sep 17 00:00:00 2001 From: Shraddha Pandhe Date: Wed, 11 Dec 2013 15:41:14 -0800 Subject: essage --- cloudinit/config/cc_debug.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 
'cloudinit') diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 85dc5c58..a3d99da8 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -67,10 +67,10 @@ def handle(name, cfg, cloud, log, args): # Now dump it... to_print = StringIO() to_print.write(_make_header("Config")) - to_print.write(_format_yaml(dump_cfg)) + to_print.write(util.yaml_dumps(dump_cfg)) to_print.write("\n") to_print.write(_make_header("MetaData")) - to_print.write(_format_yaml(cloud.datasource.metadata)) + to_print.write(util.yaml_dumps(cloud.datasource.metadata)) to_print.write("\n") to_print.write(_make_header("Misc")) to_print.write("Datasource: %s\n" % (type_utils.obj_name(cloud.datasource))) @@ -80,9 +80,11 @@ def handle(name, cfg, cloud, log, args): to_print.write("Locale: %s\n" % (cloud.get_locale())) to_print.write("Launch IDX: %s\n" % (cloud.launch_index)) contents = to_print.getvalue() + content_to_file = [] for line in contents.splitlines(): line = "ci-info: %s\n" % (line) - if out_file: - util.write_file(out_file, line, 0644, "a") - else: - util.multi_log(line, console=True, stderr=False) + content_to_file.append(line) + if out_file: + util.write_file(out_file, "".join(content_to_file), 0644, "w") + else: + util.multi_log("".join(content_to_file), console=True, stderr=False) -- cgit v1.2.3 From bce8220f1688af1e155661bfd57e73fe23c42522 Mon Sep 17 00:00:00 2001 From: Shraddha Pandhe Date: Wed, 11 Dec 2013 15:51:40 -0800 Subject: Removed method _format_yaml --- cloudinit/config/cc_debug.py | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index a3d99da8..971af71b 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -21,19 +21,6 @@ import copy import yaml -def _format_yaml(obj): - try: - formatted = yaml.safe_dump(obj, - line_break="\n", - indent=4, - explicit_start=True, - explicit_end=True, - 
default_flow_style=False) - return formatted.strip() - except: - return "???" - - def _make_header(text): header = StringIO() header.write("-" * 80) -- cgit v1.2.3 From 67ede943d3a02878061be2314300644c636b14f8 Mon Sep 17 00:00:00 2001 From: Shraddha Pandhe Date: Wed, 11 Dec 2013 15:56:08 -0800 Subject: Removed yaml import --- cloudinit/config/cc_debug.py | 1 - 1 file changed, 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 971af71b..c0a447cb 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -18,7 +18,6 @@ from StringIO import StringIO from cloudinit import util from cloudinit import type_utils import copy -import yaml def _make_header(text): -- cgit v1.2.3 From 5414797f1b9286ddbe82c8637dfff74cf352b967 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 12 Dec 2013 11:47:53 -0500 Subject: fix pep8 and pylint warnings This fixes warnings raised by: ./tools/run-pep8 cloudinit/config/cc_debug.py ./tools/run-pylint cloudinit/config/cc_debug.py --- cloudinit/config/cc_debug.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index c0a447cb..55fdf2dd 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -32,7 +32,7 @@ def _make_header(text): def handle(name, cfg, cloud, log, args): - verbose = util.get_cfg_by_path(cfg, ('debug','verbose'), default=True) + verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True) if not verbose: log.debug(("Skipping module named %s," " verbose printing disabled"), name) @@ -41,7 +41,7 @@ def handle(name, cfg, cloud, log, args): if args: out_file = args[0] else: - out_file = util.get_cfg_by_path(cfg, ('debug','output')) + out_file = util.get_cfg_by_path(cfg, ('debug', 'output')) # Clean out some keys that we just don't care about showing... 
dump_cfg = copy.deepcopy(cfg) for k in ['log_cfgs']: @@ -59,7 +59,8 @@ def handle(name, cfg, cloud, log, args): to_print.write(util.yaml_dumps(cloud.datasource.metadata)) to_print.write("\n") to_print.write(_make_header("Misc")) - to_print.write("Datasource: %s\n" % (type_utils.obj_name(cloud.datasource))) + to_print.write("Datasource: %s\n" % + (type_utils.obj_name(cloud.datasource))) to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro))) to_print.write("Hostname: %s\n" % (cloud.get_hostname(True))) to_print.write("Instance ID: %s\n" % (cloud.get_instance_id())) -- cgit v1.2.3 From fd5231ae771cd3b87c26ac2b0839fb672bf0acee Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 12 Dec 2013 11:50:55 -0500 Subject: be verbose explicitly if run from cmdline. Let the command line (or module args) that set outfile explicitly override a config'd value of 'verbose'. Ie, if /etc/cloud/cloud.cfg.d/my.cfg had: debug: verbose: False but the user ran: cloud-init single --frequency=always --name=debug output.txt Then they probably wanted to have the debug in output.txt even though verbose was configured to False. 
--- cloudinit/config/cc_debug.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 55fdf2dd..cfd31fa1 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -33,15 +33,17 @@ def _make_header(text): def handle(name, cfg, cloud, log, args): verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True) - if not verbose: - log.debug(("Skipping module named %s," - " verbose printing disabled"), name) - return - out_file = None if args: + # if args are provided (from cmdline) then explicitly set verbose out_file = args[0] + verbose = True else: out_file = util.get_cfg_by_path(cfg, ('debug', 'output')) + + if not verbose: + log.debug(("Skipping module named %s," + " verbose printing disabled"), name) + return # Clean out some keys that we just don't care about showing... dump_cfg = copy.deepcopy(cfg) for k in ['log_cfgs']: -- cgit v1.2.3 From 66ed882dc3cf24395386869ea3c1d54e8f22c38b Mon Sep 17 00:00:00 2001 From: James Slagle Date: Fri, 13 Dec 2013 10:40:51 -0500 Subject: Cast file path to string. Before passing a path into selinux.matchpathcon, it needs to be casted to a string, since the path could be unicode and selinux.matchpathcon does not support unicode. Closes-Bug: #1260072 LP: #1260072 --- cloudinit/util.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/util.py b/cloudinit/util.py index a8ddb390..a37172dc 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -170,6 +170,8 @@ class SeLinuxGuard(object): def __exit__(self, excp_type, excp_value, excp_traceback): if self.selinux and self.selinux.is_selinux_enabled(): path = os.path.realpath(os.path.expanduser(self.path)) + # path should be a string, not unicode + path = str(path) do_restore = False try: # See if even worth restoring?? 
-- cgit v1.2.3 From 923a7d0efc3cf3224b891dc783321018d59ce33e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 14 Dec 2013 14:17:31 -0500 Subject: support calling apt with eatmydata, enable by default if available. This allows a general config option to prefix apt-get commands via 'apt_get_wrapper'. By default, the command is set to 'eatmydata', and the mode set to 'auto'. That means if eatmydata is available (via which), it will use it. The 'command' can be either a array or a string. LP: #1236531 --- ChangeLog | 2 ++ cloudinit/distros/debian.py | 28 ++++++++++++++++++++++++++-- doc/examples/cloud-config.txt | 9 +++++++-- 3 files changed, 35 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/ChangeLog b/ChangeLog index 1f4ca198..1b7948f7 100644 --- a/ChangeLog +++ b/ChangeLog @@ -3,6 +3,8 @@ - Add a debug log message around import failures - add a 'debug' module for easily printing out some information about datasource and cloud-init [Shraddha Pandhe] + - support running apt with 'eatmydata' via configuration token + apt_get_wrapper (LP: #1236531). 0.7.4: - fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a partitioned block device with target filesystem on ephemeral0.1. 
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 8fe49cbe..1ae232fd 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -36,6 +36,10 @@ LOG = logging.getLogger(__name__) APT_GET_COMMAND = ('apt-get', '--option=Dpkg::Options::=--force-confold', '--option=Dpkg::options::=--force-unsafe-io', '--assume-yes', '--quiet') +APT_GET_WRAPPER = { + 'command': 'eatmydata', + 'enabled': 'auto', +} class Distro(distros.Distro): @@ -148,7 +152,13 @@ class Distro(distros.Distro): # See: http://tiny.cc/kg91fw # Or: http://tiny.cc/mh91fw e['DEBIAN_FRONTEND'] = 'noninteractive' - cmd = list(self.get_option("apt_get_command", APT_GET_COMMAND)) + + wcfg = self.get_option("apt_get_wrapper", APT_GET_WRAPPER) + cmd = _get_wrapper_prefix( + wcfg.get('command', APT_GET_WRAPPER['command']), + wcfg.get('enabled', APT_GET_WRAPPER['enabled'])) + + cmd.extend(list(self.get_option("apt_get_command", APT_GET_COMMAND))) if args and isinstance(args, str): cmd.append(args) @@ -166,7 +176,9 @@ class Distro(distros.Distro): cmd.extend(pkglist) # Allow the output of this to flow outwards (ie not be captured) - util.subp(cmd, env=e, capture=False) + util.log_time(logfunc=LOG.debug, + msg="apt-%s [%s]" % (command, ' '.join(cmd)), func=util.subp, + args=(cmd,), kwargs={'env': e, 'capture': False}) def update_package_sources(self): self._runner.run("update-sources", self.package_command, @@ -175,3 +187,15 @@ class Distro(distros.Distro): def get_primary_arch(self): (arch, _err) = util.subp(['dpkg', '--print-architecture']) return str(arch).strip() + + +def _get_wrapper_prefix(cmd, mode): + if isinstance(cmd, str): + cmd = [str(cmd)] + + if (util.is_true(mode) or + (str(mode).lower() == "auto" and cmd[0] and + util.which(cmd[0]))): + return cmd + else: + return [] diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index 8d756c61..61fa6065 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -147,8 
+147,13 @@ apt_sources: # '--option=Dpkg::options::=--force-unsafe-io', '--assume-yes', '--quiet'] # # apt_get_upgrade_subcommand: -# Specify a different subcommand for 'upgrade. The default is 'dist-upgrade'. -# This is the subcommand that is invoked if package_upgrade is set to true above. +# Specify a different subcommand for 'upgrade. The default is 'dist-upgrade'. +# This is the subcommand that is invoked if package_upgrade is set to true above. +# +# apt_get_wrapper: +# command: eatmydata +# enabled: [True, False, "auto"] +# # Install additional packages on first boot # -- cgit v1.2.3 From a5727fe1477c9cc4288d1ac41f70bd1ab7d7928a Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Wed, 8 Jan 2014 17:16:24 -0700 Subject: Significant re-working of the userdata handling and introduction of vendordata. Vendordata is a datasource provided userdata-like blob that is parsed similiarly to userdata, execept at the user's pleasure. cloudinit/config/cc_scripts_vendor.py: added vendor script cloud config cloudinit/config/cc_vendor_scripts_per_boot.py: added vendor per boot cloud config cloudinit/config/cc_vendor_scripts_per_instance.py: added vendor per instance vendor cloud config cloudinit/config/cc_vendor_scripts_per_once.py: added per once vendor cloud config script doc/examples/cloud-config-vendor-data.txt: documentation of vendor-data examples doc/vendordata.txt: documentation of vendordata for vendors (RENAMED) tests/unittests/test_userdata.py => tests/unittests/test_userdata.py TO: tests/unittests/test_userdata.py => tests/unittests/test_data.py: userdata test cases are not expanded to confirm superiority over vendor data. bin/cloud-init: change instances of 'consume_userdata' to 'consume_data' cloudinit/handlers/cloud_config.py: Added vendor script handling to default cloud-config modules cloudinit/handlers/shell_script.py: Added ability to change the path key to support vendor provided 'vendor-scripts'. Defaults to 'script'. 
cloudinit/helpers.py: - Changed ConfigMerger to include handling of vendordata. - Changed helpers to include paths for vendordata. cloudinit/sources/__init__.py: Added functions for helping vendordata - get_vendordata_raw(): returns vendordata unprocessed - get_vendordata(): returns vendordata through userdata processor - has_vendordata(): indicator if vendordata is present - consume_vendordata(): datasource directive for indicating explict user approval of vendordata consumption. Defaults to 'false' cloudinit/stages.py: Re-jiggered for handling of vendordata - _initial_subdirs(): added vendor script definition - update(): added self._store_vendordata() - [ADDED] _store_vendordata(): store vendordata - _get_default_handlers(): modified to allow for filtering which handlers will run against vendordata - [ADDED] _do_handlers(): moved logic from consume_userdata to _do_handlers(). This allows _consume_vendordata() and _consume_userdata() to use the same code path. - [RENAMED] consume_userdata() to _consume_userdata() - [ADDED] _consume_vendordata() for handling vendordata - run after userdata to get user cloud-config - uses ConfigMerger to get the configuration from the instance perspective about whether or not to use vendordata - [ADDED] consume_data() to call _consume_{user,vendor}data cloudinit/util.py: - [ADDED] get_nested_option_as_list() used by cc_vendor* for getting a nested value from a dict and returned as a list - runparts(): added 'exe_prefix' for running exe with a prefix, used by cc_vendor* config/cloud.cfg: Added vendor script execution as default tests/unittests/test_runs/test_merge_run.py: changed consume_userdata() to consume_data() tests/unittests/test_runs/test_simple_run.py: changed consume_userdata() to consume_data() --- bin/cloud-init | 6 +- cloudinit/config/cc_scripts_vendor.py | 44 ++ cloudinit/config/cc_vendor_scripts_per_boot.py | 43 ++ cloudinit/config/cc_vendor_scripts_per_instance.py | 43 ++ cloudinit/config/cc_vendor_scripts_per_once.py 
| 43 ++ cloudinit/handlers/cloud_config.py | 2 + cloudinit/handlers/shell_script.py | 2 + cloudinit/helpers.py | 29 +- cloudinit/sources/__init__.py | 28 +- cloudinit/stages.py | 158 ++++++- cloudinit/user_data.py | 6 +- cloudinit/util.py | 30 +- config/cloud.cfg | 4 + doc/examples/cloud-config-vendor-data.txt | 16 + doc/vendordata.txt | 93 ++++ tests/unittests/test_data.py | 505 +++++++++++++++++++++ tests/unittests/test_runs/test_merge_run.py | 4 +- tests/unittests/test_runs/test_simple_run.py | 4 +- tests/unittests/test_userdata.py | 308 ------------- 19 files changed, 1024 insertions(+), 344 deletions(-) create mode 100644 cloudinit/config/cc_scripts_vendor.py create mode 100644 cloudinit/config/cc_vendor_scripts_per_boot.py create mode 100644 cloudinit/config/cc_vendor_scripts_per_instance.py create mode 100644 cloudinit/config/cc_vendor_scripts_per_once.py create mode 100644 doc/examples/cloud-config-vendor-data.txt create mode 100644 doc/vendordata.txt create mode 100644 tests/unittests/test_data.py delete mode 100644 tests/unittests/test_userdata.py (limited to 'cloudinit') diff --git a/bin/cloud-init b/bin/cloud-init index b4f9fd07..80a1df05 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -261,8 +261,8 @@ def main_init(name, args): # Attempt to consume the data per instance. # This may run user-data handlers and/or perform # url downloads and such as needed. - (ran, _results) = init.cloudify().run('consume_userdata', - init.consume_userdata, + (ran, _results) = init.cloudify().run('consume_data', + init.consume_data, args=[PER_INSTANCE], freq=PER_INSTANCE) if not ran: @@ -271,7 +271,7 @@ def main_init(name, args): # # See: https://bugs.launchpad.net/bugs/819507 for a little # reason behind this... 
- init.consume_userdata(PER_ALWAYS) + init.consume_data(PER_ALWAYS) except Exception: util.logexc(LOG, "Consuming user data failed!") return 1 diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py new file mode 100644 index 00000000..5809a4ba --- /dev/null +++ b/cloudinit/config/cc_scripts_vendor.py @@ -0,0 +1,44 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2011-2014 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# +# Author: Scott Moser +# Author: Ben Howard +# Author: Juerg Haefliger +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os + +from cloudinit import util + +from cloudinit.settings import PER_INSTANCE + +frequency = PER_INSTANCE + +SCRIPT_SUBDIR = 'vendor' + + +def handle(name, _cfg, cloud, log, _args): + # This is written to by the user data handlers + # Ie, any custom shell scripts that come down + # go here... + runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts', + SCRIPT_SUBDIR) + try: + util.runparts(runparts_path) + except: + log.warn("Failed to run module %s (%s in %s)", + name, SCRIPT_SUBDIR, runparts_path) + raise diff --git a/cloudinit/config/cc_vendor_scripts_per_boot.py b/cloudinit/config/cc_vendor_scripts_per_boot.py new file mode 100644 index 00000000..80446e99 --- /dev/null +++ b/cloudinit/config/cc_vendor_scripts_per_boot.py @@ -0,0 +1,43 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2011-2014 Canonical Ltd. 
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# +# Author: Scott Moser +# Author: Ben Howard +# Author: Juerg Haefliger +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os + +from cloudinit import util + +from cloudinit.settings import PER_ALWAYS + +frequency = PER_ALWAYS + +SCRIPT_SUBDIR = 'per-boot' + + +def handle(name, cfg, cloud, log, _args): + runparts_path = os.path.join(cloud.get_cpath(), 'scripts', 'vendor', + SCRIPT_SUBDIR) + vendor_prefix = util.get_nested_option_as_list(cfg, 'vendor_data', + 'prefix') + try: + util.runparts(runparts_path, exe_prefix=vendor_prefix) + except: + log.warn("Failed to run module %s (%s in %s)", + name, SCRIPT_SUBDIR, runparts_path) + raise diff --git a/cloudinit/config/cc_vendor_scripts_per_instance.py b/cloudinit/config/cc_vendor_scripts_per_instance.py new file mode 100644 index 00000000..2d27a0c4 --- /dev/null +++ b/cloudinit/config/cc_vendor_scripts_per_instance.py @@ -0,0 +1,43 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2011-2014 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# +# Author: Scott Moser +# Author: Ben Howard +# Author: Juerg Haefliger +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os + +from cloudinit import util + +from cloudinit.settings import PER_INSTANCE + +frequency = PER_INSTANCE + +SCRIPT_SUBDIR = 'per-instance' + + +def handle(name, cfg, cloud, log, _args): + runparts_path = os.path.join(cloud.get_cpath(), 'scripts', 'vendor', + SCRIPT_SUBDIR) + vendor_prefix = util.get_nested_option_as_list(cfg, 'vendor_data', + 'prefix') + try: + util.runparts(runparts_path, exe_prefix=vendor_prefix) + except: + log.warn("Failed to run module %s (%s in %s)", + name, SCRIPT_SUBDIR, runparts_path) + raise diff --git a/cloudinit/config/cc_vendor_scripts_per_once.py b/cloudinit/config/cc_vendor_scripts_per_once.py new file mode 100644 index 00000000..ad3e13c8 --- /dev/null +++ b/cloudinit/config/cc_vendor_scripts_per_once.py @@ -0,0 +1,43 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2011-2014 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# +# Author: Scott Moser +# Author: Ben Howard +# Author: Juerg Haefliger +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import os + +from cloudinit import util + +from cloudinit.settings import PER_ONCE + +frequency = PER_ONCE + +SCRIPT_SUBDIR = 'per-once' + + +def handle(name, cfg, cloud, log, _args): + runparts_path = os.path.join(cloud.get_cpath(), 'scripts', 'vendor', + SCRIPT_SUBDIR) + vendor_prefix = util.get_nested_option_as_list(cfg, 'vendor_data', + 'prefix') + try: + util.runparts(runparts_path, exe_prefix=vendor_prefix) + except: + log.warn("Failed to run module %s (%s in %s)", + name, SCRIPT_SUBDIR, runparts_path) + raise diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 34a73115..4232700f 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -66,6 +66,8 @@ class CloudConfigPartHandler(handlers.Handler): handlers.Handler.__init__(self, PER_ALWAYS, version=3) self.cloud_buf = None self.cloud_fn = paths.get_ipath("cloud_config") + if 'cloud_config_path' in _kwargs: + self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"]) self.file_names = [] def list_types(self): diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py index 62289d98..30c1ed89 100644 --- a/cloudinit/handlers/shell_script.py +++ b/cloudinit/handlers/shell_script.py @@ -36,6 +36,8 @@ class ShellScriptPartHandler(handlers.Handler): def __init__(self, paths, **_kwargs): handlers.Handler.__init__(self, PER_ALWAYS) self.script_dir = paths.get_ipath_cur('scripts') + if 'script_path' in _kwargs: + self.script_dir = paths.get_ipath_cur(_kwargs['script_path']) def list_types(self): return [ diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index e5eac6a7..f9da697c 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -200,11 +200,13 @@ class Runners(object): class ConfigMerger(object): def __init__(self, paths=None, datasource=None, - additional_fns=None, base_cfg=None): + additional_fns=None, base_cfg=None, + include_vendor=True): self._paths = paths self._ds = datasource self._fns = 
additional_fns self._base_cfg = base_cfg + self._include_vendor = include_vendor # Created on first use self._cfg = None @@ -237,13 +239,19 @@ class ConfigMerger(object): # a configuration file to use when running... if not self._paths: return i_cfgs - cc_fn = self._paths.get_ipath_cur('cloud_config') - if cc_fn and os.path.isfile(cc_fn): - try: - i_cfgs.append(util.read_conf(cc_fn)) - except: - util.logexc(LOG, 'Failed loading of cloud-config from %s', - cc_fn) + + cc_paths = ['cloud_config'] + if self._include_vendor: + cc_paths.append('vendor_cloud_config') + + for cc_p in cc_paths: + cc_fn = self._paths.get_ipath_cur(cc_p) + if cc_fn and os.path.isfile(cc_fn): + try: + i_cfgs.append(util.read_conf(cc_fn)) + except: + util.logexc(LOG, 'Failed loading of cloud-config from %s', + cc_fn) return i_cfgs def _read_cfg(self): @@ -331,13 +339,18 @@ class Paths(object): self.lookups = { "handlers": "handlers", "scripts": "scripts", + "vendor_scripts": "scripts/vendor", "sem": "sem", "boothooks": "boothooks", "userdata_raw": "user-data.txt", "userdata": "user-data.txt.i", "obj_pkl": "obj.pkl", "cloud_config": "cloud-config.txt", + "vendor_cloud_config": "vendor-cloud-config.txt", "data": "data", + "vendordata_raw": "vendor-data.txt", + "vendordata": "vendor-data.txt.i", + "mergedvendoruser": "vendor-user-data.txt", } # Set when a datasource becomes active self.datasource = ds diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 7dc1fbde..a7c7993f 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -53,6 +53,8 @@ class DataSource(object): self.userdata = None self.metadata = None self.userdata_raw = None + self.vendordata = None + self.vendordata_raw = None # find the datasource config name. # remove 'DataSource' from classname on front, and remove 'Net' on end. 
@@ -77,9 +79,28 @@ class DataSource(object): if self.userdata is None: self.userdata = self.ud_proc.process(self.get_userdata_raw()) if apply_filter: - return self._filter_userdata(self.userdata) + return self._filter_xdata(self.userdata) return self.userdata + def get_vendordata(self, apply_filter=False): + if self.vendordata is None: + self.vendordata = self.ud_proc.process(self.get_vendordata_raw()) + if apply_filter: + return self._filter_xdata(self.vendordata) + return self.vendordata + + def has_vendordata(self): + if self.vendordata_raw is not None: + return True + return False + + def consume_vendordata(self): + """ + The datasource may allow for consumption of vendordata, but only + when the datasource has allowed it. The default is false. + """ + return False + @property def launch_index(self): if not self.metadata: @@ -88,7 +109,7 @@ class DataSource(object): return self.metadata['launch-index'] return None - def _filter_userdata(self, processed_ud): + def _filter_xdata(self, processed_ud): filters = [ launch_index.Filter(util.safe_int(self.launch_index)), ] @@ -104,6 +125,9 @@ class DataSource(object): def get_userdata_raw(self): return self.userdata_raw + def get_vendordata_raw(self): + return self.vendordata_raw + # the data sources' config_obj is a cloud-config formated # object that came to it from ways other than cloud-config # because cloud-config content would be handled elsewhere diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 07c55802..043b3257 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -26,7 +26,8 @@ import copy import os import sys -from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG) +from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES, + CLOUD_CONFIG) from cloudinit import handlers @@ -123,6 +124,10 @@ class Init(object): os.path.join(c_dir, 'scripts', 'per-instance'), os.path.join(c_dir, 'scripts', 'per-once'), os.path.join(c_dir, 'scripts', 'per-boot'), + 
os.path.join(c_dir, 'scripts', 'vendor'), + os.path.join(c_dir, 'scripts', 'vendor', 'per-boot'), + os.path.join(c_dir, 'scripts', 'vendor', 'per-instance'), + os.path.join(c_dir, 'scripts', 'vendor', 'per-once'), os.path.join(c_dir, 'seed'), os.path.join(c_dir, 'instances'), os.path.join(c_dir, 'handlers'), @@ -319,6 +324,7 @@ class Init(object): if not self._write_to_cache(): return self._store_userdata() + self._store_vendordata() def _store_userdata(self): raw_ud = "%s" % (self.datasource.get_userdata_raw()) @@ -326,21 +332,62 @@ class Init(object): processed_ud = "%s" % (self.datasource.get_userdata()) util.write_file(self._get_ipath('userdata'), processed_ud, 0600) - def _default_userdata_handlers(self): + def _store_vendordata(self): + raw_vd = "%s" % (self.datasource.get_vendordata_raw()) + util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0600) + processed_vd = "%s" % (self.datasource.get_vendordata()) + util.write_file(self._get_ipath('vendordata'), processed_vd, 0600) + + def _get_default_handlers(self, user_data=False, vendor_data=False, + excluded=None): opts = { 'paths': self.paths, 'datasource': self.datasource, } + + def conditional_get(cls, mod): + cls_name = cls.__name__.split('.')[-1] + _mod = getattr(cls, mod) + if not excluded: + return _mod(**opts) + + if cls_name not in excluded: + _mod = getattr(cls, mod) + return _mod(**opts) + # TODO(harlowja) Hmmm, should we dynamically import these?? 
def_handlers = [ - cc_part.CloudConfigPartHandler(**opts), - ss_part.ShellScriptPartHandler(**opts), - bh_part.BootHookPartHandler(**opts), - up_part.UpstartJobPartHandler(**opts), + conditional_get(bh_part, 'BootHookPartHandler'), + conditional_get(up_part, 'UpstartJobPartHandler'), ] - return def_handlers - def consume_userdata(self, frequency=PER_INSTANCE): + # Add in the shell script part handler + if user_data: + def_handlers.extend([ + conditional_get(cc_part, 'CloudConfigPartHandler'), + conditional_get(ss_part, 'ShellScriptPartHandler')]) + + # This changes the path for the vendor script execution + if vendor_data: + opts['script_path'] = "vendor_scripts" + opts['cloud_config_path'] = "vendor_cloud_config" + def_handlers.extend([ + conditional_get(cc_part, 'CloudConfigPartHandler'), + conditional_get(ss_part, 'ShellScriptPartHandler')]) + + return [x for x in def_handlers if x is not None] + + def _default_userdata_handlers(self): + return self._get_default_handlers(user_data=True) + + def _default_vendordata_handlers(self, excluded=None): + return self._get_default_handlers(vendor_data=True, excluded=excluded) + + def _do_handlers(self, data_msg, c_handlers_list, frequency): + """ + Generalized handlers suitable for use with either vendordata + or userdata + """ cdir = self.paths.get_cpath("handlers") idir = self._get_ipath("handlers") @@ -352,12 +399,6 @@ class Init(object): if d and d not in sys.path: sys.path.insert(0, d) - # Ensure datasource fetched before activation (just incase) - user_data_msg = self.datasource.get_userdata(True) - - # This keeps track of all the active handlers - c_handlers = helpers.ContentHandlers() - def register_handlers_in_dir(path): # Attempts to register any handler modules under the given path. 
if not path or not os.path.isdir(path): @@ -382,13 +423,16 @@ class Init(object): util.logexc(LOG, "Failed to register handler from %s", fname) + # This keeps track of all the active handlers + c_handlers = helpers.ContentHandlers() + # Add any handlers in the cloud-dir register_handlers_in_dir(cdir) # Register any other handlers that come from the default set. This # is done after the cloud-dir handlers so that the cdir modules can # take over the default user-data handler content-types. - for mod in self._default_userdata_handlers(): + for mod in c_handlers_list: types = c_handlers.register(mod, overwrite=False) if types: LOG.debug("Added default handler for %s from %s", types, mod) @@ -420,7 +464,7 @@ class Init(object): # names... 'handlercount': 0, } - handlers.walk(user_data_msg, handlers.walker_callback, + handlers.walk(data_msg, handlers.walker_callback, data=part_data) def finalize_handlers(): @@ -442,6 +486,12 @@ class Init(object): finally: finalize_handlers() + def consume_data(self, frequency=PER_INSTANCE): + # Consume the userdata first, because we need want to let the part + # handlers run first (for merging stuff) + self._consume_userdata(frequency) + self._consume_vendordata(frequency) + # Perform post-consumption adjustments so that # modules that run during the init stage reflect # this consumed set. @@ -453,6 +503,82 @@ class Init(object): # objects before the load of the userdata happened, # this is expected. + def _consume_vendordata(self, frequency=PER_ALWAYS): + """ + Consume the vendordata and run the part handlers on it + """ + if not self.datasource.has_vendordata(): + LOG.info("datasource did not provide vendor data") + return + + # User-data should have been consumed first. If it has, then we can + # read it and simply parse it. This means that the datasource can + # define if the vendordata can be consumed too....i.e this method + # gives us a lot of flexibility. 
+ _cc_merger = helpers.ConfigMerger(paths=self._paths, + datasource=self.datasource, + additional_fns=[], + base_cfg=self.cfg, + include_vendor=False) + _cc = _cc_merger.cfg + + if not self.datasource.consume_vendordata(): + if not isinstance(_cc, dict): + LOG.info(("userdata does not explicitly allow vendordata " + "consumption")) + return + + if 'vendor_data' not in _cc: + LOG.info(("no 'vendor_data' directive found in the" + " conf files. Skipping consumption of vendordata")) + return + + # This allows for the datasource to signal explicit conditions + # when the user has opted in to user-data + if self.datasource.consume_vendordata(): + LOG.info(("datasource has indicated that the user" + " opted-in via another channel")) + + vdc = _cc.get('vendor_data') + no_handlers = None + if isinstance(vdc, dict): + enabled = vdc.get('enabled') + no_handlers = vdc.get('no_run') + + if enabled is None: + LOG.info("vendordata will not be consumed: user has not opted-in") + return + elif util.is_false(enabled): + LOG.info("user has requested NO vendordata consumption") + return + + LOG.info("vendor data will be consumed") + + # Ensure vendordata source fetched before activation (just in case) + vendor_data_msg = self.datasource.get_vendordata(True) + + # This keeps track of all the active handlers, while excluding what the + # user doesn't want run, i.e.
boot_hook, cloud_config, shell_script + c_handlers_list = self._default_vendordata_handlers( + excluded=no_handlers) + + # Run the handlers + self._do_handlers(vendor_data_msg, c_handlers_list, frequency) + + def _consume_userdata(self, frequency=PER_INSTANCE): + """ + Consume the userdata and run the part handlers + """ + + # Ensure datasource fetched before activation (just in case) + user_data_msg = self.datasource.get_userdata(True) + + # This keeps track of all the active handlers + c_handlers_list = self._default_userdata_handlers() + + # Run the handlers + self._do_handlers(user_data_msg, c_handlers_list, frequency) + class Modules(object): def __init__(self, init, cfg_files=None): diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index d49ea094..3032ef70 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -88,7 +88,11 @@ class UserDataProcessor(object): def process(self, blob): accumulating_msg = MIMEMultipart() - self._process_msg(convert_string(blob), accumulating_msg) + if isinstance(blob, list): + for b in blob: + self._process_msg(convert_string(b), accumulating_msg) + else: + self._process_msg(convert_string(blob), accumulating_msg) return accumulating_msg def _process_msg(self, base_msg, append_msg): diff --git a/cloudinit/util.py b/cloudinit/util.py index a8ddb390..b69e2bb0 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -606,7 +606,7 @@ def del_dir(path): shutil.rmtree(path) -def runparts(dirp, skip_no_exist=True): +def runparts(dirp, skip_no_exist=True, exe_prefix=None): if skip_no_exist and not os.path.isdir(dirp): return @@ -617,7 +617,10 @@ def runparts(dirp, skip_no_exist=True): if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK): attempted.append(exe_path) try: - subp([exe_path], capture=False) + exe_cmd = [exe_path] + if isinstance(exe_prefix, list): + exe_cmd = exe_prefix + [exe_path] + subp(exe_cmd, capture=False) except ProcessExecutionError as e: logexc(LOG, "Failed running %s [%s]", exe_path,
e.exit_code) failed.append(e) @@ -1847,3 +1850,26 @@ def expand_dotted_devname(dotted): return toks else: return (dotted, None) + + +def get_nested_option_as_list(dct, first, second): + """ + Return a nested option from a dict as a list + """ + if not isinstance(dct, dict): + raise TypeError("get_nested_option_as_list only works with dicts") + root = dct.get(first) + if not isinstance(root, dict): + return None + + token = root.get(second) + if isinstance(token, list): + return token + elif isinstance(token, dict): + ret_list = [] + for k, v in token.iteritems(): + ret_list.append((k, v)) + return ret_list + elif isinstance(token, str): + return token.split() + return None diff --git a/config/cloud.cfg b/config/cloud.cfg index a07cd3b0..f325ad1e 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -64,6 +64,10 @@ cloud_config_modules: # The modules that run in the 'final' stage cloud_final_modules: - rightscale_userdata + - vendor-scripts-per-once + - vendor-scripts-per-boot + - vendor-scripts-per-instance + - script-vendor - scripts-per-once - scripts-per-boot - scripts-per-instance diff --git a/doc/examples/cloud-config-vendor-data.txt b/doc/examples/cloud-config-vendor-data.txt new file mode 100644 index 00000000..7f90847b --- /dev/null +++ b/doc/examples/cloud-config-vendor-data.txt @@ -0,0 +1,16 @@ +#cloud-config +# +# This explains how to control vendordata via a cloud-config +# +# On select Datasources, vendors have a channel for the consumption +# of all supported user-data types via a special channel called +# vendordata. Users of the end system are given ultimate control. +# +vendor_data: + enabled: True + prefix: /usr/bin/ltrace + +# enabled: whether it is enabled or not +# prefix: the command to run before any vendor scripts. +# Note: this is a fairly weak method of containment. It should
It should +# be used to profile a script, not to prevent its run diff --git a/doc/vendordata.txt b/doc/vendordata.txt new file mode 100644 index 00000000..63a6c999 --- /dev/null +++ b/doc/vendordata.txt @@ -0,0 +1,93 @@ +=== Overview === +Vendordata is data provided by the entity that launches an instance. +The cloud provider makes this data available to the instance via in one +way or another. + +Vendordata follows the same rules as user-data, with the following +caveauts: + 1. Users have ultimate control over vendordata + 2. By default it only runs on first boot + 3. Vendordata runs at the users pleasure. If the use of + vendordata is required for the instance to run, then + vendordata should not be used. + 4. Most vendor operations should be done either via script, + boot_hook or upstart job. + +Vendors utilizing the vendordata channel are strongly advised to +use the #cloud-config-jsonp method, otherwise they risk that a +user can accidently override choices. + +Further, we strongly advise vendors to not 'be evil'. By evil, we +mean any action that could compromise a system. Since users trust +you, please take care to make sure that any vendordata is safe, +atomic, indopenant and does not put your users at risk. + +cloud-init can read this input and act on it in different ways. + +=== Input Formats === +cloud-init will download and cache to filesystem any vendor-data that it +finds. However, certain types of vendor-data are handled specially. + + * Gzip Compressed Content + content found to be gzip compressed will be uncompressed, and + these rules applied to the uncompressed data + + * Mime Multi Part archive + This list of rules is applied to each part of this multi-part file + Using a mime-multi part file, the user can specify more than one + type of data. For example, both a user data script and a + cloud-config type could be specified. + + * vendor-data Script + begins with: #! 
or Content-Type: text/x-shellscript + script will be executed at "rc.local-like" level during first boot. + rc.local-like means "very late in the boot sequence" + + * Include File + begins with #include or Content-Type: text/x-include-url + This content is a "include" file. The file contains a list of + urls, one per line. Each of the URLs will be read, and their content + will be passed through this same set of rules. Ie, the content + read from the URL can be gzipped, mime-multi-part, or plain text + +* Include File Once + begins with #include-once or Content-Type: text/x-include-once-url + This content is a "include" file. The file contains a list of + urls, one per line. Each of the URLs will be read, and their content + will be passed through this same set of rules. Ie, the content + read from the URL can be gzipped, mime-multi-part, or plain text + This file will just be downloaded only once per instance, and its + contents cached for subsequent boots. This allows you to pass in + one-time-use or expiring URLs. + + * Cloud Config Data + begins with #cloud-config or Content-Type: text/cloud-config + + This content is "cloud-config" data. See the examples for a + commented example of supported config formats. + + * Upstart Job + begins with #upstart-job or Content-Type: text/upstart-job + + Content is placed into a file in /etc/init, and will be consumed + by upstart as any other upstart job. + + * Cloud Boothook + begins with #cloud-boothook or Content-Type: text/cloud-boothook + + This content is "boothook" data. It is stored in a file under + /var/lib/cloud and then executed immediately. + + This is the earliest "hook" available. Note, that there is no + mechanism provided for running only once. The boothook must take + care of this itself. It is provided with the instance id in the + environment variable "INSTANCE_ID". This could be made use of to + provide a 'once-per-instance' + +=== Examples === +There are examples in the examples subdirectory. 
+Additionally, the 'tools' directory contains 'write-mime-multipart', +which can be used to easily generate mime-multi-part files from a list +of input files. That data can then be given to an instance. + +See 'write-mime-multipart --help' for usage. diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py new file mode 100644 index 00000000..44395f06 --- /dev/null +++ b/tests/unittests/test_data.py @@ -0,0 +1,505 @@ +"""Tests for handling of userdata within cloud init.""" + +import StringIO + +import gzip +import logging +import os + +from email.mime.application import MIMEApplication +from email.mime.base import MIMEBase +from email.mime.multipart import MIMEMultipart + +from cloudinit import handlers +from cloudinit import helpers as c_helpers +from cloudinit import log +from cloudinit.settings import (PER_INSTANCE) +from cloudinit import sources +from cloudinit import stages +from cloudinit import util + +INSTANCE_ID = "i-testing" + +from tests.unittests import helpers + + +class FakeDataSource(sources.DataSource): + + def __init__(self, userdata=None, vendordata=None, + consume_vendor=False): + sources.DataSource.__init__(self, {}, None, None) + self.metadata = {'instance-id': INSTANCE_ID} + self.userdata_raw = userdata + self.vendordata_raw = vendordata + self._consume_vendor = consume_vendor + + def consume_vendordata(self): + return self._consume_vendor + + +# FIXME: these tests shouldn't be checking log output?? +# Weirddddd... 
+class TestConsumeUserData(helpers.FilesystemMockingTestCase): + + def setUp(self): + helpers.FilesystemMockingTestCase.setUp(self) + self._log = None + self._log_file = None + self._log_handler = None + + def tearDown(self): + helpers.FilesystemMockingTestCase.tearDown(self) + if self._log_handler and self._log: + self._log.removeHandler(self._log_handler) + + def _patchIn(self, root): + self.restore() + self.patchOS(root) + self.patchUtils(root) + + def capture_log(self, lvl=logging.DEBUG): + log_file = StringIO.StringIO() + self._log_handler = logging.StreamHandler(log_file) + self._log_handler.setLevel(lvl) + self._log = log.getLogger() + self._log.addHandler(self._log_handler) + return log_file + + def test_simple_jsonp(self): + blob = ''' +#cloud-config-jsonp +[ + { "op": "add", "path": "/baz", "value": "qux" }, + { "op": "add", "path": "/bar", "value": "qux2" } +] +''' + + ci = stages.Init() + ci.datasource = FakeDataSource(blob) + new_root = self.makeDir() + self.patchUtils(new_root) + self.patchOS(new_root) + ci.fetch() + ci.consume_data() + cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) + cc = util.load_yaml(cc_contents) + self.assertEquals(2, len(cc)) + self.assertEquals('qux', cc['baz']) + self.assertEquals('qux2', cc['bar']) + + def test_simple_jsonp_vendor_and_user(self): + # test that user-data wins over vendor + user_blob = ''' +#cloud-config-jsonp +[ + { "op": "add", "path": "/baz", "value": "qux" }, + { "op": "add", "path": "/bar", "value": "qux2" }, + { "op": "add", "path": "/vendor_data", "value": {"enabled": "true"}} +] +''' + vendor_blob = ''' +#cloud-config-jsonp +[ + { "op": "add", "path": "/baz", "value": "quxA" }, + { "op": "add", "path": "/bar", "value": "quxB" }, + { "op": "add", "path": "/foo", "value": "quxC" } +] +''' + new_root = self.makeDir() + self._patchIn(new_root) + initer = stages.Init() + initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) + initer.read_cfg() + initer.initialize() + 
initer.fetch() + _iid = initer.instancify() + initer.update() + initer.cloudify().run('consume_data', + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE) + mods = stages.Modules(initer) + (_which_ran, _failures) = mods.run_section('cloud_init_modules') + cfg = mods.cfg + self.assertIn('vendor_data', cfg) + self.assertEquals('qux', cfg['baz']) + self.assertEquals('qux2', cfg['bar']) + self.assertEquals('quxC', cfg['foo']) + + def test_simple_jsonp_no_vendor_consumed(self): + # make sure that vendor data is not consumed + user_blob = ''' +#cloud-config-jsonp +[ + { "op": "add", "path": "/baz", "value": "qux" }, + { "op": "add", "path": "/bar", "value": "qux2" } +] +''' + vendor_blob = ''' +#cloud-config-jsonp +[ + { "op": "add", "path": "/baz", "value": "quxA" }, + { "op": "add", "path": "/bar", "value": "quxB" }, + { "op": "add", "path": "/foo", "value": "quxC" } +] +''' + new_root = self.makeDir() + self._patchIn(new_root) + initer = stages.Init() + initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) + initer.read_cfg() + initer.initialize() + initer.fetch() + _iid = initer.instancify() + initer.update() + initer.cloudify().run('consume_data', + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE) + mods = stages.Modules(initer) + (_which_ran, _failures) = mods.run_section('cloud_init_modules') + cfg = mods.cfg + self.assertEquals('qux', cfg['baz']) + self.assertEquals('qux2', cfg['bar']) + self.assertNotIn('foo', cfg) + + def test_mixed_cloud_config(self): + blob_cc = ''' +#cloud-config +a: b +c: d +''' + message_cc = MIMEBase("text", "cloud-config") + message_cc.set_payload(blob_cc) + + blob_jp = ''' +#cloud-config-jsonp +[ + { "op": "replace", "path": "/a", "value": "c" }, + { "op": "remove", "path": "/c" } +] +''' + + message_jp = MIMEBase('text', "cloud-config-jsonp") + message_jp.set_payload(blob_jp) + + message = MIMEMultipart() + message.attach(message_cc) + message.attach(message_jp) + + ci = stages.Init() + 
ci.datasource = FakeDataSource(str(message)) + new_root = self.makeDir() + self.patchUtils(new_root) + self.patchOS(new_root) + ci.fetch() + ci.consume_data() + cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) + cc = util.load_yaml(cc_contents) + self.assertEquals(1, len(cc)) + self.assertEquals('c', cc['a']) + + def test_vendor_with_datasource_perm(self): + vendor_blob = ''' +#cloud-config +a: b +name: vendor +run: + - x + - y +''' + + new_root = self.makeDir() + self._patchIn(new_root) + initer = stages.Init() + initer.datasource = FakeDataSource('', vendordata=vendor_blob, + consume_vendor=True) + initer.read_cfg() + initer.initialize() + initer.fetch() + _iid = initer.instancify() + initer.update() + initer.cloudify().run('consume_data', + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE) + mods = stages.Modules(initer) + (_which_ran, _failures) = mods.run_section('cloud_init_modules') + cfg = mods.cfg + self.assertEquals('b', cfg['a']) + self.assertEquals('vendor', cfg['name']) + self.assertIn('x', cfg['run']) + self.assertIn('y', cfg['run']) + + def test_vendor_user_yaml_cloud_config(self): + vendor_blob = ''' +#cloud-config +a: b +name: vendor +run: + - x + - y +''' + + user_blob = ''' +#cloud-config +a: c +vendor_data: + enabled: True + prefix: /bin/true +name: user +run: + - z +''' + new_root = self.makeDir() + self._patchIn(new_root) + initer = stages.Init() + initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) + initer.read_cfg() + initer.initialize() + initer.fetch() + _iid = initer.instancify() + initer.update() + initer.cloudify().run('consume_data', + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE) + mods = stages.Modules(initer) + (_which_ran, _failures) = mods.run_section('cloud_init_modules') + cfg = mods.cfg + self.assertIn('vendor_data', cfg) + self.assertEquals('c', cfg['a']) + self.assertEquals('user', cfg['name']) + self.assertNotIn('x', cfg['run']) + self.assertNotIn('y', 
cfg['run']) + self.assertIn('z', cfg['run']) + + def test_vendordata_script(self): + vendor_blob = ''' +#!/bin/bash +echo "test" +''' + + user_blob = ''' +#cloud-config +vendor_data: + enabled: True + prefix: /bin/true +''' + new_root = self.makeDir() + self._patchIn(new_root) + initer = stages.Init() + initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) + initer.read_cfg() + initer.initialize() + initer.fetch() + _iid = initer.instancify() + initer.update() + initer.cloudify().run('consume_data', + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE) + mods = stages.Modules(initer) + (_which_ran, _failures) = mods.run_section('cloud_init_modules') + cfg = mods.cfg + vendor_script = initer.paths.get_ipath_cur('vendor_scripts') + vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script) + self.assertTrue(os.path.exists(vendor_script_fns)) + + + + def test_merging_cloud_config(self): + blob = ''' +#cloud-config +a: b +e: f +run: + - b + - c +''' + message1 = MIMEBase("text", "cloud-config") + message1.set_payload(blob) + + blob2 = ''' +#cloud-config +a: e +e: g +run: + - stuff + - morestuff +''' + message2 = MIMEBase("text", "cloud-config") + message2['X-Merge-Type'] = ('dict(recurse_array,' + 'recurse_str)+list(append)+str(append)') + message2.set_payload(blob2) + + blob3 = ''' +#cloud-config +e: + - 1 + - 2 + - 3 +p: 1 +''' + message3 = MIMEBase("text", "cloud-config") + message3.set_payload(blob3) + + messages = [message1, message2, message3] + + paths = c_helpers.Paths({}, ds=FakeDataSource('')) + cloud_cfg = handlers.cloud_config.CloudConfigPartHandler(paths) + + new_root = self.makeDir() + self.patchUtils(new_root) + self.patchOS(new_root) + cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None, + None) + for i, m in enumerate(messages): + headers = dict(m) + fn = "part-%s" % (i + 1) + payload = m.get_payload(decode=True) + cloud_cfg.handle_part(None, headers['Content-Type'], + fn, payload, None, headers) + 
cloud_cfg.handle_part(None, handlers.CONTENT_END, None, None, None, + None) + contents = util.load_file(paths.get_ipath('cloud_config')) + contents = util.load_yaml(contents) + self.assertEquals(contents['run'], ['b', 'c', 'stuff', 'morestuff']) + self.assertEquals(contents['a'], 'be') + self.assertEquals(contents['e'], [1, 2, 3]) + self.assertEquals(contents['p'], 1) + + def test_unhandled_type_warning(self): + """Raw text without magic is ignored but shows warning.""" + ci = stages.Init() + data = "arbitrary text\n" + ci.datasource = FakeDataSource(data) + + mock_write = self.mocker.replace("cloudinit.util.write_file", + passthrough=False) + mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) + self.mocker.replay() + + log_file = self.capture_log(logging.WARNING) + ci.fetch() + ci.consume_data() + self.assertIn( + "Unhandled non-multipart (text/x-not-multipart) userdata:", + log_file.getvalue()) + + def test_mime_gzip_compressed(self): + """Tests that individual message gzip encoding works.""" + + def gzip_part(text): + contents = StringIO.StringIO() + f = gzip.GzipFile(fileobj=contents, mode='w') + f.write(str(text)) + f.flush() + f.close() + return MIMEApplication(contents.getvalue(), 'gzip') + + base_content1 = ''' +#cloud-config +a: 2 +''' + + base_content2 = ''' +#cloud-config +b: 3 +c: 4 +''' + + message = MIMEMultipart('test') + message.attach(gzip_part(base_content1)) + message.attach(gzip_part(base_content2)) + ci = stages.Init() + ci.datasource = FakeDataSource(str(message)) + new_root = self.makeDir() + self.patchUtils(new_root) + self.patchOS(new_root) + ci.fetch() + ci.consume_data() + contents = util.load_file(ci.paths.get_ipath("cloud_config")) + contents = util.load_yaml(contents) + self.assertTrue(isinstance(contents, dict)) + self.assertEquals(3, len(contents)) + self.assertEquals(2, contents['a']) + self.assertEquals(3, contents['b']) + self.assertEquals(4, contents['c']) + + def test_mime_text_plain(self): + """Mime message of type 
text/plain is ignored but shows warning.""" + ci = stages.Init() + message = MIMEBase("text", "plain") + message.set_payload("Just text") + ci.datasource = FakeDataSource(message.as_string()) + + mock_write = self.mocker.replace("cloudinit.util.write_file", + passthrough=False) + mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) + self.mocker.replay() + + log_file = self.capture_log(logging.WARNING) + ci.fetch() + ci.consume_data() + self.assertIn( + "Unhandled unknown content-type (text/plain)", + log_file.getvalue()) + + def test_shellscript(self): + """Raw text starting #!/bin/sh is treated as script.""" + ci = stages.Init() + script = "#!/bin/sh\necho hello\n" + ci.datasource = FakeDataSource(script) + + outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") + mock_write = self.mocker.replace("cloudinit.util.write_file", + passthrough=False) + mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) + mock_write(outpath, script, 0700) + self.mocker.replay() + + log_file = self.capture_log(logging.WARNING) + ci.fetch() + ci.consume_data() + self.assertEqual("", log_file.getvalue()) + + def test_mime_text_x_shellscript(self): + """Mime message of type text/x-shellscript is treated as script.""" + ci = stages.Init() + script = "#!/bin/sh\necho hello\n" + message = MIMEBase("text", "x-shellscript") + message.set_payload(script) + ci.datasource = FakeDataSource(message.as_string()) + + outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") + mock_write = self.mocker.replace("cloudinit.util.write_file", + passthrough=False) + mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) + mock_write(outpath, script, 0700) + self.mocker.replay() + + log_file = self.capture_log(logging.WARNING) + ci.fetch() + ci.consume_data() + self.assertEqual("", log_file.getvalue()) + + def test_mime_text_plain_shell(self): + """Mime type text/plain starting #!/bin/sh is treated as script.""" + ci = stages.Init() + script = "#!/bin/sh\necho hello\n" 
+ message = MIMEBase("text", "plain") + message.set_payload(script) + ci.datasource = FakeDataSource(message.as_string()) + + outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") + mock_write = self.mocker.replace("cloudinit.util.write_file", + passthrough=False) + mock_write(outpath, script, 0700) + mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) + self.mocker.replay() + + log_file = self.capture_log(logging.WARNING) + ci.fetch() + ci.consume_data() + self.assertEqual("", log_file.getvalue()) diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py index d9c3a455..5ffe95a2 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/test_runs/test_merge_run.py @@ -35,8 +35,8 @@ class TestMergeRun(helpers.FilesystemMockingTestCase): initer.datasource.userdata_raw = ud _iid = initer.instancify() initer.update() - initer.cloudify().run('consume_userdata', - initer.consume_userdata, + initer.cloudify().run('consume_data', + initer.consume_data, args=[PER_INSTANCE], freq=PER_INSTANCE) mirrors = initer.distro.get_option('package_mirrors') diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index 60ef812a..9a7178d1 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -66,8 +66,8 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): initer.update() self.assertTrue(os.path.islink("var/lib/cloud/instance")) - initer.cloudify().run('consume_userdata', - initer.consume_userdata, + initer.cloudify().run('consume_data', + initer.consume_data, args=[PER_INSTANCE], freq=PER_INSTANCE) diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py deleted file mode 100644 index 5ffe8f0a..00000000 --- a/tests/unittests/test_userdata.py +++ /dev/null @@ -1,308 +0,0 @@ -"""Tests for handling of userdata within cloud init.""" - -import StringIO - -import gzip 
-import logging -import os - -from email.mime.application import MIMEApplication -from email.mime.base import MIMEBase -from email.mime.multipart import MIMEMultipart - -from cloudinit import handlers -from cloudinit import helpers as c_helpers -from cloudinit import log -from cloudinit import sources -from cloudinit import stages -from cloudinit import util - -INSTANCE_ID = "i-testing" - -from tests.unittests import helpers - - -class FakeDataSource(sources.DataSource): - - def __init__(self, userdata): - sources.DataSource.__init__(self, {}, None, None) - self.metadata = {'instance-id': INSTANCE_ID} - self.userdata_raw = userdata - - -# FIXME: these tests shouldn't be checking log output?? -# Weirddddd... -class TestConsumeUserData(helpers.FilesystemMockingTestCase): - - def setUp(self): - helpers.FilesystemMockingTestCase.setUp(self) - self._log = None - self._log_file = None - self._log_handler = None - - def tearDown(self): - helpers.FilesystemMockingTestCase.tearDown(self) - if self._log_handler and self._log: - self._log.removeHandler(self._log_handler) - - def capture_log(self, lvl=logging.DEBUG): - log_file = StringIO.StringIO() - self._log_handler = logging.StreamHandler(log_file) - self._log_handler.setLevel(lvl) - self._log = log.getLogger() - self._log.addHandler(self._log_handler) - return log_file - - def test_simple_jsonp(self): - blob = ''' -#cloud-config-jsonp -[ - { "op": "add", "path": "/baz", "value": "qux" }, - { "op": "add", "path": "/bar", "value": "qux2" } -] -''' - - ci = stages.Init() - ci.datasource = FakeDataSource(blob) - new_root = self.makeDir() - self.patchUtils(new_root) - self.patchOS(new_root) - ci.fetch() - ci.consume_userdata() - cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) - cc = util.load_yaml(cc_contents) - self.assertEquals(2, len(cc)) - self.assertEquals('qux', cc['baz']) - self.assertEquals('qux2', cc['bar']) - - def test_mixed_cloud_config(self): - blob_cc = ''' -#cloud-config -a: b -c: d -''' - 
message_cc = MIMEBase("text", "cloud-config") - message_cc.set_payload(blob_cc) - - blob_jp = ''' -#cloud-config-jsonp -[ - { "op": "replace", "path": "/a", "value": "c" }, - { "op": "remove", "path": "/c" } -] -''' - - message_jp = MIMEBase('text', "cloud-config-jsonp") - message_jp.set_payload(blob_jp) - - message = MIMEMultipart() - message.attach(message_cc) - message.attach(message_jp) - - ci = stages.Init() - ci.datasource = FakeDataSource(str(message)) - new_root = self.makeDir() - self.patchUtils(new_root) - self.patchOS(new_root) - ci.fetch() - ci.consume_userdata() - cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) - cc = util.load_yaml(cc_contents) - self.assertEquals(1, len(cc)) - self.assertEquals('c', cc['a']) - - def test_merging_cloud_config(self): - blob = ''' -#cloud-config -a: b -e: f -run: - - b - - c -''' - message1 = MIMEBase("text", "cloud-config") - message1.set_payload(blob) - - blob2 = ''' -#cloud-config -a: e -e: g -run: - - stuff - - morestuff -''' - message2 = MIMEBase("text", "cloud-config") - message2['X-Merge-Type'] = ('dict(recurse_array,' - 'recurse_str)+list(append)+str(append)') - message2.set_payload(blob2) - - blob3 = ''' -#cloud-config -e: - - 1 - - 2 - - 3 -p: 1 -''' - message3 = MIMEBase("text", "cloud-config") - message3.set_payload(blob3) - - messages = [message1, message2, message3] - - paths = c_helpers.Paths({}, ds=FakeDataSource('')) - cloud_cfg = handlers.cloud_config.CloudConfigPartHandler(paths) - - new_root = self.makeDir() - self.patchUtils(new_root) - self.patchOS(new_root) - cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None, - None) - for i, m in enumerate(messages): - headers = dict(m) - fn = "part-%s" % (i + 1) - payload = m.get_payload(decode=True) - cloud_cfg.handle_part(None, headers['Content-Type'], - fn, payload, None, headers) - cloud_cfg.handle_part(None, handlers.CONTENT_END, None, None, None, - None) - contents = util.load_file(paths.get_ipath('cloud_config')) - 
contents = util.load_yaml(contents) - self.assertEquals(contents['run'], ['b', 'c', 'stuff', 'morestuff']) - self.assertEquals(contents['a'], 'be') - self.assertEquals(contents['e'], [1, 2, 3]) - self.assertEquals(contents['p'], 1) - - def test_unhandled_type_warning(self): - """Raw text without magic is ignored but shows warning.""" - ci = stages.Init() - data = "arbitrary text\n" - ci.datasource = FakeDataSource(data) - - mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) - mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) - self.mocker.replay() - - log_file = self.capture_log(logging.WARNING) - ci.fetch() - ci.consume_userdata() - self.assertIn( - "Unhandled non-multipart (text/x-not-multipart) userdata:", - log_file.getvalue()) - - def test_mime_gzip_compressed(self): - """Tests that individual message gzip encoding works.""" - - def gzip_part(text): - contents = StringIO.StringIO() - f = gzip.GzipFile(fileobj=contents, mode='w') - f.write(str(text)) - f.flush() - f.close() - return MIMEApplication(contents.getvalue(), 'gzip') - - base_content1 = ''' -#cloud-config -a: 2 -''' - - base_content2 = ''' -#cloud-config -b: 3 -c: 4 -''' - - message = MIMEMultipart('test') - message.attach(gzip_part(base_content1)) - message.attach(gzip_part(base_content2)) - ci = stages.Init() - ci.datasource = FakeDataSource(str(message)) - new_root = self.makeDir() - self.patchUtils(new_root) - self.patchOS(new_root) - ci.fetch() - ci.consume_userdata() - contents = util.load_file(ci.paths.get_ipath("cloud_config")) - contents = util.load_yaml(contents) - self.assertTrue(isinstance(contents, dict)) - self.assertEquals(3, len(contents)) - self.assertEquals(2, contents['a']) - self.assertEquals(3, contents['b']) - self.assertEquals(4, contents['c']) - - def test_mime_text_plain(self): - """Mime message of type text/plain is ignored but shows warning.""" - ci = stages.Init() - message = MIMEBase("text", "plain") - message.set_payload("Just text") 
- ci.datasource = FakeDataSource(message.as_string()) - - mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) - mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) - self.mocker.replay() - - log_file = self.capture_log(logging.WARNING) - ci.fetch() - ci.consume_userdata() - self.assertIn( - "Unhandled unknown content-type (text/plain)", - log_file.getvalue()) - - def test_shellscript(self): - """Raw text starting #!/bin/sh is treated as script.""" - ci = stages.Init() - script = "#!/bin/sh\necho hello\n" - ci.datasource = FakeDataSource(script) - - outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") - mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) - mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) - mock_write(outpath, script, 0700) - self.mocker.replay() - - log_file = self.capture_log(logging.WARNING) - ci.fetch() - ci.consume_userdata() - self.assertEqual("", log_file.getvalue()) - - def test_mime_text_x_shellscript(self): - """Mime message of type text/x-shellscript is treated as script.""" - ci = stages.Init() - script = "#!/bin/sh\necho hello\n" - message = MIMEBase("text", "x-shellscript") - message.set_payload(script) - ci.datasource = FakeDataSource(message.as_string()) - - outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") - mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) - mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) - mock_write(outpath, script, 0700) - self.mocker.replay() - - log_file = self.capture_log(logging.WARNING) - ci.fetch() - ci.consume_userdata() - self.assertEqual("", log_file.getvalue()) - - def test_mime_text_plain_shell(self): - """Mime type text/plain starting #!/bin/sh is treated as script.""" - ci = stages.Init() - script = "#!/bin/sh\necho hello\n" - message = MIMEBase("text", "plain") - message.set_payload(script) - ci.datasource = FakeDataSource(message.as_string()) - - 
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") - mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) - mock_write(outpath, script, 0700) - mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) - self.mocker.replay() - - log_file = self.capture_log(logging.WARNING) - ci.fetch() - ci.consume_userdata() - self.assertEqual("", log_file.getvalue()) -- cgit v1.2.3 From 9874d0590dba4a67ff7268a6a1d22207088e1a13 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Thu, 9 Jan 2014 08:31:52 -0700 Subject: Added vendordata to SmartOS --- cloudinit/sources/DataSourceSmartOS.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 551b20c4..ccfee931 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -47,6 +47,7 @@ SMARTOS_ATTRIB_MAP = { 'iptables_disable': ('iptables_disable', True), 'motd_sys_info': ('motd_sys_info', True), 'availability_zone': ('datacenter_name', True), + 'vendordata': ('sdc:operator-script', False), } DS_NAME = 'SmartOS' @@ -154,6 +155,7 @@ class DataSourceSmartOS(sources.DataSource): self.metadata = util.mergemanydict([md, self.metadata]) self.userdata_raw = ud + self.vendordata_raw = vendordata return True def device_name_to_device(self, name): -- cgit v1.2.3 From b9314a18a052e934643c93b81dc938b3e5b69307 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 9 Jan 2014 13:12:40 -0500 Subject: Azure: minor changes for filename as strings and logging. We were passing a unicode string to 'runcmd' in the path to the .crt file. That is because the keyname was coming from ovf file as unicode. Ie: u'/var/lib/waagent/6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7.crt' Then, logging was extending not appending errors. 
--- ChangeLog | 2 ++ cloudinit/sources/DataSourceAzure.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/ChangeLog b/ChangeLog index 1286e7c1..8029f9af 100644 --- a/ChangeLog +++ b/ChangeLog @@ -7,6 +7,8 @@ apt_get_wrapper (LP: #1236531). - convert paths provided in config-drive 'files' to string before writing (LP: #1260072). + - Azure: minor changes in logging output. ensure filenames are strings (not + unicode). 0.7.4: - fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a partitioned block device with target filesystem on ephemeral0.1. diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index b18c57e7..97f151d6 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -154,7 +154,7 @@ class DataSourceAzureNet(sources.DataSource): fp_files = [] for pk in self.cfg.get('_pubkeys', []): - bname = pk['fingerprint'] + ".crt" + bname = str(pk['fingerprint'] + ".crt") fp_files += [os.path.join(mycfg['data_dir'], bname)] missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", @@ -247,7 +247,7 @@ def pubkeys_from_crt_files(flist): try: pubkeys.append(crtfile_to_pubkey(fname)) except util.ProcessExecutionError: - errors.extend(fname) + errors.append(fname) if errors: LOG.warn("failed to convert the crt files to pubkey: %s" % errors) -- cgit v1.2.3 From 66aa9826b818c3478516104b38039fecbd717b6b Mon Sep 17 00:00:00 2001 From: Paul Querna Date: Thu, 9 Jan 2014 21:14:51 +0000 Subject: Allow a Config Drive source on a partition, if the label matches. 
--- cloudinit/sources/DataSourceConfigDrive.py | 6 ++++-- tests/unittests/test_datasource/test_configdrive.py | 5 +++-- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 4f437244..2a244496 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -284,8 +284,10 @@ def find_candidate_devs(): # followed by fstype items, but with dupes removed combined = (by_label + [d for d in by_fstype if d not in by_label]) - # We are looking for block device (sda, not sda1), ignore partitions - combined = [d for d in combined if not util.is_partition(d)] + # We are looking for a block device or partition with necessary label or + # an unpartitioned block device. + combined = [d for d in combined + if d in by_label or not util.is_partition(d)] return combined diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index d5935294..3c1e8add 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -285,10 +285,11 @@ class TestConfigDriveDataSource(MockerTestCase): self.assertEqual(["/dev/vdb", "/dev/zdd"], ds.find_candidate_devs()) - # verify that partitions are not considered + # verify that partitions are considered, but only if they have a label. 
devs_with_answers = {"TYPE=vfat": ["/dev/sda1"], "TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]} - self.assertEqual([], ds.find_candidate_devs()) + self.assertEqual(["/dev/vdb3"], + ds.find_candidate_devs()) finally: util.find_devs_with = orig_find_devs_with -- cgit v1.2.3 From 6c7e09cc8ab6955fe2c03a925d74ebdce19e7b56 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 10 Jan 2014 16:14:54 -0800 Subject: Bring back the ec2 utils, non-boto userdata/metadata reading This replacement uses our own userdata/metadata ec2 webservice parser that we can easily modify, it also automatically allows for reading the ec2 userdata/metdata from files and also brings in the usage of requests instead of boto's usage of urllib which did not support ssl properly. --- cloudinit/ec2_utils.py | 196 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 151 insertions(+), 45 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index fcd511c5..605154bc 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -16,48 +16,154 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import boto.utils as boto_utils - -# Versions of boto >= 2.6.0 (and possibly 2.5.2) -# try to lazily load the metadata backing, which -# doesn't work so well in cloud-init especially -# since the metadata is serialized and actions are -# performed where the metadata server may be blocked -# (thus the datasource will start failing) resulting -# in url exceptions when fields that do exist (or -# would have existed) do not exist due to the blocking -# that occurred. - -# TODO(harlowja): https://github.com/boto/boto/issues/1401 -# When boto finally moves to using requests, we should be able -# to provide it ssl details, it does not yet, so we can't provide them... 
- - -def _unlazy_dict(mp): - if not isinstance(mp, (dict)): - return mp - # Walk over the keys/values which - # forces boto to unlazy itself and - # has no effect on dictionaries that - # already have there items. - for (_k, v) in mp.items(): - _unlazy_dict(v) - return mp - - -def get_instance_userdata(api_version, metadata_address): - # Note: boto.utils.get_instance_metadata returns '' for empty string - # so the change from non-true to '' is not specifically necessary, but - # this way cloud-init will get consistent behavior even if boto changed - # in the future to return a None on "no user-data provided". - ud = boto_utils.get_instance_userdata(api_version, None, metadata_address) - if not ud: - ud = '' - return ud - - -def get_instance_metadata(api_version, metadata_address): - metadata = boto_utils.get_instance_metadata(api_version, metadata_address) - if not isinstance(metadata, (dict)): - metadata = {} - return _unlazy_dict(metadata) +from urlparse import (urlparse, urlunparse) + +import functools +import json +import urllib + +from cloudinit import log as logging +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +def combine_url(base, add_on): + base_parsed = list(urlparse(base)) + path = base_parsed[2] + if path and not path.endswith("/"): + path += "/" + path += urllib.quote(str(add_on), safe="/:") + base_parsed[2] = path + return urlunparse(base_parsed) + + +# See: http://bit.ly/TyoUQs +# +# Since boto metadata reader uses the old urllib which does not +# support ssl, we need to ahead and create our own reader which +# works the same as the boto one (for now). 
+class MetadataMaterializer(object): + def __init__(self, blob, base_url, caller): + self._blob = blob + self._md = None + self._base_url = base_url + self._caller = caller + + def _parse(self, blob): + leaves = {} + children = [] + if not blob: + return (leaves, children) + + def has_children(item): + if item.endswith("/"): + return True + else: + return False + + def get_name(item): + if item.endswith("/"): + return item.rstrip("/") + return item + + for field in blob.splitlines(): + field = field.strip() + field_name = get_name(field) + if not field or not field_name: + continue + if has_children(field): + if field_name not in children: + children.append(field_name) + else: + contents = field.split("=", 1) + resource = field_name + if len(contents) > 1: + # What a PITA... + (ident, sub_contents) = contents + checked_ident = util.safe_int(ident) + if checked_ident is not None: + resource = "%s/openssh-key" % (checked_ident) + field_name = sub_contents + leaves[field_name] = resource + return (leaves, children) + + def materialize(self): + if self._md is not None: + return self._md + self._md = self._materialize(self._blob, self._base_url) + return self._md + + def _decode_leaf_blob(self, blob): + if not blob: + return blob + stripped_blob = blob.strip() + if stripped_blob.startswith("{") and stripped_blob.endswith("}"): + # Assume and try with json + try: + return json.loads(blob) + except (ValueError, TypeError): + pass + if blob.find("\n") != -1: + return blob.splitlines() + return blob + + def _materialize(self, blob, base_url): + (leaves, children) = self._parse(blob) + child_contents = {} + for c in children: + child_url = combine_url(base_url, c) + if not child_url.endswith("/"): + child_url += "/" + child_blob = str(self._caller(child_url)) + child_contents[c] = self._materialize(child_blob, child_url) + leaf_contents = {} + for (field, resource) in leaves.items(): + leaf_url = combine_url(base_url, resource) + leaf_blob = str(self._caller(leaf_url)) + 
leaf_contents[field] = self._decode_leaf_blob(leaf_blob) + joined = {} + joined.update(child_contents) + for field in leaf_contents.keys(): + if field in joined: + LOG.warn("Duplicate key found in results from %s", base_url) + else: + joined[field] = leaf_contents[field] + return joined + + +def get_instance_userdata(api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5): + ud_url = combine_url(metadata_address, api_version) + ud_url = combine_url(ud_url, 'user-data') + try: + response = util.read_file_or_url(ud_url, + ssl_details=ssl_details, + timeout=timeout, + retries=retries) + return str(response) + except Exception: + util.logexc(LOG, "Failed fetching userdata from url %s", ud_url) + return None + + +def get_instance_metadata(api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5): + md_url = combine_url(metadata_address, api_version) + md_url = combine_url(md_url, 'meta-data') + caller = functools.partial(util.read_file_or_url, + ssl_details=ssl_details, timeout=timeout, + retries=retries) + + try: + response = caller(md_url) + materializer = MetadataMaterializer(str(response), md_url, caller) + md = materializer.materialize() + if not isinstance(md, (dict)): + md = {} + return md + except Exception: + util.logexc(LOG, "Failed fetching metadata from url %s", md_url) + return {} -- cgit v1.2.3 From 5cdaaee97ea850adc75a4e88dbd2578c84bded51 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 10 Jan 2014 17:17:06 -0800 Subject: Add ec2 utils tests and httpretty requirement for http mocking --- Requires | 6 +- cloudinit/ec2_utils.py | 8 +-- tests/unittests/test_ec2_util.py | 133 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 140 insertions(+), 7 deletions(-) create mode 100644 tests/unittests/test_ec2_util.py (limited to 'cloudinit') diff --git a/Requires b/Requires index f19c9691..e847506f 100644 --- a/Requires +++ b/Requires @@ -29,8 +29,8 
@@ argparse # Requests handles ssl correctly! requests -# Boto for ec2 -boto - # For patching pieces of cloud-config together jsonpatch + +# For http testing (only needed for testing) +httpretty>=0.7.1 diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 605154bc..c46adc6b 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -81,9 +81,9 @@ class MetadataMaterializer(object): if len(contents) > 1: # What a PITA... (ident, sub_contents) = contents - checked_ident = util.safe_int(ident) - if checked_ident is not None: - resource = "%s/openssh-key" % (checked_ident) + ident = util.safe_int(ident) + if ident is not None: + resource = "%s/openssh-key" % (ident) field_name = sub_contents leaves[field_name] = resource return (leaves, children) @@ -145,7 +145,7 @@ def get_instance_userdata(api_version='latest', return str(response) except Exception: util.logexc(LOG, "Failed fetching userdata from url %s", ud_url) - return None + return '' def get_instance_metadata(api_version='latest', diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py new file mode 100644 index 00000000..6d25dcf9 --- /dev/null +++ b/tests/unittests/test_ec2_util.py @@ -0,0 +1,133 @@ +from tests.unittests import helpers + +from cloudinit import ec2_utils as eu + +import httpretty as hp + + +class TestEc2Util(helpers.TestCase): + VERSION = 'latest' + + @hp.activate + def test_userdata_fetch(self): + hp.register_uri(hp.GET, + 'http://169.254.169.254/%s/user-data' % (self.VERSION), + body='stuff', + status=200) + userdata = eu.get_instance_userdata(self.VERSION) + self.assertEquals('stuff', userdata) + + @hp.activate + def test_userdata_fetch_fail_not_found(self): + hp.register_uri(hp.GET, + 'http://169.254.169.254/%s/user-data' % (self.VERSION), + status=404) + userdata = eu.get_instance_userdata(self.VERSION, retries=0) + self.assertEquals('', userdata) + + + @hp.activate + def test_userdata_fetch_fail_server_dead(self): + hp.register_uri(hp.GET, + 
'http://169.254.169.254/%s/user-data' % (self.VERSION), + status=500) + userdata = eu.get_instance_userdata(self.VERSION, retries=0) + self.assertEquals('', userdata) + + @hp.activate + def test_metadata_fetch_no_keys(self): + base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION) + hp.register_uri(hp.GET, base_url, status=200, + body="\n".join(['hostname', + 'instance-id', + 'ami-launch-index'])) + hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'), + status=200, body='ec2.fake.host.name.com') + hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'), + status=200, body='123') + hp.register_uri(hp.GET, eu.combine_url(base_url, 'ami-launch-index'), + status=200, body='1') + md = eu.get_instance_metadata(self.VERSION, retries=0) + self.assertEquals(md['hostname'], 'ec2.fake.host.name.com') + self.assertEquals(md['instance-id'], '123') + self.assertEquals(md['ami-launch-index'], '1') + + @hp.activate + def test_metadata_fetch_key(self): + base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION) + hp.register_uri(hp.GET, base_url, status=200, + body="\n".join(['hostname', + 'instance-id', + 'public-keys/'])) + hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'), + status=200, body='ec2.fake.host.name.com') + hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'), + status=200, body='123') + hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'), + status=200, body='0=my-public-key') + hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'), + status=200, body='0=my-public-key') + hp.register_uri(hp.GET, + eu.combine_url(base_url, 'public-keys/0/openssh-key'), + status=200, body='ssh-rsa AAAA.....wZEf my-public-key') + md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1) + self.assertEquals(md['hostname'], 'ec2.fake.host.name.com') + self.assertEquals(md['instance-id'], '123') + self.assertEquals(1, len(md['public-keys'])) + + @hp.activate + def test_metadata_fetch_key(self): + 
base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION) + hp.register_uri(hp.GET, base_url, status=200, + body="\n".join(['hostname', + 'instance-id', + 'public-keys/'])) + hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'), + status=200, body='ec2.fake.host.name.com') + hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'), + status=200, body='123') + hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'), + status=200, + body="\n".join(['0=my-public-key', '1=my-other-key'])) + hp.register_uri(hp.GET, + eu.combine_url(base_url, 'public-keys/0/openssh-key'), + status=200, body='ssh-rsa AAAA.....wZEf my-public-key') + hp.register_uri(hp.GET, + eu.combine_url(base_url, 'public-keys/1/openssh-key'), + status=200, body='ssh-rsa AAAA.....wZEf my-other-key') + md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1) + self.assertEquals(md['hostname'], 'ec2.fake.host.name.com') + self.assertEquals(md['instance-id'], '123') + self.assertEquals(2, len(md['public-keys'])) + + @hp.activate + def test_metadata_fetch_bdm(self): + base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION) + hp.register_uri(hp.GET, base_url, status=200, + body="\n".join(['hostname', + 'instance-id', + 'block-device-mapping/'])) + hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'), + status=200, body='ec2.fake.host.name.com') + hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'), + status=200, body='123') + hp.register_uri(hp.GET, + eu.combine_url(base_url, 'block-device-mapping/'), + status=200, + body="\n".join(['ami', 'ephemeral0'])) + hp.register_uri(hp.GET, + eu.combine_url(base_url, 'block-device-mapping/ami'), + status=200, + body="sdb") + hp.register_uri(hp.GET, + eu.combine_url(base_url, + 'block-device-mapping/ephemeral0'), + status=200, + body="sdc") + md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1) + self.assertEquals(md['hostname'], 'ec2.fake.host.name.com') + 
self.assertEquals(md['instance-id'], '123') + bdm = md['block-device-mapping'] + self.assertEquals(2, len(bdm)) + self.assertEquals(bdm['ami'], 'sdb') + self.assertEquals(bdm['ephemeral0'], 'sdc') -- cgit v1.2.3 From 4b70c9a952f0d12a3379cd5c1d930c20f5154134 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 10 Jan 2014 17:37:10 -0800 Subject: Add a maybe_json helper function --- cloudinit/ec2_utils.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index c46adc6b..525059f7 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -28,6 +28,17 @@ from cloudinit import util LOG = logging.getLogger(__name__) +def maybe_json(text): + if not text: + return False + text = text.strip() + if text.startswith("{") and text.endswith("}"): + return True + if text.startswith("[") and text.endswith("]"): + return True + return False + + def combine_url(base, add_on): base_parsed = list(urlparse(base)) path = base_parsed[2] @@ -94,16 +105,16 @@ class MetadataMaterializer(object): self._md = self._materialize(self._blob, self._base_url) return self._md - def _decode_leaf_blob(self, blob): + def _decode_leaf_blob(self, field, blob): if not blob: return blob - stripped_blob = blob.strip() - if stripped_blob.startswith("{") and stripped_blob.endswith("}"): - # Assume and try with json + if maybe_json(blob): try: + # Assume it's json, unless it fails parsing... 
return json.loads(blob) - except (ValueError, TypeError): - pass + except (ValueError, TypeError) as e: + LOG.warn("Field %s looked like json, but it was not: %s", + field, e) if blob.find("\n") != -1: return blob.splitlines() return blob @@ -121,7 +132,7 @@ class MetadataMaterializer(object): for (field, resource) in leaves.items(): leaf_url = combine_url(base_url, resource) leaf_blob = str(self._caller(leaf_url)) - leaf_contents[field] = self._decode_leaf_blob(leaf_blob) + leaf_contents[field] = self._decode_leaf_blob(field, leaf_blob) joined = {} joined.update(child_contents) for field in leaf_contents.keys(): -- cgit v1.2.3 From 6f8c1d62e406675ad8524ce4fa97eac958d42238 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 10 Jan 2014 17:38:11 -0800 Subject: Only check for json objects instead of also arrays --- cloudinit/ec2_utils.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 525059f7..1fd674bb 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -28,14 +28,12 @@ from cloudinit import util LOG = logging.getLogger(__name__) -def maybe_json(text): +def maybe_json_object(text): if not text: return False text = text.strip() if text.startswith("{") and text.endswith("}"): return True - if text.startswith("[") and text.endswith("]"): - return True return False @@ -108,7 +106,7 @@ class MetadataMaterializer(object): def _decode_leaf_blob(self, field, blob): if not blob: return blob - if maybe_json(blob): + if maybe_json_object(blob): try: # Assume it's json, unless it fails parsing... 
return json.loads(blob) -- cgit v1.2.3 From 3e5e7f563837685aeabc2fc67dd6cbb9fc619b0a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 10 Jan 2014 17:38:54 -0800 Subject: Updated non-json message --- cloudinit/ec2_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 1fd674bb..c86623bc 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -111,8 +111,8 @@ class MetadataMaterializer(object): # Assume it's json, unless it fails parsing... return json.loads(blob) except (ValueError, TypeError) as e: - LOG.warn("Field %s looked like json, but it was not: %s", - field, e) + LOG.warn("Field %s looked like a json object, but it was" + " not: %s", field, e) if blob.find("\n") != -1: return blob.splitlines() return blob -- cgit v1.2.3 From 22f4ce4476a292f32c26f3d965f63145a644a164 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 15 Jan 2014 15:12:48 -0500 Subject: header, comment cleanup --- cloudinit/config/cc_scripts_vendor.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py index 5809a4ba..0168f668 100644 --- a/cloudinit/config/cc_scripts_vendor.py +++ b/cloudinit/config/cc_scripts_vendor.py @@ -1,11 +1,8 @@ # vi: ts=4 expandtab # -# Copyright (C) 2011-2014 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2014 Canonical Ltd. # -# Author: Scott Moser # Author: Ben Howard -# Author: Juerg Haefliger # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -31,9 +28,8 @@ SCRIPT_SUBDIR = 'vendor' def handle(name, _cfg, cloud, log, _args): - # This is written to by the user data handlers - # Ie, any custom shell scripts that come down - # go here... 
+ # This is written to by the vendor data handlers + # any vendor data shell scripts get placed in runparts_path runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts', SCRIPT_SUBDIR) try: -- cgit v1.2.3 From 42f2cffadabfefb0469ade2f1f1c3ce5edabc9fa Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 15 Jan 2014 15:42:02 -0500 Subject: remove vendor-scripts-per-{boot,instance,once} I don't see a real need for these. The intent of the 'per-boot' or 'per-instance' or 'per-once' config modules is to handle running scripts that were already inserted into the instance. If the vendor is doing that, then there is value in vendor-data. Ie, they'd already modified the image, they might as well have just put the stuff in that they wanted. --- cloudinit/config/cc_vendor_scripts_per_boot.py | 43 ---------------------- cloudinit/config/cc_vendor_scripts_per_instance.py | 43 ---------------------- cloudinit/config/cc_vendor_scripts_per_once.py | 43 ---------------------- config/cloud.cfg | 3 -- 4 files changed, 132 deletions(-) delete mode 100644 cloudinit/config/cc_vendor_scripts_per_boot.py delete mode 100644 cloudinit/config/cc_vendor_scripts_per_instance.py delete mode 100644 cloudinit/config/cc_vendor_scripts_per_once.py (limited to 'cloudinit') diff --git a/cloudinit/config/cc_vendor_scripts_per_boot.py b/cloudinit/config/cc_vendor_scripts_per_boot.py deleted file mode 100644 index 80446e99..00000000 --- a/cloudinit/config/cc_vendor_scripts_per_boot.py +++ /dev/null @@ -1,43 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2011-2014 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# -# Author: Scott Moser -# Author: Ben Howard -# Author: Juerg Haefliger -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import os - -from cloudinit import util - -from cloudinit.settings import PER_ALWAYS - -frequency = PER_ALWAYS - -SCRIPT_SUBDIR = 'per-boot' - - -def handle(name, cfg, cloud, log, _args): - runparts_path = os.path.join(cloud.get_cpath(), 'scripts', 'vendor', - SCRIPT_SUBDIR) - vendor_prefix = util.get_nested_option_as_list(cfg, 'vendor_data', - 'prefix') - try: - util.runparts(runparts_path, exe_prefix=vendor_prefix) - except: - log.warn("Failed to run module %s (%s in %s)", - name, SCRIPT_SUBDIR, runparts_path) - raise diff --git a/cloudinit/config/cc_vendor_scripts_per_instance.py b/cloudinit/config/cc_vendor_scripts_per_instance.py deleted file mode 100644 index 2d27a0c4..00000000 --- a/cloudinit/config/cc_vendor_scripts_per_instance.py +++ /dev/null @@ -1,43 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2011-2014 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# -# Author: Scott Moser -# Author: Ben Howard -# Author: Juerg Haefliger -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -import os - -from cloudinit import util - -from cloudinit.settings import PER_INSTANCE - -frequency = PER_INSTANCE - -SCRIPT_SUBDIR = 'per-instance' - - -def handle(name, cfg, cloud, log, _args): - runparts_path = os.path.join(cloud.get_cpath(), 'scripts', 'vendor', - SCRIPT_SUBDIR) - vendor_prefix = util.get_nested_option_as_list(cfg, 'vendor_data', - 'prefix') - try: - util.runparts(runparts_path, exe_prefix=vendor_prefix) - except: - log.warn("Failed to run module %s (%s in %s)", - name, SCRIPT_SUBDIR, runparts_path) - raise diff --git a/cloudinit/config/cc_vendor_scripts_per_once.py b/cloudinit/config/cc_vendor_scripts_per_once.py deleted file mode 100644 index ad3e13c8..00000000 --- a/cloudinit/config/cc_vendor_scripts_per_once.py +++ /dev/null @@ -1,43 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2011-2014 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# -# Author: Scott Moser -# Author: Ben Howard -# Author: Juerg Haefliger -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -import os - -from cloudinit import util - -from cloudinit.settings import PER_ONCE - -frequency = PER_ONCE - -SCRIPT_SUBDIR = 'per-once' - - -def handle(name, cfg, cloud, log, _args): - runparts_path = os.path.join(cloud.get_cpath(), 'scripts', 'vendor', - SCRIPT_SUBDIR) - vendor_prefix = util.get_nested_option_as_list(cfg, 'vendor_data', - 'prefix') - try: - util.runparts(runparts_path, exe_prefix=vendor_prefix) - except: - log.warn("Failed to run module %s (%s in %s)", - name, SCRIPT_SUBDIR, runparts_path) - raise diff --git a/config/cloud.cfg b/config/cloud.cfg index 8ed3522d..b746e3db 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -64,9 +64,6 @@ cloud_config_modules: # The modules that run in the 'final' stage cloud_final_modules: - rightscale_userdata - - vendor-scripts-per-once - - vendor-scripts-per-boot - - vendor-scripts-per-instance - scripts-vendor - scripts-per-once - scripts-per-boot -- cgit v1.2.3 From 9e19b276fdb6cf9c1f5252f6f9bdcba076c5f09e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 15 Jan 2014 17:13:24 -0500 Subject: replace get_nested_option_as_list with get_cfg_by_path, improve ruparts this makes runparts take exe_prefix and do string to list conversion inside. that means we don't have to do it in cc_scripts_vendor. Also, get_nested_option_as_list was essentially get_cfg_by_path anyway. 
--- cloudinit/config/cc_scripts_vendor.py | 7 +++++-- cloudinit/util.py | 38 ++++++++++------------------------- 2 files changed, 16 insertions(+), 29 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py index 0168f668..0c9e504e 100644 --- a/cloudinit/config/cc_scripts_vendor.py +++ b/cloudinit/config/cc_scripts_vendor.py @@ -27,13 +27,16 @@ frequency = PER_INSTANCE SCRIPT_SUBDIR = 'vendor' -def handle(name, _cfg, cloud, log, _args): +def handle(name, cfg, cloud, log, _args): # This is written to by the vendor data handlers # any vendor data shell scripts get placed in runparts_path runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts', SCRIPT_SUBDIR) + + prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), []) + try: - util.runparts(runparts_path) + util.runparts(runparts_path, exe_prefix=prefix) except: log.warn("Failed to run module %s (%s in %s)", name, SCRIPT_SUBDIR, runparts_path) diff --git a/cloudinit/util.py b/cloudinit/util.py index 6b30af5e..3ce54f28 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -614,15 +614,22 @@ def runparts(dirp, skip_no_exist=True, exe_prefix=None): failed = [] attempted = [] + + if exe_prefix is None: + prefix = [] + elif isinstance(exe_prefix, str): + prefix = [str(exe_prefix)] + elif isinstance(exe_prefix, list): + prefix = exe_prefix + else: + raise TypeError("exe_prefix must be None, str, or list") + for exe_name in sorted(os.listdir(dirp)): exe_path = os.path.join(dirp, exe_name) if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK): attempted.append(exe_path) try: - exe_cmd = exe_prefix - if isinstance(exe_prefix, list): - exe_cmd.extend(exe_path) - subp([exe_cmd], capture=False) + subp(prefix + [exe_path], capture=False) except ProcessExecutionError as e: logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code) failed.append(e) @@ -1852,26 +1859,3 @@ def expand_dotted_devname(dotted): return toks else: return 
(dotted, None) - - -def get_nested_option_as_list(dct, first, second): - """ - Return a nested option from a dict as a list - """ - if not isinstance(dct, dict): - raise TypeError("get_nested_option_as_list only works with dicts") - root = dct.get(first) - if not isinstance(root, dict): - return None - - token = root.get(second) - if isinstance(token, list): - return token - elif isinstance(token, dict): - ret_list = [] - for k, v in dct.iteritems(): - ret_list.append((k, v)) - return ret_list - elif isinstance(token, str): - return token.split() - return None -- cgit v1.2.3 From 16cb9ef67725091235ddafd70b43bf715020d504 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 16 Jan 2014 16:53:21 -0500 Subject: cloudinit/settings.py: add vendor_data to built in config --- cloudinit/settings.py | 1 + 1 file changed, 1 insertion(+) (limited to 'cloudinit') diff --git a/cloudinit/settings.py b/cloudinit/settings.py index 5df7f557..7be2199a 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -52,6 +52,7 @@ CFG_BUILTIN = { }, 'distro': 'ubuntu', }, + 'vendor_data': {'enabled': True, 'prefix': []}, } # Valid frequencies of handlers/modules -- cgit v1.2.3 From 8a952c7c7797e2a1dfcd2be1c3a983de767de04e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 16 Jan 2014 16:54:23 -0500 Subject: DataSource: remove has_vendordata and consume_vendordata, drop filters remove apply_filter from get_vendordata. I can't think of a good reason to filter vendor-data per instance-id. remove has_vendordata and consume_vendordata. has vendordata is always "true", whether or not there is something to operate is determined by: if ds.vendordata_raw() consume_vendordata is based on config entirely. 
--- cloudinit/sources/__init__.py | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index a7c7993f..7e11c1ca 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -82,25 +82,11 @@ class DataSource(object): return self._filter_xdata(self.userdata) return self.userdata - def get_vendordata(self, apply_filter=False): + def get_vendordata(self) if self.vendordata is None: self.vendordata = self.ud_proc.process(self.get_vendordata_raw()) - if apply_filter: - return self._filter_xdata(self.vendordata) return self.vendordata - def has_vendordata(self): - if self.vendordata_raw is not None: - return True - return False - - def consume_vendordata(self): - """ - The datasource may allow for consumption of vendordata, but only - when the datasource has allowed it. The default is false. - """ - return False - @property def launch_index(self): if not self.metadata: -- cgit v1.2.3 From 8209b21fc29c7d8585b8925a4deb929639797f9b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 16 Jan 2014 16:57:21 -0500 Subject: simplify consume_vendordata, move exclusion, consume_vendordata per instance this simplifies consume_vendordata a bit, changes consume_vendordata to be by default "PER_INSTANCE" (like userdata). The other thing we do here is move the exlusion to be handled when walking the data. The benefit of doing it then is that we can exclude part-handlers. Without the ability to exlude part handlers, exclusion is basically useless (since part-handlers could accomplish anything they wanted). 
--- cloudinit/handlers/__init__.py | 4 ++ cloudinit/stages.py | 130 +++++++++++++++-------------------------- 2 files changed, 52 insertions(+), 82 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 2ddc75f4..059d7495 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -187,6 +187,10 @@ def _escape_string(text): def walker_callback(data, filename, payload, headers): content_type = headers['Content-Type'] + if content_type in data.get('excluded'): + LOG.debug('content_type "%s" is excluded', content_type) + return + if content_type in PART_CONTENT_TYPES: walker_handle_handler(data, content_type, filename, payload) return diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 043b3257..19fbe706 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -338,56 +338,40 @@ class Init(object): processed_vd = "%s" % (self.datasource.get_vendordata()) util.write_file(self._get_ipath('vendordata'), processed_vd, 0600) - def _get_default_handlers(self, user_data=False, vendor_data=False, - excluded=None): - opts = { + def _default_handlers(self, opts=None): + if opts is None: + opts = {} + + opts.update({ 'paths': self.paths, 'datasource': self.datasource, - } - - def conditional_get(cls, mod): - cls_name = cls.__name__.split('.')[-1] - _mod = getattr(cls, mod) - if not excluded: - return _mod(**opts) - - if cls_name not in excluded: - _mod = getattr(cls, mod) - return _mod(**opts) - + }) # TODO(harlowja) Hmmm, should we dynamically import these?? 
def_handlers = [ - conditional_get(bh_part, 'BootHookPartHandler'), - conditional_get(up_part, 'UpstartJobPartHandler'), + cc_part.CloudConfigPartHandler(**opts), + ss_part.ShellScriptPartHandler(**opts), + bh_part.BootHookPartHandler(**opts), + up_part.UpstartJobPartHandler(**opts), ] - - # Add in the shell script part handler - if user_data: - def_handlers.extend([ - conditional_get(cc_part, 'CloudConfigPartHandler'), - conditional_get(ss_part, 'ShellScriptPartHandler')]) - - # This changes the path for the vendor script execution - if vendor_data: - opts['script_path'] = "vendor_scripts" - opts['cloud_config_path'] = "vendor_cloud_config" - def_handlers.extend([ - conditional_get(cc_part, 'CloudConfigPartHandler'), - conditional_get(ss_part, 'ShellScriptPartHandler')]) - - return [x for x in def_handlers if x is not None] + return def_handlers def _default_userdata_handlers(self): - return self._get_default_handlers(user_data=True) + return self._default_handlers() - def _default_vendordata_handlers(self, excluded=None): - return self._get_default_handlers(vendor_data=True, excluded=excluded) + def _default_vendordata_handlers(self): + return self._default_handlers( + opts={'script_path': 'vendor_scripts', + 'cloud_config_path': 'vendor_cloud_config'}) - def _do_handlers(self, data_msg, c_handlers_list, frequency): + def _do_handlers(self, data_msg, c_handlers_list, frequency, + excluded=None): """ Generalized handlers suitable for use with either vendordata or userdata """ + if excluded is None: + excluded = [] + cdir = self.paths.get_cpath("handlers") idir = self._get_ipath("handlers") @@ -450,7 +434,7 @@ class Init(object): handlers.call_begin(mod, data, frequency) c_handlers.initialized.append(mod) - def walk_handlers(): + def walk_handlers(excluded): # Walk the user data part_data = { 'handlers': c_handlers, @@ -463,9 +447,9 @@ class Init(object): # to help write there contents to files with numbered # names... 
'handlercount': 0, + 'excluded': excluded, } - handlers.walk(data_msg, handlers.walker_callback, - data=part_data) + handlers.walk(data_msg, handlers.walker_callback, data=part_data) def finalize_handlers(): # Give callbacks opportunity to finalize @@ -482,7 +466,7 @@ class Init(object): try: init_handlers() - walk_handlers() + walk_handlers(excluded) finally: finalize_handlers() @@ -503,67 +487,49 @@ class Init(object): # objects before the load of the userdata happened, # this is expected. - def _consume_vendordata(self, frequency=PER_ALWAYS): + def _consume_vendordata(self, frequency=PER_INSTANCE): """ Consume the vendordata and run the part handlers on it """ - if not self.datasource.has_vendordata(): - LOG.info("datasource did not provide vendor data") + # User-data should have been consumed first. + # So we merge the other available cloud-configs (everything except + # vendor provided), and check whether or not we should consume + # vendor data at all. That gives user or system a chance to override. + if not self.datasource.get_vendordata_raw(): + LOG.debug("no vendordata from datasource") return - # User-data should have been consumed first. If it has, then we can - # read it and simply parse it. This means that the datasource can - # define if the vendordata can be consumed too....i.e this method - # gives us a lot of flexibility. _cc_merger = helpers.ConfigMerger(paths=self._paths, datasource=self.datasource, additional_fns=[], base_cfg=self.cfg, include_vendor=False) - _cc = _cc_merger.cfg - - if not self.datasource.consume_vendordata(): - if not isinstance(_cc, dict): - LOG.info(("userdata does explicitly allow vendordata " - "consumption")) - return - - if 'vendor_data' not in _cc: - LOG.info(("no 'vendor_data' directive found in the" - "conf files. 
Skipping consumption of vendordata")) - return + vdcfg = _cc_merger.cfg.get('vendor_data', {}) - # This allows for the datasource to signal explicit conditions when - # when the user has opted in to user-data - if self.datasource.consume_vendordata(): - LOG.info(("datasource has indicated that vendordata that user" - " opted-in via another channel")) + if not isinstance(vdcfg, dict): + vdcfg = {'enabled': False} + LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg) - vdc = _cc.get('vendor_data') - no_handlers = None - if isinstance(vdc, dict): - enabled = vdc.get('enabled') - no_handlers = vdc.get('no_run') + if not util.is_true(vdcfg.get('enabled')): + LOG.debug("vendordata consumption is disabled.") + return - if enabled is None: - LOG.info("vendordata will not be consumed: user has not opted-in") - return - elif util.is_false(enabled): - LOG.info("user has requested NO vendordata consumption") - return + enabled = vdc.get('enabled') + no_handlers = vdc.get('disabled_handlers', None) - LOG.info("vendor data will be consumed") + LOG.debug("vendor data will be consumed. disabled_handlers=%s", + no_handlers) # Ensure vendordata source fetched before activation (just incase) - vendor_data_msg = self.datasource.get_vendordata(True) + vendor_data_msg = self.datasource.get_vendordata() # This keeps track of all the active handlers, while excluding what the # users doesn't want run, i.e. 
boot_hook, cloud_config, shell_script - c_handlers_list = self._default_vendordata_handlers( - excluded=no_handlers) + c_handlers_list = self._default_vendordata_handlers() # Run the handlers - self._do_handlers(vendor_data_msg, c_handlers_list, frequency) + self._do_handlers(vendor_data_msg, c_handlers_list, frequency, + excluded=no_handlers) def _consume_userdata(self, frequency=PER_INSTANCE): """ @@ -574,7 +540,7 @@ class Init(object): user_data_msg = self.datasource.get_userdata(True) # This keeps track of all the active handlers - c_handlers_list = self._default_userdata_handlers() + c_handlers_list = self._default_handlers() # Run the handlers self._do_handlers(user_data_msg, c_handlers_list, frequency) -- cgit v1.2.3 From b94c9790e055960fccf3b159d86db85ef37fb34f Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Thu, 16 Jan 2014 16:32:57 -0700 Subject: Fixed typos --- cloudinit/sources/DataSourceSmartOS.py | 2 +- cloudinit/sources/__init__.py | 2 +- cloudinit/stages.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index ccfee931..6593ce6e 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -155,7 +155,7 @@ class DataSourceSmartOS(sources.DataSource): self.metadata = util.mergemanydict([md, self.metadata]) self.userdata_raw = ud - self.vendordata_raw = vendordata + self.vendordata_raw = md['vendordata'] return True def device_name_to_device(self, name): diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 7e11c1ca..4b3bf62f 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -82,7 +82,7 @@ class DataSource(object): return self._filter_xdata(self.userdata) return self.userdata - def get_vendordata(self) + def get_vendordata(self): if self.vendordata is None: self.vendordata = self.ud_proc.process(self.get_vendordata_raw()) return 
self.vendordata diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 19fbe706..5dced998 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -514,8 +514,8 @@ class Init(object): LOG.debug("vendordata consumption is disabled.") return - enabled = vdc.get('enabled') - no_handlers = vdc.get('disabled_handlers', None) + enabled = vdcfg.get('enabled') + no_handlers = vdcfg.get('disabled_handlers', None) LOG.debug("vendor data will be consumed. disabled_handlers=%s", no_handlers) -- cgit v1.2.3 From 92aa725a284c08be9234bd792227e5896c4b1d1c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 16 Jan 2014 20:11:27 -0500 Subject: DataSourceOpenNebula:parse_shell_config skip 'SECONDS' var if seen SECONDS is a special variable in bash, it gets set to the time the shell has been alive. This would cause us to fail randomly (if the process happened to take more than 1 second, then SECONDS would be defined). --- cloudinit/sources/DataSourceOpenNebula.py | 2 +- tests/unittests/test_datasource/test_opennebula.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 07dc25ff..b0464cbb 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -323,7 +323,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None, (output, _error) = util.subp(cmd, data=bcmd) # exclude vars in bash that change on their own or that we used - excluded = ("RANDOM", "LINENO", "_", "__v") + excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v") preset = {} ret = {} target = None diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index e1812a88..ce9ee9f4 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -258,6 +258,14 @@ iface eth0 inet static ''') +class 
TestParseShellConfig(MockerTestCase): + def test_no_seconds(self): + cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"]) + # we could test 'sleep 2', but that would make the test run slower. + ret = ds.parse_shell_config(cfg); + self.assertEqual(ret, {"foo": "bar", "xx": "foo"}) + + def populate_context_dir(path, variables): data = "# Context variables generated by OpenNebula\n" for (k, v) in variables.iteritems(): -- cgit v1.2.3 From 91d15b934b655fc56af416309ee020caac24ca3f Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Fri, 17 Jan 2014 08:27:09 -0700 Subject: pep8 and pylint fixes; typo fix for documentation --- cloudinit/stages.py | 11 +++++------ doc/vendordata.txt | 2 +- tests/unittests/test_data.py | 4 +--- 3 files changed, 7 insertions(+), 10 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 5dced998..8ae0bc0d 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -26,8 +26,7 @@ import copy import os import sys -from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES, - CLOUD_CONFIG) +from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG) from cloudinit import handlers @@ -510,13 +509,13 @@ class Init(object): vdcfg = {'enabled': False} LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg) - if not util.is_true(vdcfg.get('enabled')): - LOG.debug("vendordata consumption is disabled.") - return - enabled = vdcfg.get('enabled') no_handlers = vdcfg.get('disabled_handlers', None) + if not util.is_true(enabled): + LOG.debug("vendordata consumption is disabled.") + return + LOG.debug("vendor data will be consumed. disabled_handlers=%s", no_handlers) diff --git a/doc/vendordata.txt b/doc/vendordata.txt index 63a6c999..530e3b4c 100644 --- a/doc/vendordata.txt +++ b/doc/vendordata.txt @@ -20,7 +20,7 @@ user can accidently override choices. Further, we strongly advise vendors to not 'be evil'. By evil, we mean any action that could compromise a system. 
Since users trust you, please take care to make sure that any vendordata is safe, -atomic, indopenant and does not put your users at risk. +atomic, idempotent and does not put your users at risk. cloud-init can read this input and act on it in different ways. diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index a753debf..68729c57 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -264,13 +264,11 @@ vendor_data: freq=PER_INSTANCE) mods = stages.Modules(initer) (_which_ran, _failures) = mods.run_section('cloud_init_modules') - cfg = mods.cfg + _cfg = mods.cfg vendor_script = initer.paths.get_ipath_cur('vendor_scripts') vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script) self.assertTrue(os.path.exists(vendor_script_fns)) - - def test_merging_cloud_config(self): blob = ''' #cloud-config -- cgit v1.2.3 From 98fd17c55b637f4e1d136c954567c1d9b23e6c20 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 17 Jan 2014 11:16:56 -0500 Subject: remove support for resizing via 'parted resizepart' This was previously broken anyway. It doesn't seem like there was an easy way to actually support it, so for now I'm removing it entirely. growpart works well enough. --- ChangeLog | 2 + cloudinit/config/cc_growpart.py | 28 +---------- doc/examples/cloud-config-growpart.txt | 4 +- .../test_handler/test_handler_growpart.py | 55 ++-------------------- 4 files changed, 9 insertions(+), 80 deletions(-) (limited to 'cloudinit') diff --git a/ChangeLog b/ChangeLog index 1c240c68..46a27df3 100644 --- a/ChangeLog +++ b/ChangeLog @@ -11,6 +11,8 @@ unicode). - config/cloud.cfg.d/05_logging.cfg: provide a default 'output' setting, to redirect cloud-init stderr and stdout /var/log/cloud-init-output.log. + - drop support for resizing partitions with parted entirely (LP: #1212492). + This was broken as it was anyway. 
0.7.4: - fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a partitioned block device with target filesystem on ephemeral0.1. diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 0dd92a46..6bddf847 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -80,30 +80,6 @@ class ResizeFailedException(Exception): pass -class ResizeParted(object): - def available(self): - myenv = os.environ.copy() - myenv['LANG'] = 'C' - - try: - (out, _err) = util.subp(["parted", "--help"], env=myenv) - if re.search(r"COMMAND.*resizepart\s+", out, re.DOTALL): - return True - - except util.ProcessExecutionError: - pass - return False - - def resize(self, diskdev, partnum, partdev): - before = get_size(partdev) - try: - util.subp(["parted", diskdev, "resizepart", partnum]) - except util.ProcessExecutionError as e: - raise ResizeFailedException(e) - - return (before, get_size(partdev)) - - class ResizeGrowPart(object): def available(self): myenv = os.environ.copy() @@ -279,6 +255,4 @@ def handle(_name, cfg, _cloud, log, _args): else: log.debug("'%s' %s: %s" % (entry, action, msg)) -# LP: 1212444 FIXME re-order and favor ResizeParted -#RESIZERS = (('growpart', ResizeGrowPart),) -RESIZERS = (('growpart', ResizeGrowPart), ('parted', ResizeParted)) +RESIZERS = (('growpart', ResizeGrowPart),) diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt index a459573d..393d5164 100644 --- a/doc/examples/cloud-config-growpart.txt +++ b/doc/examples/cloud-config-growpart.txt @@ -5,12 +5,10 @@ # # mode: # values: -# * auto: use any option possible (growpart or parted) +# * auto: use any option possible (any available) # if none are available, do not warn, but debug. # * growpart: use growpart to grow partitions # if growpart is not available, this is an error. -# * parted: use parted (parted resizepart) to resize partitions -# if parted is not available, this is an error. 
# * off, false # # devices: diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index c0497e08..996526d3 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -12,50 +12,9 @@ import re import unittest # growpart: -# mode: auto # off, on, auto, 'growpart', 'parted' +# mode: auto # off, on, auto, 'growpart' # devices: ['root'] -HELP_PARTED_NO_RESIZE = """ -Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...] -Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in -interactive mode. - -OPTIONs: - - -COMMANDs: - - quit exit program - rescue START END rescue a lost partition near START - and END - resize NUMBER START END resize partition NUMBER and its file - system - rm NUMBER delete partition NUMBER - -Report bugs to bug-parted@gnu.org -""" - -HELP_PARTED_RESIZE = """ -Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...] -Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in -interactive mode. 
- -OPTIONs: - - -COMMANDs: - - quit exit program - rescue START END rescue a lost partition near START - and END - resize NUMBER START END resize partition NUMBER and its file - system - resizepart NUMBER END resize partition NUMBER - rm NUMBER delete partition NUMBER - -Report bugs to bug-parted@gnu.org -""" - HELP_GROWPART_RESIZE = """ growpart disk partition rewrite partition table so that partition takes up all the space it can @@ -122,11 +81,8 @@ class TestConfig(MockerTestCase): # Order must be correct self.mocker.order() - @unittest.skip("until LP: #1212444 fixed") def test_no_resizers_auto_is_fine(self): subp = self.mocker.replace(util.subp, passthrough=False) - subp(['parted', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_PARTED_NO_RESIZE, "")) subp(['growpart', '--help'], env={'LANG': 'C'}) self.mocker.result((HELP_GROWPART_NO_RESIZE, "")) self.mocker.replay() @@ -144,15 +100,14 @@ class TestConfig(MockerTestCase): self.assertRaises(ValueError, self.handle, self.name, config, self.cloud_init, self.log, self.args) - @unittest.skip("until LP: #1212444 fixed") - def test_mode_auto_prefers_parted(self): + def test_mode_auto_prefers_growpart(self): subp = self.mocker.replace(util.subp, passthrough=False) - subp(['parted', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_PARTED_RESIZE, "")) + subp(['growpart', '--help'], env={'LANG': 'C'}) + self.mocker.result((HELP_GROWPART_RESIZE, "")) self.mocker.replay() ret = cc_growpart.resizer_factory(mode="auto") - self.assertTrue(isinstance(ret, cc_growpart.ResizeParted)) + self.assertTrue(isinstance(ret, cc_growpart.ResizeGrowPart)) def test_handle_with_no_growpart_entry(self): #if no 'growpart' entry in config, then mode=auto should be used -- cgit v1.2.3 From 63a3fb1912f721ff161d75d956f6172a31971956 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 17 Jan 2014 15:12:31 -0500 Subject: remove unused mergedvendoruser --- cloudinit/helpers.py | 1 - 1 file changed, 1 deletion(-) (limited to 
'cloudinit') diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index f9da697c..e701126e 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -350,7 +350,6 @@ class Paths(object): "data": "data", "vendordata_raw": "vendor-data.txt", "vendordata": "vendor-data.txt.i", - "mergedvendoruser": "vendor-user-data.txt", } # Set when a datasource becomes active self.datasource = ds -- cgit v1.2.3 From 1729b161c7569ec60ac6102a046e0b8c22457b7c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 17 Jan 2014 15:20:21 -0500 Subject: remove creation of some vestigial dirs --- cloudinit/stages.py | 3 --- 1 file changed, 3 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 8ae0bc0d..593b72a2 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -124,9 +124,6 @@ class Init(object): os.path.join(c_dir, 'scripts', 'per-once'), os.path.join(c_dir, 'scripts', 'per-boot'), os.path.join(c_dir, 'scripts', 'vendor'), - os.path.join(c_dir, 'scripts', 'vendor', 'per-boot'), - os.path.join(c_dir, 'scripts', 'vendor', 'per-instance'), - os.path.join(c_dir, 'scripts', 'vendor', 'per-once'), os.path.join(c_dir, 'seed'), os.path.join(c_dir, 'instances'), os.path.join(c_dir, 'handlers'), -- cgit v1.2.3 From cbc5af7396743e014f1ad9ece0bb56820d26f484 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 17 Jan 2014 17:08:58 -0500 Subject: more boto removal. move httpretty from 'Requires' the Requires would get that string rendered into the package's Depends/Requires (rather than BuildDepends/BuildRequires). We should have BuildDepends/BuildRequires too, but since trunk's package builds do not run 'make test', this isn't a big deal. This also adds 'test-requires' for httpretty. 
--- Requires | 3 --- cloudinit/ec2_utils.py | 3 --- doc/rtd/topics/datasources.rst | 4 ---- packages/bddeb | 1 - packages/brpm | 2 -- packages/debian/copyright | 22 ---------------------- test-requires | 1 + 7 files changed, 1 insertion(+), 35 deletions(-) create mode 100644 test-requires (limited to 'cloudinit') diff --git a/Requires b/Requires index e847506f..8f695c68 100644 --- a/Requires +++ b/Requires @@ -31,6 +31,3 @@ requests # For patching pieces of cloud-config together jsonpatch - -# For http testing (only needed for testing) -httpretty>=0.7.1 diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index c86623bc..92a22747 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -49,9 +49,6 @@ def combine_url(base, add_on): # See: http://bit.ly/TyoUQs # -# Since boto metadata reader uses the old urllib which does not -# support ssl, we need to ahead and create our own reader which -# works the same as the boto one (for now). class MetadataMaterializer(object): def __init__(self, blob, base_url, caller): self._blob = blob diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 5543ed34..cc0d0ede 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -130,10 +130,6 @@ To see which versions are supported from your cloud provider use the following U ... latest -**Note:** internally in cloudinit the `boto`_ library used to fetch the instance -userdata and instance metadata, feel free to check that library out, it provides -many other useful EC2 functionality. - --------------------------- Config Drive --------------------------- diff --git a/packages/bddeb b/packages/bddeb index f52eb55f..9552aa40 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -29,7 +29,6 @@ import argparse # file pypi package name to a debian/ubuntu package name. 
PKG_MP = { 'argparse': 'python-argparse', - 'boto': 'python-boto', 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', 'jsonpatch': 'python-jsonpatch | python-json-patch', diff --git a/packages/brpm b/packages/brpm index 8c90a0ab..f8ba1db1 100755 --- a/packages/brpm +++ b/packages/brpm @@ -36,7 +36,6 @@ from cloudinit import util PKG_MP = { 'redhat': { 'argparse': 'python-argparse', - 'boto': 'python-boto', 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', 'jsonpatch': 'python-jsonpatch', @@ -48,7 +47,6 @@ PKG_MP = { }, 'suse': { 'argparse': 'python-argparse', - 'boto': 'python-boto', 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', 'jsonpatch': 'python-jsonpatch', diff --git a/packages/debian/copyright b/packages/debian/copyright index dc993525..f55bb7a3 100644 --- a/packages/debian/copyright +++ b/packages/debian/copyright @@ -27,25 +27,3 @@ License: GPL-3 The complete text of the GPL version 3 can be seen in /usr/share/common-licenses/GPL-3. - -Files: cloudinit/boto_utils.py -Copyright: 2006,2007, Mitch Garnaat http://garnaat.org/ -License: MIT - Permission is hereby granted, free of charge, to any person obtaining a - copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, dis- - tribute, sublicense, and/or sell copies of the Software, and to permit - persons to whom the Software is furnished to do so, subject to the fol- - lowing conditions: - - The above copyright notice and this permission notice shall be included - in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- - ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT - SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - IN THE SOFTWARE. diff --git a/test-requires b/test-requires new file mode 100644 index 00000000..6cee1c44 --- /dev/null +++ b/test-requires @@ -0,0 +1 @@ +httpretty>=0.7.1 -- cgit v1.2.3 From 31ece5e92797bf20141878de5dd7cd91559cb336 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 22 Jan 2014 12:04:39 -0800 Subject: Split net-parsing into own module The ubuntu/debian networking file parsing function really is more generic than just a rhel utility function and can be used by others that want to use this functionality for there own purposes (say in writing down a freebsd network format instead) so moving this to its own module to encourage its usage outside of rhel. --- cloudinit/distros/net_util.py | 110 +++++++++++++++++++++++++++++++++++++++++ cloudinit/distros/rhel.py | 4 +- cloudinit/distros/rhel_util.py | 88 --------------------------------- cloudinit/distros/sles.py | 4 +- 4 files changed, 116 insertions(+), 90 deletions(-) create mode 100644 cloudinit/distros/net_util.py (limited to 'cloudinit') diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py new file mode 100644 index 00000000..4c9095be --- /dev/null +++ b/cloudinit/distros/net_util.py @@ -0,0 +1,110 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Canonical Ltd. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Scott Moser +# Author: Juerg Haefliger +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# This is a util function to translate debian based distro interface blobs as +# given in /etc/network/interfaces to an *somewhat* agnostic format for +# distributions that use other formats. +# +# TODO(harlowja) remove when we have python-netcf active... +def translate_network(settings): + # Get the standard cmd, args from the ubuntu format + entries = [] + for line in settings.splitlines(): + line = line.strip() + if not line or line.startswith("#"): + continue + split_up = line.split(None, 1) + if len(split_up) <= 1: + continue + entries.append(split_up) + # Figure out where each iface section is + ifaces = [] + consume = {} + for (cmd, args) in entries: + if cmd == 'iface': + if consume: + ifaces.append(consume) + consume = {} + consume[cmd] = args + else: + consume[cmd] = args + # Check if anything left over to consume + absorb = False + for (cmd, args) in consume.iteritems(): + if cmd == 'iface': + absorb = True + if absorb: + ifaces.append(consume) + # Now translate + real_ifaces = {} + for info in ifaces: + if 'iface' not in info: + continue + iface_details = info['iface'].split(None) + dev_name = None + if len(iface_details) >= 1: + dev = iface_details[0].strip().lower() + if dev: + dev_name = dev + if not dev_name: + continue + iface_info = {} + if len(iface_details) >= 3: + proto_type = iface_details[2].strip().lower() + # Seems like this can be 'loopback' which we don't + # really care about + if proto_type in ['dhcp', 'static']: + iface_info['bootproto'] = proto_type + # These can just be copied over + for k in ['netmask', 'address', 'gateway', 'broadcast']: + if k in info: + val = 
info[k].strip().lower() + if val: + iface_info[k] = val + # Name server info provided?? + if 'dns-nameservers' in info: + iface_info['dns-nameservers'] = info['dns-nameservers'].split() + # Name server search info provided?? + if 'dns-search' in info: + iface_info['dns-search'] = info['dns-search'].split() + # Is any mac address spoofing going on?? + if 'hwaddress' in info: + hw_info = info['hwaddress'].lower().strip() + hw_split = hw_info.split(None, 1) + if len(hw_split) == 2 and hw_split[0].startswith('ether'): + hw_addr = hw_split[1] + if hw_addr: + iface_info['hwaddress'] = hw_addr + real_ifaces[dev_name] = iface_info + # Check for those that should be started on boot via 'auto' + for (cmd, args) in entries: + if cmd == 'auto': + # Seems like auto can be like 'auto eth0 eth0:1' so just get the + # first part out as the device name + args = args.split(None) + if not args: + continue + dev_name = args[0].strip().lower() + if dev_name in real_ifaces: + real_ifaces[dev_name]['auto'] = True + return real_ifaces diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 30195384..6087929e 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -25,7 +25,9 @@ from cloudinit import helpers from cloudinit import log as logging from cloudinit import util +from cloudinit.distros import net_util from cloudinit.distros import rhel_util + from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -63,7 +65,7 @@ class Distro(distros.Distro): def _write_network(self, settings): # TODO(harlowja) fix this... since this is the ubuntu format - entries = rhel_util.translate_network(settings) + entries = net_util.translate_network(settings) LOG.debug("Translated ubuntu style network settings %s into %s", settings, entries) # Make the intermediate format as the rhel format... 
diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py index 1aba58b8..063d536e 100644 --- a/cloudinit/distros/rhel_util.py +++ b/cloudinit/distros/rhel_util.py @@ -30,94 +30,6 @@ from cloudinit import util LOG = logging.getLogger(__name__) -# This is a util function to translate Debian based distro interface blobs as -# given in /etc/network/interfaces to an equivalent format for distributions -# that use ifcfg-* style (Red Hat and SUSE). -# TODO(harlowja) remove when we have python-netcf active... -def translate_network(settings): - # Get the standard cmd, args from the ubuntu format - entries = [] - for line in settings.splitlines(): - line = line.strip() - if not line or line.startswith("#"): - continue - split_up = line.split(None, 1) - if len(split_up) <= 1: - continue - entries.append(split_up) - # Figure out where each iface section is - ifaces = [] - consume = {} - for (cmd, args) in entries: - if cmd == 'iface': - if consume: - ifaces.append(consume) - consume = {} - consume[cmd] = args - else: - consume[cmd] = args - # Check if anything left over to consume - absorb = False - for (cmd, args) in consume.iteritems(): - if cmd == 'iface': - absorb = True - if absorb: - ifaces.append(consume) - # Now translate - real_ifaces = {} - for info in ifaces: - if 'iface' not in info: - continue - iface_details = info['iface'].split(None) - dev_name = None - if len(iface_details) >= 1: - dev = iface_details[0].strip().lower() - if dev: - dev_name = dev - if not dev_name: - continue - iface_info = {} - if len(iface_details) >= 3: - proto_type = iface_details[2].strip().lower() - # Seems like this can be 'loopback' which we don't - # really care about - if proto_type in ['dhcp', 'static']: - iface_info['bootproto'] = proto_type - # These can just be copied over - for k in ['netmask', 'address', 'gateway', 'broadcast']: - if k in info: - val = info[k].strip().lower() - if val: - iface_info[k] = val - # Name server info provided?? 
- if 'dns-nameservers' in info: - iface_info['dns-nameservers'] = info['dns-nameservers'].split() - # Name server search info provided?? - if 'dns-search' in info: - iface_info['dns-search'] = info['dns-search'].split() - # Is any mac address spoofing going on?? - if 'hwaddress' in info: - hw_info = info['hwaddress'].lower().strip() - hw_split = hw_info.split(None, 1) - if len(hw_split) == 2 and hw_split[0].startswith('ether'): - hw_addr = hw_split[1] - if hw_addr: - iface_info['hwaddress'] = hw_addr - real_ifaces[dev_name] = iface_info - # Check for those that should be started on boot via 'auto' - for (cmd, args) in entries: - if cmd == 'auto': - # Seems like auto can be like 'auto eth0 eth0:1' so just get the - # first part out as the device name - args = args.split(None) - if not args: - continue - dev_name = args[0].strip().lower() - if dev_name in real_ifaces: - real_ifaces[dev_name]['auto'] = True - return real_ifaces - - # Helper function to update a RHEL/SUSE /etc/sysconfig/* file def update_sysconfig_file(fn, adjustments, allow_empty=False): if not adjustments: diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py index f2ac4efc..239e51b5 100644 --- a/cloudinit/distros/sles.py +++ b/cloudinit/distros/sles.py @@ -26,7 +26,9 @@ from cloudinit import helpers from cloudinit import log as logging from cloudinit import util +from cloudinit.distros import net_util from cloudinit.distros import rhel_util + from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -54,7 +56,7 @@ class Distro(distros.Distro): def _write_network(self, settings): # Convert debian settings to ifcfg format - entries = rhel_util.translate_network(settings) + entries = net_util.translate_network(settings) LOG.debug("Translated ubuntu style network settings %s into %s", settings, entries) # Make the intermediate format as the suse format... 
-- cgit v1.2.3 From 5f14b91c2a17101e845ee18068c7bb8c3c7a0556 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 22 Jan 2014 12:39:37 -0800 Subject: Add comments as to format with example in/out --- cloudinit/distros/net_util.py | 52 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py index 4c9095be..8c781f9a 100644 --- a/cloudinit/distros/net_util.py +++ b/cloudinit/distros/net_util.py @@ -26,6 +26,58 @@ # distributions that use other formats. # # TODO(harlowja) remove when we have python-netcf active... +# +# The format is the following: +# { +# : { +# # All optional (if not existent in original format) +# "netmask": , +# "broadcast": , +# "gateway": , +# "address": , +# "bootproto": "static"|"dhcp", +# "dns-search": , +# "hwaddress": , +# "auto": True (or non-existent), +# "dns-nameservers": [, ...], +# } +# } +# +# Things to note, comments are removed, if a ubuntu/debian interface is +# marked as auto then only then first segment (?) is retained, ie +# 'auto eth0 eth0:1' just marks eth0 as auto (not eth0:1). 
+# +# Example input: +# +# auto lo +# iface lo inet loopback +# +# auto eth0 +# iface eth0 inet static +# address 10.0.0.1 +# netmask 255.255.252.0 +# broadcast 10.0.0.255 +# gateway 10.0.0.2 +# dns-nameservers 98.0.0.1 98.0.0.2 +# +# Example output: +# { +# "lo": { +# "auto": true +# }, +# "eth0": { +# "auto": true, +# "dns-nameservers": [ +# "98.0.0.1", +# "98.0.0.2" +# ], +# "broadcast": "10.0.0.255", +# "netmask": "255.255.252.0", +# "bootproto": "static", +# "address": "10.0.0.1", +# "gateway": "10.0.0.2" +# } +# } def translate_network(settings): # Get the standard cmd, args from the ubuntu format entries = [] -- cgit v1.2.3 From efe969756d0208433e953d1692ea85006a56abe3 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 22 Jan 2014 12:47:51 -0800 Subject: Add a new line --- cloudinit/distros/net_util.py | 1 + 1 file changed, 1 insertion(+) (limited to 'cloudinit') diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py index 8c781f9a..5f60666d 100644 --- a/cloudinit/distros/net_util.py +++ b/cloudinit/distros/net_util.py @@ -78,6 +78,7 @@ # "gateway": "10.0.0.2" # } # } + def translate_network(settings): # Get the standard cmd, args from the ubuntu format entries = [] -- cgit v1.2.3 From 84514cdff8ff025df052fe6301d2a7ed751d7d61 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 22 Jan 2014 16:54:07 -0500 Subject: cc_resizefs: figure out what /dev/root means via kernel cmdline If mount_info says that the root filesystem is on /dev/root and /dev/root does not exist, then we'll try to glean that information from the linux kernel cmdline. This situation occurs at least when you boot without an initramfs for the current ppc64el cloud images: qemu-system-ppc64 ... -kernel my.kernel -append 'root=/dev/sda' When doing that, /proc/1/mountinfo will say '/dev/root' for '/'. 
--- ChangeLog | 2 ++ cloudinit/config/cc_resizefs.py | 33 +++++++++++++++++++++++++++++++-- 2 files changed, 33 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/ChangeLog b/ChangeLog index cb9586f0..32f2a8d0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -17,6 +17,8 @@ - drop dependency on boto for crawling ec2 metadata service. - add 'Requires' on sudo (for OpenNebula datasource) in rpm specs, and 'Recommends' in the debian/control.in [Vlastimil Holer] + - if mount_info reports /dev/root is a device path for /, then convert + that to a device via help of kernel cmdline. 0.7.4: - fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a partitioned block device with target filesystem on ephemeral0.1. diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 56040fdd..388ca66f 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -51,6 +51,25 @@ RESIZE_FS_PREFIXES_CMDS = [ NOBLOCK = "noblock" +def rootdev_from_cmdline(cmdline): + found = None + for tok in cmdline.split(): + if tok.startswith("root="): + found = tok[5:] + break + if found is None: + return None + + if found.startswith("/dev/"): + return found + if found.startswith("LABEL="): + return "/dev/disk/by-label/" + found[len("LABEL="):] + if found.startswith("UUID="): + return "/dev/disk/by-uuid/" + found[len("UUID="):] + + return "/dev/" + found + + def handle(name, cfg, _cloud, log, args): if len(args) != 0: resize_root = args[0] @@ -78,10 +97,20 @@ def handle(name, cfg, _cloud, log, args): info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what) log.debug("resize_info: %s" % info) + container = util.is_container() + + if (devpth == "/dev/root" and not os.path.exists(devpth) and + not container): + devpth = rootdev_from_cmdline(util.get_cmdline()) + if devpth is None: + log.warn("Unable to find device '/dev/root'") + return + log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth) + try: statret = 
os.stat(devpth) except OSError as exc: - if util.is_container() and exc.errno == errno.ENOENT: + if container and exc.errno == errno.ENOENT: log.debug("Device '%s' did not exist in container. " "cannot resize: %s" % (devpth, info)) elif exc.errno == errno.ENOENT: @@ -92,7 +121,7 @@ def handle(name, cfg, _cloud, log, args): return if not stat.S_ISBLK(statret.st_mode): - if util.is_container(): + if container: log.debug("device '%s' not a block device in container." " cannot resize: %s" % (devpth, info)) else: -- cgit v1.2.3 From c0ee33fc1d70b272a96430def658afd6f1867afa Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Jan 2014 14:06:13 -0500 Subject: remove some white space --- cloudinit/distros/rhel.py | 1 - cloudinit/distros/sles.py | 1 - 2 files changed, 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 6087929e..e8abf111 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -27,7 +27,6 @@ from cloudinit import util from cloudinit.distros import net_util from cloudinit.distros import rhel_util - from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py index 239e51b5..9788a1ba 100644 --- a/cloudinit/distros/sles.py +++ b/cloudinit/distros/sles.py @@ -28,7 +28,6 @@ from cloudinit import util from cloudinit.distros import net_util from cloudinit.distros import rhel_util - from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) -- cgit v1.2.3 From c833a84f08019ba4413937f2f1b1f12a4ffe5632 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Jan 2014 14:28:59 -0500 Subject: pep8 --- cloudinit/config/cc_debug.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index cfd31fa1..7219b0f8 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ 
-14,10 +14,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from StringIO import StringIO -from cloudinit import util from cloudinit import type_utils +from cloudinit import util import copy +from StringIO import StringIO def _make_header(text): -- cgit v1.2.3