From d1e26fc118cdb641829fbe6b838ef46d4ab1f113 Mon Sep 17 00:00:00 2001 From: Kiril Vladimiroff Date: Wed, 19 Feb 2014 10:45:53 +0200 Subject: Read encoded with base64 user data This allows users of CloudSigma's VM to encode their user data with base64. In order to do that they have to add the ``cloudinit-user-data`` field to the ``base64_fields``. The latter is a comma-separated field with all the meta fields with base64 encoded values. --- cloudinit/sources/DataSourceCloudSigma.py | 5 +++++ doc/sources/cloudsigma/README.rst | 4 ++++ tests/unittests/test_datasource/test_cloudsigma.py | 15 +++++++++++++-- 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index e734d7e5..79ced3f4 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -15,6 +15,7 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . +from base64 import b64decode import re from cloudinit import log as logging @@ -60,7 +61,11 @@ class DataSourceCloudSigma(sources.DataSource): if dsmode == "disabled" or dsmode != self.dsmode: return False + base64_fields = server_meta.get('base64_fields', '').split(',') self.userdata_raw = server_meta.get('cloudinit-user-data', "") + if 'cloudinit-user-data' in base64_fields: + self.userdata_raw = b64decode(self.userdata_raw) + self.metadata = server_context self.ssh_public_key = server_meta['ssh_public_key'] diff --git a/doc/sources/cloudsigma/README.rst b/doc/sources/cloudsigma/README.rst index 1d9160a2..6509b585 100644 --- a/doc/sources/cloudsigma/README.rst +++ b/doc/sources/cloudsigma/README.rst @@ -23,6 +23,10 @@ You can provide user-data to the VM using the dedicated `meta field`_ in the `se header could be omitted. However since this is a raw-text field you could provide any of the valid `config formats`_. 
+You have the option to encode your user-data using Base64. In order to do that you have to add the +``cloudinit-user-data`` field to the ``base64_fields``. The latter is a comma-separated field with +all the meta fields with base64 encoded values. + If your user-data does not need an internet connection you can create a `meta field`_ in the `server context`_ ``cloudinit-dsmode`` and set "local" as value. If this field does not exist the default value is "net". diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index 3245aba1..adbb4afb 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -1,4 +1,5 @@ # coding: utf-8 +import copy from unittest import TestCase from cloudinit.cs_utils import Cepko @@ -24,7 +25,8 @@ SERVER_CONTEXT = { class CepkoMock(Cepko): - result = SERVER_CONTEXT + def __init__(self, mocked_context): + self.result = mocked_context def all(self): return self @@ -33,7 +35,7 @@ class DataSourceCloudSigmaTest(TestCase): def setUp(self): self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") - self.datasource.cepko = CepkoMock() + self.datasource.cepko = CepkoMock(SERVER_CONTEXT) self.datasource.get_data() def test_get_hostname(self): @@ -57,3 +59,12 @@ class DataSourceCloudSigmaTest(TestCase): def test_user_data(self): self.assertEqual(self.datasource.userdata_raw, SERVER_CONTEXT['meta']['cloudinit-user-data']) + + def test_encoded_user_data(self): + encoded_context = copy.deepcopy(SERVER_CONTEXT) + encoded_context['meta']['base64_fields'] = 'cloudinit-user-data' + encoded_context['meta']['cloudinit-user-data'] = 'aGkgd29ybGQK' + self.datasource.cepko = CepkoMock(encoded_context) + self.datasource.get_data() + + self.assertEqual(self.datasource.userdata_raw, b'hi world\n') -- cgit v1.2.3 From 9730b1470f029a514cacbba197c7946d8fedf3d4 Mon Sep 17 00:00:00 2001 From: Scott Moser 
Date: Thu, 20 Feb 2014 13:11:38 -0500 Subject: initial commit for status --- bin/cloud-init | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 88 insertions(+), 2 deletions(-) diff --git a/bin/cloud-init b/bin/cloud-init index 80a1df05..e22f54de 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -418,6 +418,92 @@ def main_single(name, args): # Guess it worked return 0 +def status_wrapper(args): + (name, functor) = args.action + + if args.name: + if args.local: + mode = "init-local" + else: + mode = "init" + elif args.name == "modules": + mode = "modules-%s" % args.mode + + modes = ('init', 'init-local', 'modules-config', 'modules-final') + + if mode == 'init': + nullstatus = { + 'errors': [] + 'state': None + 'start': None + 'end': None + } + status = {'v1': {}} + for mode in modes: + status['v1'][mode] = nullstatus.copy() + else: + status = load_status() + status['stage'] = mode + + v1 = status['v1'] + v1[mode]['start'] = time.time() + update_status(status) + # status + # { + # 'v1': { + # 'init': { + # errors: [] + # start: + # end: + # }, + # 'init-local': { + # errors: [] + # start: + # end: + # }, + # 'modules-final': { + # }, + # 'modules-config': { + # }, + # 'datasource': None + # 'stage': ('init', 'init-local', 'modules-final', 'modules-config', 'finished') + # 'errors': + # } + # finished + # { + # 'datasource': + # 'errors': + # } + # + # + exception = None + try: + ret = func(args) + except Exception as e: + v1[mode]['errors'] = [str(e)] + + v1[mode]['finished'] = time.time() + v1['stage'] = None + + + if mode in ('init' or 'init-local'): + # FIXME(smoser): add the datasource here + v1['datasource'] = "~~~datasource~~~" + + update_status(status) + + if mode == "modules-final": + # write the 'finished' file + errors = [] + for m in modes: + if v1[m]['errors']: + errors += v1[m]['errors'] + + finished = {'datasource': v1['datasource'], + 'errors': errors} + + return ret + def main(): parser = argparse.ArgumentParser() @@ 
-450,7 +536,7 @@ def main(): default=False) # This is used so that we can know which action is selected + # the functor to use to run this subcommand - parser_init.set_defaults(action=('init', main_init)) + parser_init.set_defaults(action=('init', status_wrapper)) # These settings are used for the 'config' and 'final' stages parser_mod = subparsers.add_parser('modules', @@ -461,7 +547,7 @@ def main(): "to use (default: %(default)s)"), default='config', choices=('init', 'config', 'final')) - parser_mod.set_defaults(action=('modules', main_modules)) + parser_mod.set_defaults(action=('modules', status_wrapper)) # These settings are used when you want to query information # stored in the cloud-init data objects/directories/files -- cgit v1.2.3 From 2781fc8289e4aab130125b1a3e69b45a9318f805 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 24 Feb 2014 16:27:28 -0500 Subject: possibly functional start testing --- bin/cloud-init | 128 +++++++++++++++++++++++++++++---------------------------- 1 file changed, 66 insertions(+), 62 deletions(-) diff --git a/bin/cloud-init b/bin/cloud-init index e22f54de..dc480901 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -22,8 +22,10 @@ # along with this program. If not, see . 
import argparse +import json import os import sys +import time import traceback # This is more just for running from the bin folder so that @@ -126,11 +128,11 @@ def run_module_section(mods, action_name, section): " under section '%s'") % (action_name, full_section_name) sys.stderr.write("%s\n" % (msg)) LOG.debug(msg) - return 0 + return [] else: LOG.debug("Ran %s modules with %s failures", len(which_ran), len(failures)) - return len(failures) + return failures def main_init(name, args): @@ -220,7 +222,7 @@ def main_init(name, args): if existing_files: LOG.debug("Exiting early due to the existence of %s files", existing_files) - return 0 + return (None, []) else: # The cache is not instance specific, so it has to be purged # but we want 'start' to benefit from a cache if @@ -249,9 +251,9 @@ def main_init(name, args): " Likely bad things to come!")) if not args.force: if args.local: - return 0 + return (None, []) else: - return 1 + return (None, ["No instance datasource found."]) # Stage 6 iid = init.instancify() LOG.debug("%s will now be targeting instance id: %s", name, iid) @@ -274,7 +276,7 @@ def main_init(name, args): init.consume_data(PER_ALWAYS) except Exception: util.logexc(LOG, "Consuming user data failed!") - return 1 + return (init.datasource, ["Consuming user data failed!"]) # Stage 8 - re-read and apply relevant cloud-config to include user-data mods = stages.Modules(init, extract_fns(args)) @@ -291,7 +293,7 @@ def main_init(name, args): logging.setupLogging(mods.cfg) # Stage 10 - return run_module_section(mods, name, name) + return (init.datasource, run_module_section(mods, name, name)) def main_modules(action_name, args): @@ -315,14 +317,12 @@ def main_modules(action_name, args): init.fetch() except sources.DataSourceNotFoundException: # There was no datasource found, theres nothing to do - util.logexc(LOG, ('Can not apply stage %s, ' - 'no datasource found!' 
- " Likely bad things to come!"), name) - print_exc(('Can not apply stage %s, ' - 'no datasource found!' - " Likely bad things to come!") % (name)) + msg = ('Can not apply stage %s, no datasource found! Likely bad ' + 'things to come!' % name) + util.logexc(LOG, msg) + print_exc(msg) if not args.force: - return 1 + return [(msg)] # Stage 3 mods = stages.Modules(init, extract_fns(args)) # Stage 4 @@ -418,8 +418,21 @@ def main_single(name, args): # Guess it worked return 0 -def status_wrapper(args): - (name, functor) = args.action + +def status_wrapper(args, data_d=None, link_d=None): + if data_d is None: + data_d = os.path.normpath("/var/lib/cloud/data") + if link_d is None: + link_d = os.path.normpath("/run/cloud-init") + + status_path = os.path.join(data_d, "status.json") + status_link = os.path.join(link_d, "status.json") + result_path = os.path.join(data_d, "result.json") + result_link = os.path.join(link_d, "result.json") + + util.ensure_dirs((data_d, link_d,)) + + (_name, functor) = args.action if args.name: if args.local: @@ -431,78 +444,69 @@ def status_wrapper(args): modes = ('init', 'init-local', 'modules-config', 'modules-final') - if mode == 'init': + status = None + if mode == 'init-local': + for f in (status_link, result_link, status_path, result_path): + util.del_file(f) + else: + try: + status = json.loads(util.load_file(status_path)) + except: + pass + + if status is None: nullstatus = { - 'errors': [] - 'state': None - 'start': None - 'end': None + 'errors': [], + 'state': None, + 'start': None, + 'end': None, } status = {'v1': {}} for mode in modes: status['v1'][mode] = nullstatus.copy() - else: - status = load_status() + status['v1']['datasource'] = None + status['stage'] = mode v1 = status['v1'] v1[mode]['start'] = time.time() - update_status(status) - # status - # { - # 'v1': { - # 'init': { - # errors: [] - # start: - # end: - # }, - # 'init-local': { - # errors: [] - # start: - # end: - # }, - # 'modules-final': { - # }, - # 'modules-config': 
{ - # }, - # 'datasource': None - # 'stage': ('init', 'init-local', 'modules-final', 'modules-config', 'finished') - # 'errors': - # } - # finished - # { - # 'datasource': - # 'errors': - # } - # - # - exception = None + + util.write_file(status_path, json.dumps(status)) + util.sym_link(os.path.relpath(os.path.status_path, link_d), status_link) + try: - ret = func(args) + ret = functor(args) except Exception as e: v1[mode]['errors'] = [str(e)] v1[mode]['finished'] = time.time() v1['stage'] = None + if mode in ('init', 'init-local'): + (datasource, errors) = ret + if datasource is not None: + v1['datasource'] = datasource + v1[mode]['errors'] = errors + else: + errors = ret + v1[mode]['errors'] = ret - if mode in ('init' or 'init-local'): - # FIXME(smoser): add the datasource here - v1['datasource'] = "~~~datasource~~~" - - update_status(status) + util.write_file(status_path, json.dumps(status)) if mode == "modules-final": # write the 'finished' file errors = [] for m in modes: if v1[m]['errors']: - errors += v1[m]['errors'] - + errors.extend(v1[m].get('errors', [])) + finished = {'datasource': v1['datasource'], 'errors': errors} + util.write_file(result_path, json.dumps(finished)) + util.sym_link(os.path.relpath(os.path.result_path, link_d), + result_link) - return ret + return len(v1[mode]['errors']) def main(): -- cgit v1.2.3 From da13f065c9a2be372fea35db62e51086d443f8dc Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 24 Feb 2014 17:20:12 -0500 Subject: fixes from testing, force symlink --- bin/cloud-init | 48 ++++++++++++++++++++++++++---------------------- cloudinit/util.py | 4 +++- 2 files changed, 29 insertions(+), 23 deletions(-) diff --git a/bin/cloud-init b/bin/cloud-init index dc480901..78f8600d 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -419,7 +419,7 @@ def main_single(name, args): return 0 -def status_wrapper(args, data_d=None, link_d=None): +def status_wrapper(name, args, data_d=None, link_d=None): if data_d is None: data_d = 
os.path.normpath("/var/lib/cloud/data") if link_d is None: @@ -434,13 +434,15 @@ def status_wrapper(args, data_d=None, link_d=None): (_name, functor) = args.action - if args.name: + if name == "init": if args.local: mode = "init-local" else: mode = "init" - elif args.name == "modules": + elif name == "modules": mode = "modules-%s" % args.mode + else: + raise ValueError("unknown name: %s" % name) modes = ('init', 'init-local', 'modules-config', 'modules-final') @@ -457,40 +459,40 @@ def status_wrapper(args, data_d=None, link_d=None): if status is None: nullstatus = { 'errors': [], - 'state': None, 'start': None, 'end': None, } status = {'v1': {}} - for mode in modes: - status['v1'][mode] = nullstatus.copy() + for m in modes: + status['v1'][m] = nullstatus.copy() status['v1']['datasource'] = None - status['stage'] = mode v1 = status['v1'] + v1['stage'] = mode v1[mode]['start'] = time.time() util.write_file(status_path, json.dumps(status)) - util.sym_link(os.path.relpath(os.path.status_path, link_d), status_link) + util.sym_link(os.path.relpath(status_path, link_d), status_link, + force=True) try: - ret = functor(args) + ret = functor(name, args) + if mode in ('init', 'init-local'): + (datasource, errors) = ret + if datasource is not None: + v1['datasource'] = datasource + v1[mode]['errors'] = errors + else: + errors = ret + v1[mode]['errors'] = ret + except Exception as e: v1[mode]['errors'] = [str(e)] v1[mode]['finished'] = time.time() v1['stage'] = None - if mode in ('init', 'init-local'): - (datasource, errors) = ret - if datasource is not None: - v1['datasource'] = datasource - v1[mode]['errors'] = errors - else: - errors = ret - v1[mode]['errors'] = ret - util.write_file(status_path, json.dumps(status)) if mode == "modules-final": @@ -503,8 +505,8 @@ def status_wrapper(args, data_d=None, link_d=None): finished = {'datasource': v1['datasource'], 'errors': errors} util.write_file(result_path, json.dumps(finished)) - 
util.sym_link(os.path.relpath(os.path.result_path, link_d), - result_link) + util.sym_link(os.path.relpath(result_path, link_d), result_link, + force=True) return len(v1[mode]['errors']) @@ -540,7 +542,7 @@ def main(): default=False) # This is used so that we can know which action is selected + # the functor to use to run this subcommand - parser_init.set_defaults(action=('init', status_wrapper)) + parser_init.set_defaults(action=('init', main_init)) # These settings are used for the 'config' and 'final' stages parser_mod = subparsers.add_parser('modules', @@ -551,7 +553,7 @@ def main(): "to use (default: %(default)s)"), default='config', choices=('init', 'config', 'final')) - parser_mod.set_defaults(action=('modules', status_wrapper)) + parser_mod.set_defaults(action=('modules', main_modules)) # These settings are used when you want to query information # stored in the cloud-init data objects/directories/files @@ -592,6 +594,8 @@ def main(): signal_handler.attach_handlers() (name, functor) = args.action + if name in ("modules", "init"): + functor = status_wrapper return util.log_time(logfunc=LOG.debug, msg="cloud-init mode '%s'" % name, get_uptime=True, func=functor, args=(name, args)) diff --git a/cloudinit/util.py b/cloudinit/util.py index 87b0c853..06039ee2 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1395,8 +1395,10 @@ def get_builtin_cfg(): return obj_copy.deepcopy(CFG_BUILTIN) -def sym_link(source, link): +def sym_link(source, link, force=False): LOG.debug("Creating symbolic link from %r => %r", link, source) + if force and os.path.exists(link): + del_file(link) os.symlink(source, link) -- cgit v1.2.3 From 16f95094209faeb7f6fcdb0d9ac498360e7a3e42 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 24 Feb 2014 20:23:59 -0500 Subject: add doc/status.txt --- doc/status.txt | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 doc/status.txt diff --git a/doc/status.txt b/doc/status.txt new 
file mode 100644 index 00000000..9c2f4b89 --- /dev/null +++ b/doc/status.txt @@ -0,0 +1,51 @@ +cloud-init will keep a 'status' file up to date for other applications +wishing to use it to determine cloud-init status. + +It will manage 2 files: + status.json + finished.json + +The files will be written to /var/lib/cloud/data/ . +A symlink will be created in /run/cloud-init. The link from /run is to ensure +that if the file exists, it is not stale for this boot. + +status.json's format is: + { + 'v1': { + 'init': { + errors: [] # list of strings for each error that occurred + start: integer # time.time() that this stage started or None + end: integer # time.time() that this stage finished or None + }, + 'init-local': { + 'errors': [], 'start': , 'end' # (same as 'init' above) + }, + 'modules-config': { + 'errors': [], 'start': , 'end' # (same as 'init' above) + }, + 'modules-final': { + 'errors': [], 'start': , 'end' # (same as 'init' above) + }, + 'datasource': string describing datasource found or None + 'stage': string representing stage that is currently running + ('init', 'init-local', 'modules-final', 'modules-config', None) + if None, then no stage is running. Reader must read the start/end + of each of the above stages to determine the state. 
+ } + +finished.json's format is: + { + 'datasource': string describing the datasource found + 'errors': [] # list of errors reported + } + +Thus, to determine if cloud-init is finished: + fin = "/run/cloud-init/finished.json" + if os.path.exists(fin): + ret = json.load(open(fin, "r")) + if len(ret): + print "Finished with errors:" + "\n".join(ret['errors']) + else: + print "Finished no errors" + else: + print "Not Finished" -- cgit v1.2.3 From 4055b6d303da775580ab299145788c0cd16c0d45 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 24 Feb 2014 20:27:03 -0500 Subject: fix end/start in doc --- doc/status.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/status.txt b/doc/status.txt index 9c2f4b89..5958fa85 100644 --- a/doc/status.txt +++ b/doc/status.txt @@ -14,17 +14,17 @@ status.json's format is: 'v1': { 'init': { errors: [] # list of strings for each error that occurred - start: integer # time.time() that this stage started or None - end: integer # time.time() that this stage finished or None + start: float # time.time() that this stage started or None + end: float # time.time() that this stage finished or None }, 'init-local': { - 'errors': [], 'start': , 'end' # (same as 'init' above) + 'errors': [], 'start': , 'end' # (same as 'init' above) }, 'modules-config': { - 'errors': [], 'start': , 'end' # (same as 'init' above) + 'errors': [], 'start': , 'end' # (same as 'init' above) }, 'modules-final': { - 'errors': [], 'start': , 'end' # (same as 'init' above) + 'errors': [], 'start': , 'end' # (same as 'init' above) }, 'datasource': string describing datasource found or None 'stage': string representing stage that is currently running -- cgit v1.2.3 From af6b25e0b8895e8eead0a7202d637fa197c4401c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 24 Feb 2014 20:47:35 -0500 Subject: minor cleanups --- bin/cloud-init | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/bin/cloud-init 
b/bin/cloud-init index 78f8600d..479d715d 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -419,6 +419,10 @@ def main_single(name, args): return 0 +def write_json(path, data): + util.write_file(path, json.dumps(data, indent=1) + "\n") + + def status_wrapper(name, args, data_d=None, link_d=None): if data_d is None: data_d = os.path.normpath("/var/lib/cloud/data") @@ -472,7 +476,7 @@ def status_wrapper(name, args, data_d=None, link_d=None): v1['stage'] = mode v1[mode]['start'] = time.time() - util.write_file(status_path, json.dumps(status)) + write_json(status, status_path) util.sym_link(os.path.relpath(status_path, link_d), status_link, force=True) @@ -481,11 +485,11 @@ def status_wrapper(name, args, data_d=None, link_d=None): if mode in ('init', 'init-local'): (datasource, errors) = ret if datasource is not None: - v1['datasource'] = datasource - v1[mode]['errors'] = errors + v1['datasource'] = str(datasource) else: errors = ret - v1[mode]['errors'] = ret + + v1[mode]['errors'] = [str(e) for e in errors] except Exception as e: v1[mode]['errors'] = [str(e)] @@ -493,7 +497,7 @@ def status_wrapper(name, args, data_d=None, link_d=None): v1[mode]['finished'] = time.time() v1['stage'] = None - util.write_file(status_path, json.dumps(status)) + write_json(status_path, status) if mode == "modules-final": # write the 'finished' file @@ -502,9 +506,8 @@ def status_wrapper(name, args, data_d=None, link_d=None): if v1[m]['errors']: errors.extend(v1[m].get('errors', [])) - finished = {'datasource': v1['datasource'], - 'errors': errors} - util.write_file(result_path, json.dumps(finished)) + write_json(result_path, + {'datasource': v1['datasource'], 'errors': errors}) util.sym_link(os.path.relpath(result_path, link_d), result_link, force=True) -- cgit v1.2.3 From 1692175c0b0afe543065303251674f77e925e2a9 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 24 Feb 2014 20:49:22 -0500 Subject: fix write_json call --- bin/cloud-init | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/bin/cloud-init b/bin/cloud-init index 479d715d..d1cd68ea 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -476,7 +476,7 @@ def status_wrapper(name, args, data_d=None, link_d=None): v1['stage'] = mode v1[mode]['start'] = time.time() - write_json(status, status_path) + write_json(status_path, status) util.sym_link(os.path.relpath(status_path, link_d), status_link, force=True) -- cgit v1.2.3 From 27081dacc0812be242860e31f0473b69e7c45c49 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 25 Feb 2014 12:07:03 -0500 Subject: be atomic when writing status files --- bin/cloud-init | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/bin/cloud-init b/bin/cloud-init index d1cd68ea..261aaa4e 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -26,6 +26,7 @@ import json import os import sys import time +import tempfile import traceback # This is more just for running from the bin folder so that @@ -419,8 +420,18 @@ def main_single(name, args): return 0 -def write_json(path, data): - util.write_file(path, json.dumps(data, indent=1) + "\n") +def atomic_write_json(path, data): + tf = None + try: + tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(path), + delete=False) + tf.write(json.dumps(data, indent=1) + "\n") + tf.close() + os.rename(tf.name, path) + except Exception as e: + if tf is not None: + util.del_file(tf.name) + raise e def status_wrapper(name, args, data_d=None, link_d=None): @@ -471,12 +482,11 @@ def status_wrapper(name, args, data_d=None, link_d=None): status['v1'][m] = nullstatus.copy() status['v1']['datasource'] = None - v1 = status['v1'] v1['stage'] = mode v1[mode]['start'] = time.time() - write_json(status_path, status) + atomic_write_json(status_path, status) util.sym_link(os.path.relpath(status_path, link_d), status_link, force=True) @@ -497,7 +507,7 @@ def status_wrapper(name, args, data_d=None, link_d=None): v1[mode]['finished'] = time.time() v1['stage'] = None - write_json(status_path, 
status) + atomic_write_json(status_path, status) if mode == "modules-final": # write the 'finished' file @@ -506,7 +516,7 @@ def status_wrapper(name, args, data_d=None, link_d=None): if v1[m]['errors']: errors.extend(v1[m].get('errors', [])) - write_json(result_path, + atomic_write_json(result_path, {'datasource': v1['datasource'], 'errors': errors}) util.sym_link(os.path.relpath(result_path, link_d), result_link, force=True) -- cgit v1.2.3 From 778d2015ec49170ff4525b63903d7a656ad44b2e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 3 Mar 2014 15:01:18 -0500 Subject: cc_seed_random: fix bug and support pollinate command there was a bug that prevented seeding of /dev/urandom from metadata provided by the datasource unless the user provided random_seed config. This should, instead, be the default behavior. --- cloudinit/config/cc_seed_random.py | 50 ++++++++++++--- .../test_handler/test_handler_seed_random.py | 75 ++++++++++++++++++++++ 2 files changed, 116 insertions(+), 9 deletions(-) diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index 22a31f29..56c19ad5 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -1,8 +1,11 @@ # vi: ts=4 expandtab # # Copyright (C) 2013 Yahoo! Inc. +# Copyright (C) 2014 Canonical, Ltd # # Author: Joshua Harlow +# Author: Dustin Kirkland +# Author: Scott Moser # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -17,12 +20,15 @@ # along with this program. If not, see . 
import base64 +import os from StringIO import StringIO from cloudinit.settings import PER_INSTANCE +from cloudinit import log as logging from cloudinit import util frequency = PER_INSTANCE +LOG = logging.getLogger(__name__) def _decode(data, encoding=None): @@ -38,24 +44,50 @@ def _decode(data, encoding=None): raise IOError("Unknown random_seed encoding: %s" % (encoding)) -def handle(name, cfg, cloud, log, _args): - if not cfg or "random_seed" not in cfg: - log.debug(("Skipping module named %s, " - "no 'random_seed' configuration found"), name) +def handle_random_seed_command(command, required, env=None): + if not command and required: + raise ValueError("no command found but required=true") + elif not command: + LOG.debug("no command provided") return - my_cfg = cfg['random_seed'] - seed_path = my_cfg.get('file', '/dev/urandom') + cmd = command[0] + if not util.which(cmd): + if required: + raise ValueError("command '%s' not found but required=true", cmd) + else: + LOG.debug("command '%s' not found for seed_command", cmd) + return + util.subp(command, env=env) + + +def handle(name, cfg, cloud, log, _args): + mycfg = cfg.get('random_seed', {}) + seed_path = mycfg.get('file', '/dev/urandom') + seed_data = mycfg.get('data', '') + seed_buf = StringIO() - seed_buf.write(_decode(my_cfg.get('data', ''), - encoding=my_cfg.get('encoding'))) + if seed_data: + seed_buf.write(_decode(seed_data, encoding=mycfg.get('encoding'))) + # 'random_seed' is set up by Azure datasource, and comes already in + # openstack meta_data.json metadata = cloud.datasource.metadata if metadata and 'random_seed' in metadata: seed_buf.write(metadata['random_seed']) seed_data = seed_buf.getvalue() if len(seed_data): - log.debug("%s: adding %s bytes of random seed entrophy to %s", name, + log.debug("%s: adding %s bytes of random seed entropy to %s", name, len(seed_data), seed_path) util.append_file(seed_path, seed_data) + + command = mycfg.get('command', ['pollinate', '-q']) + req = 
mycfg.get('command_required', False) + try: + env = os.environ.copy() + env['RANDOM_SEED_FILE'] = seed_path + handle_random_seed_command(command=command, required=req, env=env) + except ValueError as e: + log.warn("handling random command [%s] failed: %s", command, e) + raise e diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py index 2b21ac02..be2fa4a4 100644 --- a/tests/unittests/test_handler/test_handler_seed_random.py +++ b/tests/unittests/test_handler/test_handler_seed_random.py @@ -42,10 +42,32 @@ class TestRandomSeed(t_help.TestCase): def setUp(self): super(TestRandomSeed, self).setUp() self._seed_file = tempfile.mktemp() + self.unapply = [] + + # by default 'which' has nothing in its path + self.apply_patches([(util, 'which', self._which)]) + self.apply_patches([(util, 'subp', self._subp)]) + self.subp_called = [] + self.whichdata = {} def tearDown(self): + apply_patches([i for i in reversed(self.unapply)]) util.del_file(self._seed_file) + def apply_patches(self, patches): + ret = apply_patches(patches) + self.unapply += ret + + def _which(self, program): + return self.whichdata.get(program) + + def _subp(self, *args, **kwargs): + # supports subp calling with cmd as args or kwargs + if 'args' not in kwargs: + kwargs['args'] = args[0] + self.subp_called.append(kwargs) + return + def _compress(self, text): contents = StringIO() gz_fh = gzip.GzipFile(mode='wb', fileobj=contents) @@ -148,3 +170,56 @@ class TestRandomSeed(t_help.TestCase): cc_seed_random.handle('test', cfg, c, LOG, []) contents = util.load_file(self._seed_file) self.assertEquals('tiny-tim-was-here-so-was-josh', contents) + + def test_seed_command_not_provided_pollinate_available(self): + c = self._get_cloud('ubuntu', {}) + self.whichdata = {'pollinate': '/usr/bin/pollinate'} + cc_seed_random.handle('test', {}, c, LOG, []) + + subp_args = [f['args'] for f in self.subp_called] + self.assertIn(['pollinate', '-q'], 
subp_args) + + def test_seed_command_not_provided_pollinate_not_available(self): + c = self._get_cloud('ubuntu', {}) + self.whichdata = {} + cc_seed_random.handle('test', {}, c, LOG, []) + + # subp should not have been called as which would say not available + self.assertEquals(self.subp_called, list()) + + def test_unavailable_seed_command_and_required_raises_error(self): + c = self._get_cloud('ubuntu', {}) + self.whichdata = {} + self.assertRaises(ValueError, cc_seed_random.handle, + 'test', {'random_seed': {'command_required': True}}, c, LOG, []) + + def test_seed_command_and_required(self): + c = self._get_cloud('ubuntu', {}) + self.whichdata = {'foo': 'foo'} + cfg = {'random_seed': {'command_required': True, 'command': ['foo']}} + cc_seed_random.handle('test', cfg, c, LOG, []) + + self.assertIn(['foo'], [f['args'] for f in self.subp_called]) + + def test_file_in_environment_for_command(self): + c = self._get_cloud('ubuntu', {}) + self.whichdata = {'foo': 'foo'} + cfg = {'random_seed': {'command_required': True, 'command': ['foo'], + 'file': self._seed_file}} + cc_seed_random.handle('test', cfg, c, LOG, []) + + # this just instists that the first time subp was called, + # RANDOM_SEED_FILE was in the environment set up correctly + subp_env = [f['env'] for f in self.subp_called] + self.assertEqual(subp_env[0].get('RANDOM_SEED_FILE'), self._seed_file) + + +def apply_patches(patches): + ret = [] + for (ref, name, replace) in patches: + if replace is None: + continue + orig = getattr(ref, name) + setattr(ref, name, replace) + ret.append((ref, name, orig)) + return ret -- cgit v1.2.3 From d7b79b1c5703a9fc4d533d15efa5fdb1f4f8352b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 3 Mar 2014 16:33:11 -0500 Subject: allow random command's output to go through by default we call 'pollinate -q' which is nice and quiet. if the user wants to be noisy, let them. 
--- cloudinit/config/cc_seed_random.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index 56c19ad5..49a6b3e8 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -58,7 +58,7 @@ def handle_random_seed_command(command, required, env=None): else: LOG.debug("command '%s' not found for seed_command", cmd) return - util.subp(command, env=env) + util.subp(command, env=env, capture=False) def handle(name, cfg, cloud, log, _args): -- cgit v1.2.3 From 2b35f6b814b7f30ceea1e8a58c928f2818bb2729 Mon Sep 17 00:00:00 2001 From: Dustin Kirkland Date: Mon, 3 Mar 2014 16:44:31 -0500 Subject: seed_random: support a 'command' to seed /dev/random This extends 'random_seed' top level entry to include a 'command' entry, that has the opportunity to then seed the random number generator. Example config: #cloud-config random_seed: command: ['dd', 'if=/dev/zero', 'of=/dev/random', 'bs=1M', 'count=10'] LP: #1286316 --- ChangeLog | 2 + cloudinit/config/cc_seed_random.py | 47 ++++++++++++--- .../test_handler/test_handler_seed_random.py | 67 ++++++++++++++++++++++ 3 files changed, 107 insertions(+), 9 deletions(-) diff --git a/ChangeLog b/ChangeLog index 76ab88c4..a45ab73b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -33,6 +33,8 @@ rather than relying on EC2 data in openstack metadata service. - SmartOS, AltCloud: disable running on arm systems due to bug (LP: #1243287, #1285686) [Oleg Strikov] + - Allow running a command to seed random, default is 'pollinate -q' + (LP: #1286316) [Dustin Kirkland] 0.7.4: - fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a partitioned block device with target filesystem on ephemeral0.1. 
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index 22a31f29..599280f6 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -1,8 +1,11 @@ # vi: ts=4 expandtab # # Copyright (C) 2013 Yahoo! Inc. +# Copyright (C) 2014 Canonical, Ltd # # Author: Joshua Harlow +# Author: Dustin Kirkland +# Author: Scott Moser # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -20,9 +23,11 @@ import base64 from StringIO import StringIO from cloudinit.settings import PER_INSTANCE +from cloudinit import log as logging from cloudinit import util frequency = PER_INSTANCE +LOG = logging.getLogger(__name__) def _decode(data, encoding=None): @@ -38,24 +43,48 @@ def _decode(data, encoding=None): raise IOError("Unknown random_seed encoding: %s" % (encoding)) -def handle(name, cfg, cloud, log, _args): - if not cfg or "random_seed" not in cfg: - log.debug(("Skipping module named %s, " - "no 'random_seed' configuration found"), name) +def handle_random_seed_command(command, required): + if not command and required: + raise ValueError("no command found but required=true") + elif not command: + LOG.debug("no command provided") return - my_cfg = cfg['random_seed'] - seed_path = my_cfg.get('file', '/dev/urandom') + cmd = command[0] + if not util.which(cmd): + if required: + raise ValueError("command '%s' not found but required=true", cmd) + else: + LOG.debug("command '%s' not found for seed_command", cmd) + return + util.subp(command) + + +def handle(name, cfg, cloud, log, _args): + mycfg = cfg.get('random_seed', {}) + seed_path = mycfg.get('file', '/dev/urandom') + seed_data = mycfg.get('data', '') + seed_buf = StringIO() - seed_buf.write(_decode(my_cfg.get('data', ''), - encoding=my_cfg.get('encoding'))) + if seed_data: + seed_buf.write(_decode(seed_data, encoding=mycfg.get('encoding'))) + # 'random_seed' is set up by Azure 
datasource, and comes already in + # openstack meta_data.json metadata = cloud.datasource.metadata if metadata and 'random_seed' in metadata: seed_buf.write(metadata['random_seed']) seed_data = seed_buf.getvalue() if len(seed_data): - log.debug("%s: adding %s bytes of random seed entrophy to %s", name, + log.debug("%s: adding %s bytes of random seed entropy to %s", name, len(seed_data), seed_path) util.append_file(seed_path, seed_data) + + command = mycfg.get('command', ['pollinate', '-q']) + req = mycfg.get('command_required', False) + try: + handle_random_seed_command(command=command, required=req) + except ValueError as e: + log.warn("handling random command [%s] failed: %s", command, e) + raise e diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py index 2b21ac02..00c50fc1 100644 --- a/tests/unittests/test_handler/test_handler_seed_random.py +++ b/tests/unittests/test_handler/test_handler_seed_random.py @@ -42,10 +42,29 @@ class TestRandomSeed(t_help.TestCase): def setUp(self): super(TestRandomSeed, self).setUp() self._seed_file = tempfile.mktemp() + self.unapply = [] + + # by default 'which' has nothing in its path + self.apply_patches([(util, 'which', self._which)]) + self.apply_patches([(util, 'subp', self._subp)]) + self.subp_called = [] + self.whichdata = {} def tearDown(self): + apply_patches([i for i in reversed(self.unapply)]) util.del_file(self._seed_file) + def apply_patches(self, patches): + ret = apply_patches(patches) + self.unapply += ret + + def _which(self, program): + return self.whichdata.get(program) + + def _subp(self, args): + self.subp_called.append(tuple(args)) + return + def _compress(self, text): contents = StringIO() gz_fh = gzip.GzipFile(mode='wb', fileobj=contents) @@ -148,3 +167,51 @@ class TestRandomSeed(t_help.TestCase): cc_seed_random.handle('test', cfg, c, LOG, []) contents = util.load_file(self._seed_file) 
self.assertEquals('tiny-tim-was-here-so-was-josh', contents) + + def test_seed_command_not_provided_pollinate_available(self): + c = self._get_cloud('ubuntu', {}) + self.whichdata = {'pollinate': '/usr/bin/pollinate'} + cc_seed_random.handle('test', {}, c, LOG, []) + + self.assertEquals(self.subp_called, [('pollinate', '-q')]) + + def test_seed_command_not_provided_pollinate_not_available(self): + c = self._get_cloud('ubuntu', {}) + self.whichdata = {} + cc_seed_random.handle('test', {}, c, LOG, []) + + # subp should not have been called as which would say not available + self.assertEquals(self.subp_called, list()) + + def test_unavailable_seed_command_and_required_raises_error(self): + c = self._get_cloud('ubuntu', {}) + self.whichdata = {} + self.assertRaises(ValueError, cc_seed_random.handle, + 'test', {'random_seed': {'command_required': True}}, c, LOG, []) + + def test_seed_command_and_required(self): + c = self._get_cloud('ubuntu', {}) + self.whichdata = {'foo': 'foo'} + cfg = {'random_seed': {'command_required': True, 'command': ['foo']}} + cc_seed_random.handle('test', cfg, c, LOG, []) + + self.assertEquals(self.subp_called, [('foo',)]) + + def test_seed_command_non_default(self): + c = self._get_cloud('ubuntu', {}) + self.whichdata = {'foo': 'foo'} + cfg = {'random_seed': {'command_required': True, 'command': ['foo']}} + cc_seed_random.handle('test', cfg, c, LOG, []) + + self.assertEquals(self.subp_called, [('foo',)]) + + +def apply_patches(patches): + ret = [] + for (ref, name, replace) in patches: + if replace is None: + continue + orig = getattr(ref, name) + setattr(ref, name, replace) + ret.append((ref, name, orig)) + return ret -- cgit v1.2.3 From f33583bae55dbf071cce88c4e85b289c93e970c8 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 3 Mar 2014 16:49:37 -0500 Subject: version space (v1:) result_path json also --- bin/cloud-init | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/cloud-init b/bin/cloud-init index 
261aaa4e..6ede60af 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -517,7 +517,7 @@ def status_wrapper(name, args, data_d=None, link_d=None): errors.extend(v1[m].get('errors', [])) atomic_write_json(result_path, - {'datasource': v1['datasource'], 'errors': errors}) + {'v1': {'datasource': v1['datasource'], 'errors': errors}}) util.sym_link(os.path.relpath(result_path, link_d), result_link, force=True) -- cgit v1.2.3 From d9661a8ef4c6003ef48757715965ebb5c071c80b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 12 Mar 2014 10:59:13 -0400 Subject: final_message: allow replacement of capital name keys. documentation of final_message in doc/examples/cloud-config-final-message.txt showed '$UPTIME' and '$TIMESTAMP' would be available, but only the lower case versions of these strings were available. This change just makes all lower case and upper case keys available here to avoid breaking anyone who used the functional-but-not-correctly-documented lower case names. LP: #1286164 --- cloudinit/config/cc_final_message.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index e92cba4a..b24294e4 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -53,6 +53,7 @@ def handle(_name, cfg, cloud, log, args): 'version': cver, 'datasource': str(cloud.datasource), } + subs.update(dict([(k.upper(), v) for k, v in subs.items()])) util.multi_log("%s\n" % (templater.render_string(msg_in, subs)), console=False, stderr=True, log=log) except Exception: -- cgit v1.2.3 From e91fd55890922d9054523afab4d7e4b268c1be64 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Tue, 18 Mar 2014 15:57:30 -0600 Subject: Windows Azure defines the ephemeral0 mount as being a per-boot instead of per instance. Under a variety of circumstances, the ephemeral device may be presented as a default device. 
This patch detects when that situation happens and triggers CC modules disk-setup and mounts to run again. Details of changes for cloudinit/sources/DataSourceAzure.py: - auto-detect the location of ephemeral0 - check each boot if ephemeral0 is new - done via NTFS w/ label of "Temporary Storage" w/ no files on it - if device is mounted, datasource will unmount it - if is new, change mounts and disk-setup to always for that boot only --- cloudinit/sources/DataSourceAzure.py | 98 ++++++++++++++++++++++++++++++++++-- 1 file changed, 94 insertions(+), 4 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index c7331da5..256e0539 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -18,12 +18,14 @@ import base64 import crypt +import fnmatch import os import os.path import time from xml.dom import minidom from cloudinit import log as logging +from cloudinit.settings import PER_ALWAYS from cloudinit import sources from cloudinit import util @@ -53,14 +55,15 @@ BUILTIN_CLOUD_CONFIG = { 'disk_setup': { 'ephemeral0': {'table_type': 'mbr', 'layout': True, - 'overwrite': False} - }, + 'overwrite': False}, + }, 'fs_setup': [{'filesystem': 'ext4', 'device': 'ephemeral0.1', - 'replace_fs': 'ntfs'}] + 'replace_fs': 'ntfs'}], } DS_CFG_PATH = ['datasource', DS_NAME] +DEF_EPHEMERAL_LABEL = 'Temporary Storage' class DataSourceAzureNet(sources.DataSource): @@ -189,8 +192,17 @@ class DataSourceAzureNet(sources.DataSource): LOG.warn("failed to get instance id in %s: %s", shcfgxml, e) pubkeys = pubkeys_from_crt_files(fp_files) - self.metadata['public-keys'] = pubkeys + + found_ephemeral = find_ephemeral_disk() + if found_ephemeral: + self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral + LOG.debug("using detected ephemeral0 of %s" % found_ephemeral) + + cc_modules_override = support_new_ephemeral(self.sys_cfg) + if cc_modules_override: + self.cfg['cloud_config_modules'] = cc_modules_override 
+ return True def device_name_to_device(self, name): @@ -200,6 +212,84 @@ class DataSourceAzureNet(sources.DataSource): return self.cfg +def count_files(mp): + return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*')) + + +def find_ephemeral_part(): + """ + Locate the default ephmeral0.1 device. This will be the first device + that has a LABEL of DEF_EPHEMERAL_LABEL and is a NTFS device. If Azure + gets more ephemeral devices, this logic will only identify the first + such device. + """ + c_label_devs = util.find_devs_with("LABEL=%s" % DEF_EPHEMERAL_LABEL) + c_fstype_devs = util.find_devs_with("TYPE=ntfs") + for dev in c_label_devs: + if dev in c_fstype_devs: + return dev + return None + + +def find_ephemeral_disk(): + """ + Get the ephemeral disk. + """ + part_dev = find_ephemeral_part() + if part_dev and str(part_dev[-1]).isdigit(): + return part_dev[:-1] + elif part_dev: + return part_dev + return None + + +def support_new_ephemeral(cfg): + """ + Windows Azure makes ephemeral devices ephemeral to boot; a ephemeral device + may be presented as a fresh device, or not. + + Since the knowledge of when a disk is supposed to be plowed under is specific + to Windows Azure, the logic resides here in the datasource. When a new ephemeral + device is detected, cloud-init overrides the default frequency for both disk-setup + and mounts for the current boot only. 
+ """ + device = find_ephemeral_part() + if not device: + LOG.debug("no default fabric formated ephemeral0.1 found") + return None + LOG.debug("fabric formated ephemeral0.1 device at %s" % device) + + file_count = 0 + try: + file_count = util.mount_cb(device, count_files) + except: + return None + LOG.debug("fabric prepared ephmeral0.1 has %s files on it" % file_count) + + if file_count >= 1: + LOG.debug("fabric prepared ephemeral0.1 will be preserved") + return None + else: + with util.unmounter(device): + LOG.debug("unmounted fabric prepared ephemeral0.1") + + LOG.debug("cloud-init will format ephemeral0.1 this boot.") + LOG.debug("setting disk_setup and mounts modules 'always' for this boot") + + cc_modules = cfg.get('cloud_config_modules') + if cc_modules: + mod_list = [] + for mod in cc_modules: + if mod in ("disk_setup", "mounts"): + mod_list.append([mod, PER_ALWAYS]) + LOG.debug("set module '%s' to 'always' for this boot" % mod) + else: + mod_list.append(mod) + return mod_list + + return None + + def handle_set_hostname(enabled, hostname, cfg): if not util.is_true(enabled): return -- cgit v1.2.3 From 2025632daf5b202dbe6424a112d8689a1f93d9ac Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 19 Mar 2014 13:29:27 -0400 Subject: minor changes: be more careful about umount and warn on fail --- cloudinit/sources/DataSourceAzure.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 256e0539..ffb4ff87 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -269,25 +269,28 @@ def support_new_ephemeral(cfg): if file_count >= 1: LOG.debug("fabric prepared ephemeral0.1 will be preserved") return None - else: - with util.unmounter(device): - LOG.debug("unmounted fabric prepared ephemeral0.1") + elif device in util.mounted(): + try: + util.subp(['umount', device]) + except util.ProcessExecutionError as 
e: + LOG.warn("Failed to unmount %s, will not reformat", device) + return None LOG.debug("cloud-init will format ephemeral0.1 this boot.") LOG.debug("setting disk_setup and mounts modules 'always' for this boot") cc_modules = cfg.get('cloud_config_modules') - if cc_modules: - mod_list = [] - for mod in cc_modules: - if mod in ("disk_setup", "mounts"): - mod_list.append([mod, PER_ALWAYS]) - LOG.debug("set module '%s' to 'always' for this boot" % mod) - else: - mod_list.append(mod) - return mod_list + if not cc_modules: + return None - return None + mod_list = [] + for mod in cc_modules: + if mod in ("disk_setup", "mounts"): + mod_list.append([mod, PER_ALWAYS]) + LOG.debug("set module '%s' to 'always' for this boot" % mod) + else: + mod_list.append(mod) + return mod_list def handle_set_hostname(enabled, hostname, cfg): -- cgit v1.2.3 From 47019b77b23c72cd2e71098c01c4d86b06d1de8c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 19 Mar 2014 13:38:37 -0400 Subject: change to unmount then check to address possible race --- cloudinit/sources/DataSourceAzure.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index ffb4ff87..39b8f4f6 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -269,7 +269,18 @@ def support_new_ephemeral(cfg): if file_count >= 1: LOG.debug("fabric prepared ephemeral0.1 will be preserved") return None - elif device in util.mounted(): + else: + # if device was already mounted, then we need to unmount it + # race conditions could allow for a check-then-unmount + # to have a false positive. so just unmount and then check. 
+ try: + util.subp(['umount', device]) + except util.ProcessExecutionError as e: + if device in util.mounts(): + LOG.warn("Failed to unmount %s, will not reformat", device) + return None + + if device in util.mounts(): try: util.subp(['umount', device]) except util.ProcessExecutionError as e: -- cgit v1.2.3 From 2fe478831680a270c456122fdeecc3c639a4ec62 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 19 Mar 2014 14:00:41 -0400 Subject: Azure: pep8 and pylint cleanups from previous commit. --- cloudinit/sources/DataSourceAzure.py | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 39b8f4f6..bd75e6d8 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -197,7 +197,7 @@ class DataSourceAzureNet(sources.DataSource): found_ephemeral = find_ephemeral_disk() if found_ephemeral: self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral - LOG.debug("using detected ephemeral0 of %s" % found_ephemeral) + LOG.debug("using detected ephemeral0 of %s", found_ephemeral) cc_modules_override = support_new_ephemeral(self.sys_cfg) if cc_modules_override: @@ -248,23 +248,23 @@ def support_new_ephemeral(cfg): Windows Azure makes ephemeral devices ephemeral to boot; a ephemeral device may be presented as a fresh device, or not. - Since the knowledge of when a disk is supposed to be plowed under is specific - to Windows Azure, the logic resides here in the datasource. When a new ephemeral - device is detected, cloud-init overrides the default frequency for both disk-setup - and mounts for the current boot only. + Since the knowledge of when a disk is supposed to be plowed under is + specific to Windows Azure, the logic resides here in the datasource. When a + new ephemeral device is detected, cloud-init overrides the default + frequency for both disk-setup and mounts for the current boot only. 
""" device = find_ephemeral_part() if not device: LOG.debug("no default fabric formated ephemeral0.1 found") return None - LOG.debug("fabric formated ephemeral0.1 device at %s" % device) + LOG.debug("fabric formated ephemeral0.1 device at %s", device) file_count = 0 try: file_count = util.mount_cb(device, count_files) except: return None - LOG.debug("fabric prepared ephmeral0.1 has %s files on it" % file_count) + LOG.debug("fabric prepared ephmeral0.1 has %s files on it", file_count) if file_count >= 1: LOG.debug("fabric prepared ephemeral0.1 will be preserved") @@ -277,15 +277,9 @@ def support_new_ephemeral(cfg): util.subp(['umount', device]) except util.ProcessExecutionError as e: if device in util.mounts(): - LOG.warn("Failed to unmount %s, will not reformat", device) + LOG.warn("Failed to unmount %s, will not reformat.", device) + LOG.debug("Failed umount: %s", e) return None - - if device in util.mounts(): - try: - util.subp(['umount', device]) - except util.ProcessExecutionError as e: - LOG.warn("Failed to unmount %s, will not reformat", device) - return None LOG.debug("cloud-init will format ephemeral0.1 this boot.") LOG.debug("setting disk_setup and mounts modules 'always' for this boot") @@ -298,7 +292,7 @@ def support_new_ephemeral(cfg): for mod in cc_modules: if mod in ("disk_setup", "mounts"): mod_list.append([mod, PER_ALWAYS]) - LOG.debug("set module '%s' to 'always' for this boot" % mod) + LOG.debug("set module '%s' to 'always' for this boot", mod) else: mod_list.append(mod) return mod_list -- cgit v1.2.3 From 11d6dbfad89e3f9a56925f7671fa7ee3e86af918 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 20 Mar 2014 12:33:29 -0400 Subject: NoCloud: fix broken seedfrom on the kernel command line This was broken in the VendorData add. 
LP: #1295223 --- cloudinit/sources/DataSourceNoCloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 8dc96ab6..a315aae0 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -57,7 +57,7 @@ class DataSourceNoCloud(sources.DataSource): md = {} if parse_cmdline_data(self.cmdline_id, md): found.append("cmdline") - mydata.update(md) + mydata['meta-data'].update(md) except: util.logexc(LOG, "Unable to parse command line data") return False -- cgit v1.2.3 From 9486c1a1abacb9829e5ab172212d57c3735e35e0 Mon Sep 17 00:00:00 2001 From: Enol Fernandez Date: Tue, 25 Mar 2014 16:31:16 +0100 Subject: Added base64 decoding of user data for OpenNebula. --- cloudinit/sources/DataSourceOpenNebula.py | 12 +++++++++++ tests/unittests/test_datasource/test_opennebula.py | 25 ++++++++++++++++++++-- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index b0464cbb..d91b80ab 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -4,11 +4,13 @@ # Copyright (C) 2012 Yahoo! Inc. # Copyright (C) 2012-2013 CERIT Scientific Cloud # Copyright (C) 2012-2013 OpenNebula.org +# Copyright (C) 2014 Consejo Superior de Investigaciones Cientificas # # Author: Scott Moser # Author: Joshua Harlow # Author: Vlastimil Holer # Author: Javier Fontan +# Author: Enol Fernandez # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -22,6 +24,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
In cases where there can be a mistake in automatic behavior, and some users may be relying on old behavior, it's best to just require explicit use.
'USERDATA'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: b64encode(USER_DATA), + 'USERDATA_ENCODING': 'base64'}) results = ds.read_context_disk_dir(my_d) self.assertTrue('userdata' in results) -- cgit v1.2.3 From 2ecefdf51cd93b593bea450b4d751021da91e748 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 27 Mar 2014 10:03:27 -0400 Subject: change 'default' encoding to be "None" Instead of just trying to see if userdata decodes as the indication that it should be encoded, the user must explicitly set this. The "just try it" will fail in the case where the user had other use of user-data and wanted a blob of data to go through unrecognized by cloud-init. In cases where there can be mistake in automatic behavior, and some users may be relaying on old behavior, its best to just require explicit use. --- cloudinit/sources/DataSourceOpenNebula.py | 5 +++-- tests/unittests/test_datasource/test_opennebula.py | 7 ++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index d91b80ab..34557f8b 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -422,8 +422,9 @@ def read_context_disk_dir(source_dir, asuser=None): # b64decode user data if necessary (default) if 'userdata' in results: - userdata_encoding = context.get('USERDATA_ENCODING', None) - if userdata_encoding in (None, "base64"): + encoding = context.get('USERDATA_ENCODING', + context.get('USER_DATA_ENCODING')) + if encoding == "base64": try: results['userdata'] = base64.b64decode(results['userdata']) except TypeError: diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index 47e7acbc..ec6b752b 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -175,14 +175,15 @@ class 
TestOpenNebulaDataSource(MockerTestCase): self.assertTrue('userdata' in results) self.assertEqual(USER_DATA, results['userdata']) - def test_user_data_default_encoding(self): + def test_user_data_encoding_required_for_decode(self): + b64userdata = b64encode(USER_DATA) for k in ('USER_DATA', 'USERDATA'): my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: b64encode(USER_DATA)}) + populate_context_dir(my_d, {k: b64userdata}) results = ds.read_context_disk_dir(my_d) self.assertTrue('userdata' in results) - self.assertEqual(USER_DATA, results['userdata']) + self.assertEqual(b64userdata, results['userdata']) def test_user_data_base64_encoding(self): for k in ('USER_DATA', 'USERDATA'): -- cgit v1.2.3 From 5d9726742c22f4c80ce2f386d09c1bbcf4b67164 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 1 Apr 2014 14:20:57 -0400 Subject: pyflakes cleanups --- cloudinit/config/cc_power_state_change.py | 1 - tests/unittests/test__init__.py | 6 +----- tests/unittests/test_datasource/test_maas.py | 1 - tests/unittests/test_datasource/test_smartos.py | 3 --- tests/unittests/test_handler/test_handler_yum_add_repo.py | 1 - 5 files changed, 1 insertion(+), 11 deletions(-) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 561c5abd..8f99e887 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -22,7 +22,6 @@ from cloudinit import util import errno import os import re -import signal import subprocess import time diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 8c41c1ca..03065c8b 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -1,14 +1,10 @@ -import logging import os -import StringIO -import sys -from mocker import MockerTestCase, ANY, ARGS, KWARGS +from mocker import MockerTestCase, ARGS, KWARGS from cloudinit import handlers from cloudinit import helpers from cloudinit import importer -from 
cloudinit import log from cloudinit import settings from cloudinit import url_helper from cloudinit import util diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index bd5d23fd..73cfadcb 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -3,7 +3,6 @@ import os from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper -from cloudinit import util from tests.unittests.helpers import populate_dir import mocker diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 8f9fa27d..45f1708a 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -24,10 +24,7 @@ import base64 from cloudinit import helpers as c_helpers -from cloudinit import stages -from cloudinit import util from cloudinit.sources import DataSourceSmartOS -from cloudinit.settings import (PER_INSTANCE) from tests.unittests import helpers import os import os.path diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py index 8df592f9..7c6f7c40 100644 --- a/tests/unittests/test_handler/test_handler_yum_add_repo.py +++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py @@ -1,4 +1,3 @@ -from cloudinit import helpers from cloudinit import util from cloudinit.config import cc_yum_add_repo -- cgit v1.2.3 From f7fa9d2aa9abd81b8f8b79b95bdb1fc0c10b5fe9 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Tue, 27 May 2014 10:17:18 -0600 Subject: Enable vendordata for CloudSigma (LP: #1303986) --- cloudinit/sources/DataSourceCloudSigma.py | 2 ++ tests/unittests/test_datasource/test_cloudsigma.py | 28 +++++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index e1c7e566..ad2a044a 
100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -66,6 +66,8 @@ class DataSourceCloudSigma(sources.DataSource): self.userdata_raw = server_meta.get('cloudinit-user-data', "") if 'cloudinit-user-data' in base64_fields: self.userdata_raw = b64decode(self.userdata_raw) + if 'cloudinit' in server_context.get('vendor_data', {}): + self.vendordata_raw = server_context["vendor_data"]["cloudinit"] self.metadata = server_context self.ssh_public_key = server_meta['ssh_public_key'] diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index adbb4afb..a1342a86 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -20,7 +20,11 @@ SERVER_CONTEXT = { "smp": 1, "tags": ["much server", "very performance"], "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e8890", - "vnc_password": "9e84d6cb49e46379" + "vnc_password": "9e84d6cb49e46379", + "vendor_data": { + "location": "zrh", + "cloudinit": "#cloud-config\n\n...", + } } @@ -68,3 +72,25 @@ class DataSourceCloudSigmaTest(TestCase): self.datasource.get_data() self.assertEqual(self.datasource.userdata_raw, b'hi world\n') + + def test_vendor_data(self): + self.assertEqual(self.datasource.vendordata_raw, + SERVER_CONTEXT['vendor_data']['cloudinit']) + + def test_lack_of_vendor_data(self): + stripped_context = copy.deepcopy(SERVER_CONTEXT) + del stripped_context["vendor_data"] + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.datasource.cepko = CepkoMock(stripped_context) + self.datasource.get_data() + + self.assertIsNone(self.datasource.vendordata_raw) + + def test_lack_of_cloudinit_key_in_vendor_data(self): + stripped_context = copy.deepcopy(SERVER_CONTEXT) + del stripped_context["vendor_data"]["cloudinit"] + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.datasource.cepko = 
CepkoMock(stripped_context) + self.datasource.get_data() + + self.assertIsNone(self.datasource.vendordata_raw) -- cgit v1.2.3 From 882f7186143c337e0f30f4ed2c0415f238ed5c83 Mon Sep 17 00:00:00 2001 From: Kiril Vladimiroff Date: Fri, 30 May 2014 14:17:57 +0300 Subject: Add timeouts for reading/writing from/to to the serial console --- cloudinit/cs_utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py index 4e53c31a..1db3f110 100644 --- a/cloudinit/cs_utils.py +++ b/cloudinit/cs_utils.py @@ -35,6 +35,8 @@ import platform import serial +READ_TIMEOUT = 60 +WRITE_TIMEOUT = 10 SERIAL_PORT = '/dev/ttyS1' if platform.system() == 'Windows': SERIAL_PORT = 'COM2' @@ -76,7 +78,9 @@ class CepkoResult(object): self.result = self._marshal(self.raw_result) def _execute(self): - connection = serial.Serial(SERIAL_PORT) + connection = serial.Serial(port=SERIAL_PORT, + timeout=READ_TIMEOUT, + writeTimeout=WRITE_TIMEOUT) connection.write(self.request) return connection.readline().strip('\x04\n') -- cgit v1.2.3 From 71d817c427f06e9e1f5d547d5db191e541963d31 Mon Sep 17 00:00:00 2001 From: Kiril Vladimiroff Date: Fri, 30 May 2014 14:19:10 +0300 Subject: Use dmidecode to detect if cloud-init runs in CloudSigma's infrastructure --- cloudinit/sources/DataSourceCloudSigma.py | 22 ++++++++++++++++++++++ tests/unittests/test_datasource/test_cloudsigma.py | 1 + 2 files changed, 23 insertions(+) diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index e1c7e566..fffff91e 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -20,6 +20,7 @@ import re from cloudinit import log as logging from cloudinit import sources +from cloudinit import util from cloudinit.cs_utils import Cepko LOG = logging.getLogger(__name__) @@ -40,12 +41,33 @@ class DataSourceCloudSigma(sources.DataSource): self.ssh_public_key = '' 
sources.DataSource.__init__(self, sys_cfg, distro, paths) + def is_running_in_cloudsigma(self): + """ + Uses dmidecode to detect if this instance of cloud-init is running + in the CloudSigma's infrastructure. + """ + dmidecode_path = util.which('dmidecode') + if not dmidecode_path: + return False + + LOG.debug("Determining hypervisor product name via dmidecode") + try: + system_product_name, _ = util.subp([dmidecode_path, "-s", "system-product-name"]) + return 'cloudsigma' in system_product_name.lower() + except: + LOG.exception("Failed to get hypervisor product name") + + return False + def get_data(self): """ Metadata is the whole server context and /meta/cloud-config is used as userdata. """ dsmode = None + if not self.is_running_in_cloudsigma(): + return False + try: server_context = self.cepko.all().result server_meta = server_context['meta'] diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index adbb4afb..25dc12f3 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -35,6 +35,7 @@ class CepkoMock(Cepko): class DataSourceCloudSigmaTest(TestCase): def setUp(self): self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.datasource.is_running_in_cloudsigma = lambda: True self.datasource.cepko = CepkoMock(SERVER_CONTEXT) self.datasource.get_data() -- cgit v1.2.3 From 910128e61aa3244e254d9c9c803436a5b9aef93c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 30 May 2014 10:26:39 -0400 Subject: open 0.7.6 --- ChangeLog | 2 ++ cloudinit/version.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 7e2b761e..74145ae1 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,5 @@ +0.7.6: + - open 0.7.6 0.7.5: - open 0.7.5 - Add a debug log message around import failures diff --git a/cloudinit/version.py b/cloudinit/version.py index 3db57235..edb651a9 100644 --- 
a/cloudinit/version.py +++ b/cloudinit/version.py @@ -20,7 +20,7 @@ from distutils import version as vr def version(): - return vr.StrictVersion("0.7.5") + return vr.StrictVersion("0.7.6") def version_string(): -- cgit v1.2.3 From 2d36a7ce4a0ccec3bd2881dd99d6d5012a85fe3c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 30 May 2014 14:46:53 -0400 Subject: minor cleanups. * do not run dmidecode on arm. * line length * comment that 60 second time out is expected --- cloudinit/cs_utils.py | 2 ++ cloudinit/sources/DataSourceCloudSigma.py | 12 ++++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py index 1db3f110..dcf56431 100644 --- a/cloudinit/cs_utils.py +++ b/cloudinit/cs_utils.py @@ -35,8 +35,10 @@ import platform import serial +# these high timeouts are necessary as read may read a lot of data. READ_TIMEOUT = 60 WRITE_TIMEOUT = 10 + SERIAL_PORT = '/dev/ttyS1' if platform.system() == 'Windows': SERIAL_PORT = 'COM2' diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index fffff91e..a8c04d19 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -16,6 +16,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . from base64 import b64decode +import os import re from cloudinit import log as logging @@ -46,16 +47,23 @@ class DataSourceCloudSigma(sources.DataSource): Uses dmidecode to detect if this instance of cloud-init is running in the CloudSigma's infrastructure. 
""" + uname_arch = os.uname()[4] + if uname_arch.startswith("arm") or uname_arch == "aarch64": + # Disabling because dmidecode in CMD_DMI_SYSTEM crashes kvm process + LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)") + return False + dmidecode_path = util.which('dmidecode') if not dmidecode_path: return False LOG.debug("Determining hypervisor product name via dmidecode") try: - system_product_name, _ = util.subp([dmidecode_path, "-s", "system-product-name"]) + cmd = [dmidecode_path, "--string", "system-product-name"] + system_product_name, _ = util.subp(cmd) return 'cloudsigma' in system_product_name.lower() except: - LOG.exception("Failed to get hypervisor product name") + LOG.warn("Failed to get hypervisor product name via dmidecode") return False -- cgit v1.2.3 From 2bb228751a223f21296ff9166b42583c670359a5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 2 Jun 2014 16:56:31 -0400 Subject: SmartOS test: do not require existance of /dev/ttyS1. LP: #1316597 --- ChangeLog | 1 + cloudinit/sources/DataSourceSmartOS.py | 10 ++++++++-- tests/unittests/test_datasource/test_smartos.py | 1 + 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 2dee548e..c455f469 100644 --- a/ChangeLog +++ b/ChangeLog @@ -3,6 +3,7 @@ - Enable vendordata on CloudSigma datasource (LP: #1303986) - Poll on /dev/ttyS1 in CloudSigma datasource only if dmidecode says we're running on cloudsigma (LP: #1316475) [Kiril Vladimiroff] + - SmartOS test: do not require existance of /dev/ttyS1. 
[LP: #1316597] 0.7.5: - open 0.7.5 - Add a debug log message around import failures diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 7c1eb09a..65ec0339 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -170,8 +170,9 @@ class DataSourceSmartOS(sources.DataSource): md = {} ud = "" - if not os.path.exists(self.seed): - LOG.debug("Host does not appear to be on SmartOS") + if not device_exists(self.seed): + LOG.debug("No serial device '%s' found for SmartOS datasource", + self.seed) return False uname_arch = os.uname()[4] @@ -274,6 +275,11 @@ class DataSourceSmartOS(sources.DataSource): b64=b64) +def device_exists(device): + """Symplistic method to determine if the device exists or not""" + return os.path.exists(device) + + def get_serial(seed_device, seed_timeout): """This is replaced in unit testing, allowing us to replace serial.Serial with a mocked class. diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 45f1708a..f64aea07 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -171,6 +171,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): self.apply_patches([(mod, 'get_serial', _get_serial)]) self.apply_patches([(mod, 'dmi_data', _dmi_data)]) self.apply_patches([(os, 'uname', _os_uname)]) + self.apply_patches([(mod, 'device_exists', lambda d: True)]) dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None, paths=self.paths) return dsrc -- cgit v1.2.3 From 9a61ab153ab9d68fb796650666d4ad6729926685 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 9 Jun 2014 20:38:09 -0400 Subject: doc: fix user-groups doc to reference plural ssh-authorized-keys LP: #1327065 --- ChangeLog | 2 ++ doc/examples/cloud-config-user-groups.txt | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 
c455f469..7a35d324 100644 --- a/ChangeLog +++ b/ChangeLog @@ -4,6 +4,8 @@ - Poll on /dev/ttyS1 in CloudSigma datasource only if dmidecode says we're running on cloudsigma (LP: #1316475) [Kiril Vladimiroff] - SmartOS test: do not require existance of /dev/ttyS1. [LP: #1316597] + - doc: fix user-groups doc to reference plural ssh-authorized-keys + (LP: #1327065) [Joern Heissler] 0.7.5: - open 0.7.5 - Add a debug log message around import failures diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index 01548380..31491faf 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -69,7 +69,7 @@ users: # no-user-group: When set to true, do not create a group named after the user. # no-log-init: When set to true, do not initialize lastlog and faillog database. # ssh-import-id: Optional. Import SSH ids -# ssh-authorized-key: Optional. Add key to user's ssh authorized keys file +# ssh-authorized-keys: Optional. [list] Add keys to user's authorized keys file # sudo: Defaults to none. Set to the sudo string you want to use, i.e. # ALL=(ALL) NOPASSWD:ALL. To add multiple rules, use the following # format. -- cgit v1.2.3 From 5d88d17e850a42c2c2b241d7fd2d840b1c3213fb Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 29 Jun 2014 11:34:02 -0700 Subject: Move to a rst format for the TODO file Adjust the TODO file to be in rst format (which is nicer to look at) and remove some of the TODO items that are no longer relevant. 
--- TODO | 46 ---------------------------------------------- TODO.rst | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 46 deletions(-) delete mode 100644 TODO create mode 100644 TODO.rst diff --git a/TODO b/TODO deleted file mode 100644 index 792bc63d..00000000 --- a/TODO +++ /dev/null @@ -1,46 +0,0 @@ -- Consider a 'failsafe' DataSource - If all others fail, setting a default that - - sets the user password, writing it to console - - logs to console that this happened -- Consider a 'previous' DataSource - If no other data source is found, fall back to the 'previous' one - keep a indication of what instance id that is in /var/lib/cloud -- Rewrite "cloud-init-query" (currently not implemented) - Possibly have DataSource and cloudinit expose explicit fields - - instance-id - - hostname - - mirror - - release - - ssh public keys -- Remove the conversion of the ubuntu network interface format conversion - to a RH/fedora format and replace it with a top level format that uses - the netcf libraries format instead (which itself knows how to translate - into the specific formats) -- Replace the 'apt*' modules with variants that now use the distro classes - to perform distro independent packaging commands (where possible) -- Canonicalize the semaphore/lock name for modules and user data handlers - a. 
It is most likely a bug that currently exists that if a module in config - alters its name and it has already ran, then it will get ran again since - the lock name hasn't be canonicalized -- Replace some the LOG.debug calls with a LOG.info where appropriate instead - of how right now there is really only 2 levels (WARN and DEBUG) -- Remove the 'cc_' for config modules, either have them fully specified (ie - 'cloudinit.config.resizefs') or by default only look in the 'cloudinit.config' - for these modules (or have a combination of the above), this avoids having - to understand where your modules are coming from (which can be altered by - the current python inclusion path) -- Depending on if people think the wrapper around 'os.path.join' provided - by the 'paths' object is useful (allowing us to modify based off a 'read' - and 'write' configuration based 'root') or is just to confusing, it might be - something to remove later, and just recommend using 'chroot' instead (or the X - different other options which are similar to 'chroot'), which is might be more - natural and less confusing... -- Instead of just warning when a module is being ran on a 'unknown' distribution - perhaps we should not run that module in that case? Or we might want to start - reworking those modules so they will run on all distributions? Or if that is - not the case, then maybe we want to allow fully specified python paths for - modules and start encouraging packages of 'ubuntu' modules, packages of 'rhel' - specific modules that people can add instead of having them all under the - cloud-init 'root' tree? This might encourage more development of other modules - instead of having to go edit the cloud-init code to accomplish this. 
- diff --git a/TODO.rst b/TODO.rst new file mode 100644 index 00000000..50e09f28 --- /dev/null +++ b/TODO.rst @@ -0,0 +1,46 @@ +============================================== +Things that cloud-init may do (better) someday +============================================== + +- Consider a ``failsafe`` ``DataSource``, if all others datasources fail, + using a default ``DataSource`` that does the following would be really nice: + + - sets the user password, writing it to console + - logs to console that this happened + +- Consider a ``previous`` ``DataSource``, if no other data source is + found, fall back to the ``previous`` one that worked. +- Rewrite ``cloud-init-query`` (currently not implemented) +- Possibly have a ``DataSource`` expose explicit fields: + + - instance-id + - hostname + - mirror + - release + - ssh public keys + +- Remove the conversion of the ubuntu network interface format conversion + to a RH/fedora format and replace it with a top level format that uses + the netcf libraries format instead (which itself knows how to translate + into the specific formats). See for example `netcf`_ which seems to be + an active project that has this capability. +- Replace the ``apt*`` modules with variants that now use the distro classes + to perform distro independent packaging commands (wherever possible). +- Replace some the LOG.debug calls with a LOG.info where appropriate instead + of how right now there is really only 2 levels (``WARN`` and ``DEBUG``) +- Remove the ``cc_`` prefix for config modules, either have them fully + specified (ie ``cloudinit.config.resizefs``) or by default only look in + the ``cloudinit.config`` namespace for these modules (or have a combination + of the above), this avoids having to understand where your modules are + coming from (which can be altered by the current python inclusion path) +- Instead of just warning when a module is being ran on a ``unknown`` + distribution perhaps we should not run that module in that case? 
Or we might + want to start reworking those modules so they will run on all + distributions? Or if that is not the case, then maybe we want to allow + fully specified python paths for modules and start encouraging + packages of ``ubuntu`` modules, packages of ``rhel`` specific modules that + people can add instead of having them all under the cloud-init ``root`` + tree? This might encourage more development of other modules instead of + having to go edit the cloud-init code to accomplish this. + +.. _netcf: https://fedorahosted.org/netcf/ -- cgit v1.2.3 From a3033269ba38057716d93e65a77a8257acc628d5 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 15 Jul 2014 15:09:08 -0700 Subject: Remove some of the more useless debug logs --- cloudinit/importer.py | 4 ---- cloudinit/mergers/__init__.py | 5 ----- cloudinit/stages.py | 8 +++++--- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/cloudinit/importer.py b/cloudinit/importer.py index a094141a..a1929137 100644 --- a/cloudinit/importer.py +++ b/cloudinit/importer.py @@ -45,8 +45,6 @@ def find_module(base_name, search_paths, required_attrs=None): real_path.append(base_name) full_path = '.'.join(real_path) real_paths.append(full_path) - LOG.debug("Looking for modules %s that have attributes %s", - real_paths, required_attrs) for full_path in real_paths: mod = None try: @@ -62,6 +60,4 @@ def find_module(base_name, search_paths, required_attrs=None): found_attrs += 1 if found_attrs == len(required_attrs): found_places.append(full_path) - LOG.debug("Found %s with attributes %s in %s", base_name, - required_attrs, found_places) return found_places diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index 0978b2c6..650b42a9 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -55,9 +55,6 @@ class UnknownMerger(object): if not meth: meth = self._handle_unknown args.insert(0, method_name) - LOG.debug("Merging '%s' into '%s' using method '%s' of '%s'", - 
type_name, type_utils.obj_name(merge_with), - meth.__name__, self) return meth(*args) @@ -84,8 +81,6 @@ class LookupMerger(UnknownMerger): # First one that has that method/attr gets to be # the one that will be called meth = getattr(merger, meth_wanted) - LOG.debug(("Merging using located merger '%s'" - " since it had method '%s'"), merger, meth_wanted) break if not meth: return UnknownMerger._handle_unknown(self, meth_wanted, diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 58349ffc..9e071fc4 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -397,8 +397,8 @@ class Init(object): mod = handlers.fixup_handler(mod) types = c_handlers.register(mod) if types: - LOG.debug("Added custom handler for %s from %s", - types, fname) + LOG.debug("Added custom handler for %s [%s] from %s", + types, mod, fname) except Exception: util.logexc(LOG, "Failed to register handler from %s", fname) @@ -644,6 +644,8 @@ class Modules(object): freq = mod.frequency if not freq in FREQUENCIES: freq = PER_INSTANCE + LOG.debug("Running module %s (%s) with frequency %s", + name, mod, freq) # Use the configs logger and not our own # TODO(harlowja): possibly check the module @@ -657,7 +659,7 @@ class Modules(object): run_name = "config-%s" % (name) cc.run(run_name, mod.handle, func_args, freq=freq) except Exception as e: - util.logexc(LOG, "Running %s (%s) failed", name, mod) + util.logexc(LOG, "Running module %s (%s) failed", name, mod) failures.append((name, e)) return (which_ran, failures) -- cgit v1.2.3 From 1c509f3d9340bec9357428bd9f84dd290c642cff Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 15 Jul 2014 18:38:05 -0700 Subject: Add more useful execution continuing message --- bin/cloud-init | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bin/cloud-init b/bin/cloud-init index 6ede60af..ad0ba214 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -224,6 +224,9 @@ def main_init(name, args): LOG.debug("Exiting early due to the existence of %s files", 
existing_files) return (None, []) + else: + LOG.debug("Execution continuing, no previous run detected that" + " would allow us to stop early.") else: # The cache is not instance specific, so it has to be purged # but we want 'start' to benefit from a cache if -- cgit v1.2.3 From 81525fd93541b41d31b6da13df61a0494cc1e7f6 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 16 Jul 2014 12:57:24 -0700 Subject: Fix a few tests that have been failing in python 2.6 A few of the current tests have been continually failing in python 2.6 based systems, due to lack of unit test functions that are now added to ensure we can run the unit tests (and not have to ignore those failures) on python 2.6 --- tests/unittests/helpers.py | 24 ++++++++++++++++++++++ tests/unittests/test_datasource/test_cloudsigma.py | 5 +++-- tests/unittests/test_datasource/test_gce.py | 5 +++-- 3 files changed, 30 insertions(+), 4 deletions(-) diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 5bed13cc..970eb8cb 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -52,6 +52,30 @@ if PY26: standardMsg = standardMsg % (value) self.fail(self._formatMessage(msg, standardMsg)) + def assertDictContainsSubset(self, expected, actual, msg=None): + missing = [] + mismatched = [] + for k, v in expected.iteritems(): + if k not in actual: + missing.append(k) + elif actual[k] != v: + mismatched.append('%r, expected: %r, actual: %r' + % (k, v, actual[k])) + + if len(missing) == 0 and len(mismatched) == 0: + return + + standardMsg = '' + if missing: + standardMsg = 'Missing: %r' % ','.join(m for m in missing) + if mismatched: + if standardMsg: + standardMsg += '; ' + standardMsg += 'Mismatched values: %s' % ','.join(mismatched) + + self.fail(self._formatMessage(msg, standardMsg)) + + else: class TestCase(unittest.TestCase): pass diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index f92e07b7..eadb3cb7 
100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -1,10 +1,11 @@ # coding: utf-8 import copy -from unittest import TestCase from cloudinit.cs_utils import Cepko from cloudinit.sources import DataSourceCloudSigma +from tests.unittests import helpers as test_helpers + SERVER_CONTEXT = { "cpu": 1000, @@ -36,7 +37,7 @@ class CepkoMock(Cepko): return self -class DataSourceCloudSigmaTest(TestCase): +class DataSourceCloudSigmaTest(test_helpers.TestCase): def setUp(self): self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") self.datasource.is_running_in_cloudsigma = lambda: True diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index d91bd531..1979a0de 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -15,7 +15,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import unittest import httpretty import re @@ -25,6 +24,8 @@ from cloudinit import settings from cloudinit import helpers from cloudinit.sources import DataSourceGCE +from tests.unittests import helpers as test_helpers + GCE_META = { 'instance/id': '123', 'instance/zone': 'foo/bar', @@ -54,7 +55,7 @@ def _request_callback(method, uri, headers): return (404, headers, '') -class TestDataSourceGCE(unittest.TestCase): +class TestDataSourceGCE(test_helpers.TestCase): def setUp(self): self.ds = DataSourceGCE.DataSourceGCE( -- cgit v1.2.3