From 4a86775c9cff53a5598db8f4a395abe7c228a147 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Sat, 18 Aug 2012 21:15:52 -0700
Subject: Add support for printing out the authkey's for the default user.

1. Adjust the sshutil so that it has functions for doing this
   (used by the previous functions)
2. Create a new module that pretty prints out the given authorized
   keys fetched (if any) using the standard md5 scheme (for now),
   this module can be disabled by setting 'no_ssh_fingerprints'
   or just removing it from the running list.
---
 cloudinit/config/cc_ssh_authkey_fingerprints.py | 86 ++++++++++++++++++++++
 cloudinit/ssh_util.py                           | 94 ++++++++++++++-----------
 2 files changed, 140 insertions(+), 40 deletions(-)
 create mode 100644 cloudinit/config/cc_ssh_authkey_fingerprints.py

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
new file mode 100644
index 00000000..d4f136c2
--- /dev/null
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -0,0 +1,86 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2012 Yahoo! Inc.
+#
+#    Author: Joshua Harlow
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import base64
+import glob
+import hashlib
+import os
+
+from prettytable import PrettyTable
+
+from cloudinit import util
+from cloudinit import ssh_util
+
+FP_HASH_TYPE = 'md5'
+FP_SEGMENT_LEN = 2
+FP_SEGMENT_SEP = ":"
+
+
+def _split_hash(bin_hash):
+    split_up = []
+    for i in xrange(0, len(bin_hash), FP_SEGMENT_LEN):
+        split_up.append(bin_hash[i:i+FP_SEGMENT_LEN])
+    return split_up
+
+
+def _gen_fingerprint(b64_text):
+    if not b64_text:
+        return ''
+    # Maybe we should feed this into 'ssh -lf'?
+    try:
+        bin_text = base64.b64decode(b64_text)
+        hasher = hashlib.new(FP_HASH_TYPE)
+        hasher.update(bin_text)
+        pp_hash = FP_SEGMENT_SEP.join(_split_hash(hasher.hexdigest()))
+        return pp_hash
+    except TypeError:
+        return ''
+
+
+def _pprint_key_entries(user, key_fn, key_entries, prefix='ci-info: '):
+    if not key_entries:
+        message = "%sno authorized ssh keys fingerprints found for user %s."\
+            % (prefix, user)
+        util.multi_log(message)
+        return
+    tbl_fields = ['Keytype', 'Fingerprint', 'Options', 'Comment']
+    tbl = PrettyTable(tbl_fields)
+    for entry in key_entries:
+        row = []
+        row.append(entry.keytype or '-')
+        row.append(_gen_fingerprint(entry.base64) or '-')
+        row.append(entry.comment or '-')
+        row.append(entry.options or '-')
+        tbl.add_row(row)
+    authtbl_s = tbl.get_string()
+    max_len = len(max(authtbl_s.splitlines(), key=len))
+    lines = [
+        util.center("Authorized keys fingerprints from %s for user %s" %
+                    (key_fn, user), "+", max_len),
+    ]
+    lines.extend(authtbl_s.splitlines())
+    for line in lines:
+        util.multi_log(text="%s%s\n" % (prefix, line))
+
+
+def handle(name, cfg, cloud, log, _args):
+    if 'no_ssh_fingerprints' in cfg:
+        log.debug(("Skipping module named %s, "
+                   "logging of ssh fingerprints disabled"), name)
+
+    user = util.get_cfg_option_str(cfg, "user", "ubuntu")
+    (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(user, cloud.paths)
+    _pprint_key_entries(user, auth_key_fn, auth_key_entries)
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index e0a2f0ca..88a11a1a 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -181,12 +181,11 @@ def parse_authorized_keys(fname):
     return contents
 
 
-def update_authorized_keys(fname, keys):
-    entries = parse_authorized_keys(fname)
+def update_authorized_keys(old_entries, keys):
     to_add = list(keys)
 
-    for i in range(0, len(entries)):
-        ent = entries[i]
+    for i in range(0, len(old_entries)):
+        ent = old_entries[i]
         if ent.empty() or not ent.base64:
             continue
         # Replace those with the same base64
@@ -199,66 +198,81 @@ def update_authorized_keys(fname, keys):
             # Don't add it later
             if k in to_add:
                 to_add.remove(k)
-        entries[i] = ent
+        old_entries[i] = ent
 
     # Now append any entries we did not match above
     for key in to_add:
-        entries.append(key)
+        old_entries.append(key)
 
     # Now format them back to strings...
-    lines = [str(b) for b in entries]
+    lines = [str(b) for b in old_entries]
 
     # Ensure it ends with a newline
     lines.append('')
     return '\n'.join(lines)
 
 
-def setup_user_keys(keys, user, key_prefix, paths):
-    # Make sure the users .ssh dir is setup accordingly
-    pwent = pwd.getpwnam(user)
-    ssh_dir = os.path.join(pwent.pw_dir, '.ssh')
-    ssh_dir = paths.join(False, ssh_dir)
-    if not os.path.exists(ssh_dir):
-        util.ensure_dir(ssh_dir, mode=0700)
-        util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
+def users_ssh_info(username, paths):
+    pw_ent = pwd.getpwnam(username)
+    if not pw_ent:
+        raise RuntimeError("Unable to get ssh info for user %r" % (username))
+    ssh_dir = paths.join(False, os.path.join(pw_ent.pw_dir, '.ssh'))
+    return (ssh_dir, pw_ent)
 
-    # Turn the keys given into actual entries
-    parser = AuthKeyLineParser()
-    key_entries = []
-    for k in keys:
-        key_entries.append(parser.parse(str(k), def_opt=key_prefix))
 
+def extract_authorized_keys(username, paths):
+    (ssh_dir, pw_ent) = users_ssh_info(username, paths)
     sshd_conf_fn = paths.join(True, DEF_SSHD_CFG)
+    auth_key_fn = None
     with util.SeLinuxGuard(ssh_dir, recursive=True):
         try:
-            # AuthorizedKeysFile may contain tokens
+            # The 'AuthorizedKeysFile' may contain tokens
             # of the form %T which are substituted during connection set-up.
             # The following tokens are defined: %% is replaced by a literal
             # '%', %h is replaced by the home directory of the user being
             # authenticated and %u is replaced by the username of that user.
             ssh_cfg = parse_ssh_config_map(sshd_conf_fn)
-            akeys = ssh_cfg.get("authorizedkeysfile", '')
-            akeys = akeys.strip()
-            if not akeys:
-                akeys = "%h/.ssh/authorized_keys"
-            akeys = akeys.replace("%h", pwent.pw_dir)
-            akeys = akeys.replace("%u", user)
-            akeys = akeys.replace("%%", '%')
-            if not akeys.startswith('/'):
-                akeys = os.path.join(pwent.pw_dir, akeys)
-            authorized_keys = paths.join(False, akeys)
+            auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip()
+            if not auth_key_fn:
+                auth_key_fn = "%h/.ssh/authorized_keys"
+            auth_key_fn = auth_key_fn.replace("%h", pw_ent.pw_dir)
+            auth_key_fn = auth_key_fn.replace("%u", username)
+            auth_key_fn = auth_key_fn.replace("%%", '%')
+            if not auth_key_fn.startswith('/'):
+                auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn)
+            auth_key_fn = paths.join(False, auth_key_fn)
         except (IOError, OSError):
-            authorized_keys = os.path.join(ssh_dir, 'authorized_keys')
+            # Give up and use a default key filename
+            auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
             util.logexc(LOG, ("Failed extracting 'AuthorizedKeysFile'"
                               " in ssh config"
-                              " from %s, using 'AuthorizedKeysFile' file"
-                              " %s instead"),
-                        sshd_conf_fn, authorized_keys)
-
-    content = update_authorized_keys(authorized_keys, key_entries)
-    util.ensure_dir(os.path.dirname(authorized_keys), mode=0700)
-    util.write_file(authorized_keys, content, mode=0600)
-    util.chownbyid(authorized_keys, pwent.pw_uid, pwent.pw_gid)
+                              " from %r, using 'AuthorizedKeysFile' file"
+                              " %r instead"),
+                        sshd_conf_fn, auth_key_fn)
+    auth_key_entries = parse_authorized_keys(auth_key_fn)
+    return (auth_key_fn, auth_key_entries)
+
+
+def setup_user_keys(keys, username, key_prefix, paths):
+    # Make sure the users .ssh dir is setup accordingly
+    (ssh_dir, pwent) = users_ssh_info(username, paths)
+    if not os.path.isdir(ssh_dir):
+        util.ensure_dir(ssh_dir, mode=0700)
+        util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
+
+    # Turn the 'update' keys given into actual entries
+    parser = AuthKeyLineParser()
+    key_entries = []
+    for k in keys:
+        key_entries.append(parser.parse(str(k), def_opt=key_prefix))
+
+    # Extract the old and make the new
+    (auth_key_fn, auth_key_entries) = extract_authorized_keys(username, paths)
+    with util.SeLinuxGuard(ssh_dir, recursive=True):
+        content = update_authorized_keys(auth_key_entries, key_entries)
+        util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700)
+        util.write_file(auth_key_fn, content, mode=0600)
+        util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
 
 
 class SshdConfigLine(object):
-- cgit v1.2.3

From 0247b1be0ae3d1bc913b5e368dadf22e26b54b86 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Sun, 19 Aug 2012 22:28:14 -0700
Subject: Add a 'none' datasource as a last resort fallback

1. This will allow a basically empty datasource to be activated (as
   the last datasource) when no other datasources work. This allows
   modules to still run (if they can, new function added to the
   datasource if modules want to check if cloud-init is in this
   'disconnected' state).
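The "new function" mentioned here is the is_disconnected property added in the diff below. A minimal sketch of how a config module might consult it, assuming the usual handle() signature shown elsewhere in this series and that the active datasource is reachable as cloud.datasource; the log text is illustrative, not part of the patch:

    def handle(name, cfg, cloud, log, _args):
        # DataSourceNone.is_disconnected returns True, while the base
        # DataSource property (also added below) returns False, so this
        # cheaply detects the "no real datasource" state.
        if cloud.datasource.is_disconnected:
            log.debug("Skipping module named %s, no real datasource", name)
            return
        # ... work that needs instance metadata goes here ...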
--- cloudinit/settings.py | 4 ++- cloudinit/sources/DataSourceNone.py | 59 +++++++++++++++++++++++++++++++++++++ cloudinit/sources/__init__.py | 4 +++ 3 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 cloudinit/sources/DataSourceNone.py (limited to 'cloudinit') diff --git a/cloudinit/settings.py b/cloudinit/settings.py index cdfc31ae..8cc9e3b4 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -35,7 +35,9 @@ CFG_BUILTIN = { 'OVF', 'MAAS', 'Ec2', - 'CloudStack' + 'CloudStack', + # At the end to act as a 'catch' when none of the above work... + 'None', ], 'def_log_file': '/var/log/cloud-init.log', 'log_cfgs': [], diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py new file mode 100644 index 00000000..e53eb280 --- /dev/null +++ b/cloudinit/sources/DataSourceNone.py @@ -0,0 +1,59 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util + +LOG = logging.getLogger(__name__) + +NONE_IID = 'iid-datasource-none' + + +class DataSourceNone(sources.DataSource): + def __init__(self, sys_cfg, distro, paths, ud_proc=None): + sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc) + self.userdata = {} + self.metadata = {} + self.userdata_raw = '' + + def get_data(self): + return True + + def get_instance_id(self): + return NONE_IID + + def __str__(self): + return util.obj_name(self) + + @property + def is_disconnected(self): + return True + + +# Used to match classes to dependencies (this will always match) +datasources = [ + (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceNone, (sources.DEP_FILESYSTEM,)), + (DataSourceNone, []), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index b25724a5..ca9f58e5 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -65,6 +65,10 @@ class DataSource(object): self.userdata = self.ud_proc.process(raw_data) return self.userdata + @property + def is_disconnected(self): + return False + def get_userdata_raw(self): return self.userdata_raw -- cgit v1.2.3 From d308ee4363d7b9601f73dbd9166594c16f9e9601 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 20 Aug 2012 12:07:41 -0700 Subject: Fixup the columns and add a check to make sure that a key given is one that we actually want to print out. Also add in a config option which lets people select a different hashing method (not md5 if they want). 
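Stripped of the table plumbing, the fingerprint being configured here is just the colon-joined hex digest of the base64-decoded key body. A standalone sketch, assuming hash_meth may be any algorithm name hashlib accepts (which is what the new 'authkey_hash' option in this patch passes through):

    import base64
    import hashlib

    def fingerprint(b64_key, hash_meth='md5'):
        # Decode the key body and hash it with the chosen algorithm
        digest = hashlib.new(hash_meth, base64.b64decode(b64_key)).hexdigest()
        # '0011aabb...' -> '00:11:aa:bb:...'
        return ":".join(digest[i:i + 2] for i in range(0, len(digest), 2))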
--- cloudinit/config/cc_ssh_authkey_fingerprints.py | 59 ++++++++++++++----------- 1 file changed, 32 insertions(+), 27 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index d4f136c2..6fb7d7fe 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -26,52 +26,56 @@ from prettytable import PrettyTable from cloudinit import util from cloudinit import ssh_util -FP_HASH_TYPE = 'md5' -FP_SEGMENT_LEN = 2 -FP_SEGMENT_SEP = ":" - def _split_hash(bin_hash): split_up = [] - for i in xrange(0, len(bin_hash), FP_SEGMENT_LEN): - split_up.append(bin_hash[i:i+FP_SEGMENT_LEN]) + for i in xrange(0, len(bin_hash), 2): + split_up.append(bin_hash[i:i+2]) return split_up -def _gen_fingerprint(b64_text): +def _gen_fingerprint(b64_text, hash_meth='md5'): if not b64_text: return '' - # Maybe we should feed this into 'ssh -lf'? + # TBD(harlowja): Maybe we should feed this into 'ssh -lf'? try: - bin_text = base64.b64decode(b64_text) - hasher = hashlib.new(FP_HASH_TYPE) - hasher.update(bin_text) - pp_hash = FP_SEGMENT_SEP.join(_split_hash(hasher.hexdigest())) - return pp_hash + hasher = hashlib.new(hash_meth) + hasher.update(base64.b64decode(b64_text)) + return ":".join(_split_hash(hasher.hexdigest())) except TypeError: - return '' + # Raised when b64 not really b64... + return '?' + + +def _is_printable_key(entry): + if any([entry.keytype, entry.base64, entry.comment, entry.options]): + if entry.keytype and entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']: + return True + return False -def _pprint_key_entries(user, key_fn, key_entries, prefix='ci-info: '): +def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', prefix='ci-info: '): if not key_entries: message = "%sno authorized ssh keys fingerprints found for user %s." 
% (prefix, user) util.multi_log(message) return - tbl_fields = ['Keytype', 'Fingerprint', 'Options', 'Comment'] + tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options', 'Comment'] tbl = PrettyTable(tbl_fields) for entry in key_entries: - row = [] - row.append(entry.keytype or '-') - row.append(_gen_fingerprint(entry.base64) or '-') - row.append(entry.comment or '-') - row.append(entry.options or '-') - tbl.add_row(row) + if _is_printable_key(entry): + row = [] + row.append(entry.keytype or '-') + row.append(_gen_fingerprint(entry.base64, hash_meth) or '-') + row.append(entry.options or '-') + row.append(entry.comment or '-') + tbl.add_row(row) authtbl_s = tbl.get_string() - max_len = len(max(authtbl_s.splitlines(), key=len)) + authtbl_lines = authtbl_s.splitlines() + max_len = len(max(authtbl_lines, key=len)) lines = [ util.center("Authorized keys fingerprints from %s for user %s" % (key_fn, user), "+", max_len), ] - lines.extend(authtbl_s.splitlines()) + lines.extend(authtbl_lines) for line in lines: util.multi_log(text="%s%s\n" % (prefix, line)) @@ -81,6 +85,7 @@ def handle(name, cfg, cloud, log, _args): log.debug(("Skipping module named %s, " "logging of ssh fingerprints disabled"), name) - user = util.get_cfg_option_str(cfg, "user", "ubuntu") - (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(user, cloud.paths) - _pprint_key_entries(user, auth_key_fn, auth_key_entries) + user_name = util.get_cfg_option_str(cfg, "user", "ubuntu") + hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(user_name, cloud.paths) + _pprint_key_entries(user_name, auth_key_fn, auth_key_entries, hash_meth) -- cgit v1.2.3 From c1d2bc7ff9824b967cca21ed0254e4ee47168b10 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 20 Aug 2012 12:20:26 -0700 Subject: Remove the matching of the filesystem dep and add in the ability to use any fallback userdata or metadata found in the datasource config (if provided). --- cloudinit/sources/DataSourceNone.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py index e53eb280..b186113c 100644 --- a/cloudinit/sources/DataSourceNone.py +++ b/cloudinit/sources/DataSourceNone.py @@ -22,8 +22,6 @@ from cloudinit import util LOG = logging.getLogger(__name__) -NONE_IID = 'iid-datasource-none' - class DataSourceNone(sources.DataSource): def __init__(self, sys_cfg, distro, paths, ud_proc=None): @@ -33,10 +31,17 @@ class DataSourceNone(sources.DataSource): self.userdata_raw = '' def get_data(self): + # If the datasource config has any provided 'fallback' + # userdata or metadata, use it... 
+ if 'userdata' in self.ds_cfg: + self.userdata = self.ds_cfg['userdata'] + self.userdata_raw = util.yaml_dumps(self.userdata) + if 'metadata' in self.ds_cfg: + self.metadata = self.ds_cfg['metadata'] return True def get_instance_id(self): - return NONE_IID + return 'iid-datasource-none' def __str__(self): return util.obj_name(self) @@ -46,10 +51,9 @@ class DataSourceNone(sources.DataSource): return True -# Used to match classes to dependencies (this will always match) +# Used to match classes to dependencies datasources = [ (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), - (DataSourceNone, (sources.DEP_FILESYSTEM,)), (DataSourceNone, []), ] -- cgit v1.2.3 From e65604ca64e16c4ee5bf2467c4424954eddfc390 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 20 Aug 2012 21:46:40 -0400 Subject: remove duplicate printing of authorized keys to the console see LP: #1039303 for more information. Hopefully we'll get a good fix there. --- cloudinit/config/cc_ssh_authkey_fingerprints.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 087cc15e..ebd9d6c8 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -77,12 +77,13 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', prefix='ci-i authtbl_lines = authtbl_s.splitlines() max_len = len(max(authtbl_lines, key=len)) lines = [ - util.center("Authorized keys fingerprints from %s for user %s" % + util.center("Authorized keys from %s for user %s" % (key_fn, user), "+", max_len), ] lines.extend(authtbl_lines) for line in lines: - util.multi_log(text="%s%s\n" % (prefix, line)) + util.multi_log(text="%s%s\n" % (prefix, line), + stderr=False, console=True) def handle(name, cfg, cloud, log, _args): -- cgit v1.2.3 From e60058ce92b59883da221d3e889ed62bd9b69c14 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 21 Aug 2012 22:50:28 -0400 Subject: remove committed conflicts in previous merge --- cloudinit/config/cc_ssh_authkey_fingerprints.py | 5 ----- 1 file changed, 5 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index ebd9d6c8..68684c3d 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -27,13 +27,8 @@ from cloudinit import util def _split_hash(bin_hash): split_up = [] -<<<<<<< TREE - for i in xrange(0, len(bin_hash), FP_SEGMENT_LEN): - split_up.append(bin_hash[i:i + FP_SEGMENT_LEN]) -======= for i in xrange(0, len(bin_hash), 2): split_up.append(bin_hash[i:i+2]) ->>>>>>> MERGE-SOURCE return split_up -- cgit v1.2.3 From 49242c2a2e7e0ab6812de741b4ac2e8d1888ad08 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 22 Aug 2012 00:16:29 -0400 Subject: fix pylint in cc_ssh_authkey_fingerprints.py --- cloudinit/config/cc_ssh_authkey_fingerprints.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 68684c3d..81d6e89e 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -47,18 +47,21 @@ def _gen_fingerprint(b64_text, hash_meth='md5'): def _is_printable_key(entry): if any([entry.keytype, entry.base64, entry.comment, entry.options]): - if 
entry.keytype and entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']: + if (entry.keytype and + entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']): return True return False -def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', prefix='ci-info: '): +def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', + prefix='ci-info: '): if not key_entries: message = ("%sno authorized ssh keys fingerprints found for user %s." % (prefix, user)) util.multi_log(message) return - tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options', 'Comment'] + tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options', + 'Comment'] tbl = PrettyTable(tbl_fields) for entry in key_entries: if _is_printable_key(entry): @@ -87,6 +90,7 @@ def handle(name, cfg, cloud, log, _args): "logging of ssh fingerprints disabled"), name) user_name = util.get_cfg_option_str(cfg, "user", "ubuntu") + hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") extract = ssh_util.extract_authorized_keys (auth_key_fn, auth_key_entries) = extract(user_name, cloud.paths) _pprint_key_entries(user_name, auth_key_fn, auth_key_entries, hash_meth) -- cgit v1.2.3 From d713d7bc5e2b308d11364ccc8701fc2968f9f151 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 22 Aug 2012 00:28:29 -0400 Subject: rework package mirror selection There are several changes here. * Datasource now has a 'availability_zone' getter. * get_package_mirror_info * Datasource convenience 'get_package_mirror_info' that calls the configured distro, and passes it the availability-zone * distro has a get_package_mirror_info method * get_package_mirror_info returns a dict that of name:mirror this is to facilitate use of 'security' and 'primary' archive. * this supports searching based on templates. Any template that references undefined values is skipped. These templates can contain 'availability_zone' (LP: #1037727) * distro's mirrors can be arch specific (LP: #1028501) * rename_apt_lists supports the "mirror_info" rather than single mirror * generate_sources_list supports mirror_info, and as a result, the ubuntu mirrors reference '$security' rather than security (LP: #1006963) * remove the DataSourceEc2 specific mirror selection, but instead rely on the above filtering, and the fact that 'ec2_region' is only defined if the availability_zone looks like a ec2 az. --- ChangeLog | 4 + cloudinit/cloud.py | 3 - cloudinit/config/cc_apt_update_upgrade.py | 120 +++++++++++++++++------------- cloudinit/distros/__init__.py | 73 +++++++++++++++++- cloudinit/distros/debian.py | 4 + cloudinit/sources/DataSourceCloudStack.py | 3 +- cloudinit/sources/DataSourceEc2.py | 40 ++-------- cloudinit/sources/__init__.py | 10 ++- config/cloud.cfg | 16 +++- templates/sources.list.tmpl | 12 +-- 10 files changed, 182 insertions(+), 103 deletions(-) (limited to 'cloudinit') diff --git a/ChangeLog b/ChangeLog index d6dfc80e..e9e88318 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ 0.7.0: + - allow distro mirror selection to include availability-zone (LP: #1037727) + - allow arch specific mirror selection (select ports.ubuntu.com on arm) + LP: #1028501 + - allow specification of security mirrors (LP: #1006963) - add the 'None' datasource (LP: #906669), which will allow jobs to run even if there is no "real" datasource found. 
- write ssh authorized keys to console, ssh_authkey_fingerprints diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index 22d9167e..620b3c07 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -82,9 +82,6 @@ class Cloud(object): def get_locale(self): return self.datasource.get_locale() - def get_local_mirror(self): - return self.datasource.get_local_mirror() - def get_hostname(self, fqdn=False): return self.datasource.get_hostname(fqdn=fqdn) diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py index 1bffa47d..b8f2024d 100644 --- a/cloudinit/config/cc_apt_update_upgrade.py +++ b/cloudinit/config/cc_apt_update_upgrade.py @@ -50,20 +50,25 @@ def handle(name, cfg, cloud, log, _args): upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False) release = get_release() - mirror = find_apt_mirror(cloud, cfg) - if not mirror: + mirrors = find_apt_mirror_info(cloud, cfg) + if not mirrors or "primary" not in mirrors: log.debug(("Skipping module named %s," " no package 'mirror' located"), name) return - log.debug("Selected mirror at: %s" % mirror) + # backwards compatibility + mirror = mirrors["primary"] + mirrors["mirror"] = mirror + + log.debug("mirror info: %s" % mirrors) if not util.get_cfg_option_bool(cfg, 'apt_preserve_sources_list', False): - generate_sources_list(release, mirror, cloud, log) - old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror', - "archive.ubuntu.com/ubuntu") - rename_apt_lists(old_mir, mirror) + generate_sources_list(release, mirrors, cloud, log) + old_mirrors = cfg.get('apt_old_mirrors', + old_mirrors = {"primary": "archive.ubuntu.com/ubuntu", + "security": "security.ubuntu.com/ubuntu"}) + rename_apt_lists(old_mirrors, mirrors) # Set up any apt proxy proxy = cfg.get("apt_proxy", None) @@ -81,8 +86,10 @@ def handle(name, cfg, cloud, log, _args): # Process 'apt_sources' if 'apt_sources' in cfg: - errors = add_sources(cloud, cfg['apt_sources'], - {'MIRROR': mirror, 'RELEASE': release}) + params = mirrors + params['RELEASE'] = release + params['MIRROR'] = mirror + errors = add_sources(cloud, cfg['apt_sources'], params) for e in errors: log.warn("Source Error: %s", ':'.join(e)) @@ -146,30 +153,35 @@ def mirror2lists_fileprefix(mirror): return string -def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"): - oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) - nprefix = os.path.join(lists_d, mirror2lists_fileprefix(new_mirror)) - if oprefix == nprefix: - return - olen = len(oprefix) - for filename in glob.glob("%s_*" % oprefix): - # TODO use the cloud.paths.join... 
- util.rename(filename, "%s%s" % (nprefix, filename[olen:])) - +def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"): + for (name, omirror) in old_mirrors.iteritems(): + nmirror = new_mirrors.get(name) + if not nmirror: + continue + oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) + nprefix = os.path.join(lists_d, mirror2lists_fileprefix(nmirror)) + if oprefix == nprefix: + continue + olen = len(oprefix) + for filename in glob.glob("%s_*" % oprefix): + util.rename(filename, "%s%s" % (nprefix, filename[olen:])) def get_release(): (stdout, _stderr) = util.subp(['lsb_release', '-cs']) return stdout.strip() -def generate_sources_list(codename, mirror, cloud, log): +def generate_sources_list(codename, mirrors, cloud, log): template_fn = cloud.get_template_filename('sources.list') - if template_fn: - params = {'mirror': mirror, 'codename': codename} - out_fn = cloud.paths.join(False, '/etc/apt/sources.list') - templater.render_to_file(template_fn, out_fn, params) - else: + if not template_fn: log.warn("No template found, not rendering /etc/apt/sources.list") + return + + params = {'codename': codename} + for k in mirrors: + params[k] = mirrors[k] + out_fn = cloud.paths.join(False, '/etc/apt/sources.list') + templater.render_to_file(template_fn, out_fn, params) def add_sources(cloud, srclist, template_params=None): @@ -231,43 +243,47 @@ def add_sources(cloud, srclist, template_params=None): return errorlist -def find_apt_mirror(cloud, cfg): +def find_apt_mirror_info(cloud, cfg): """ find an apt_mirror given the cloud and cfg provided """ mirror = None - cfg_mirror = cfg.get("apt_mirror", None) - if cfg_mirror: - mirror = cfg["apt_mirror"] - elif "apt_mirror_search" in cfg: - mirror = util.search_for_mirror(cfg['apt_mirror_search']) - else: - mirror = cloud.get_local_mirror() + # this is less preferred way of specifying mirror preferred would be to + # use the distro's search or package_mirror. + mirror = cfg.get("apt_mirror", None) - mydom = "" + search = cfg.get("apt_mirror_search", None) + if not mirror and search: + mirror = util.search_for_mirror(search) + if (not mirror and + util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)): + mydom = "" doms = [] - if not mirror: - # if we have a fqdn, then search its domain portion first - (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) - mydom = ".".join(fqdn.split(".")[1:]) - if mydom: - doms.append(".%s" % mydom) + # if we have a fqdn, then search its domain portion first + (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) + mydom = ".".join(fqdn.split(".")[1:]) + if mydom: + doms.append(".%s" % mydom) - if (not mirror and - util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)): - doms.extend((".localdomain", "",)) + doms.extend((".localdomain", "",)) - mirror_list = [] - distro = cloud.distro.name - mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) - for post in doms: - mirror_list.append(mirrorfmt % (post)) + mirror_list = [] + distro = cloud.distro.name + mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) + for post in doms: + mirror_list.append(mirrorfmt % (post)) - mirror = util.search_for_mirror(mirror_list) + mirror = util.search_for_mirror(mirror_list) + + mirror_info = cloud.get_package_mirror_info() - if not mirror: - mirror = cloud.distro.get_package_mirror() + # this is a bit strange. + # if mirror is set, then one of the legacy options above set it + # but they do not cover security. 
so we need to get that from + # get_package_mirror_info + if mirror: + mirror_info.update({'primary': mirror}) - return mirror + return mirror_info diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index da4d0180..3057ecfc 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -23,6 +23,8 @@ from StringIO import StringIO import abc +import os +import re from cloudinit import importer from cloudinit import log as logging @@ -75,8 +77,26 @@ class Distro(object): def update_package_sources(self): raise NotImplementedError() - def get_package_mirror(self): - return self.get_option('package_mirror') + def get_primary_arch(self): + arch = os.uname[4] + if arch in ("i386", "i486", "i586", "i686"): + return "i386" + return arch + + def _get_arch_package_mirror_info(self, arch=None): + mirror_info = self.get_option("package_mirrors", None) + if arch == None: + arch = self.get_primary_arch() + return _get_arch_package_mirror_info(mirror_info, arch) + + def get_package_mirror_info(self, arch=None, + availability_zone=None): + # this resolves the package_mirrors config option + # down to a single dict of {mirror_name: mirror_url} + arch_info = self._get_arch_package_mirror_info(arch) + + return _get_package_mirror_info(availability_zone=availability_zone, + mirror_info=arch_info) def apply_network(self, settings, bring_up=True): # Write it out @@ -151,6 +171,55 @@ class Distro(object): return False +def _get_package_mirror_info(mirror_info, availability_zone=None, + mirror_filter=util.search_for_mirror): + # given a arch specific 'mirror_info' entry (from package_mirrors) + # search through the 'search' entries, and fallback appropriately + # return a dict with only {name: mirror} entries. + + ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % + "north|northeast|east|southeast|south|southwest|west|northwest") + + unset_value = "_UNSET_VALUE_USED_" + azone = availability_zone + + if azone and re.match(ec2_az_re, azone): + ec2_region = "%s" % azone[0:-1] + elif azone: + ec2_region = unset_value + else: + azone = unset_value + ec2_region = unset_value + + results = {} + for (name, mirror) in mirror_info.get('failsafe', {}).iteritems(): + results[name] = mirror + + for (name, searchlist) in mirror_info.get('search', {}).iteritems(): + mirrors = [m % {'ec2_region': ec2_region, 'availability_zone': azone} + for m in searchlist] + # now filter out anything that used the unset availability zone + mirrors = [m for m in mirrors if m.find(unset_value) < 0] + + found = mirror_filter(mirrors) + if found: + results[name] = found + + LOG.debug("filtered distro mirror info: %s" % results) + + return results + +def _get_arch_package_mirror_info(package_mirrors, arch): + # pull out the specific arch from a 'package_mirrors' config option + default = None + for item in package_mirrors: + arches = item.get("arches") + if arch in arches: + return item + if "default" in arches: + default = item + return default + def fetch(name): locs = importer.find_module(name, ['', __name__], diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 3247d7ce..da8c1a5b 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -147,3 +147,7 @@ class Distro(distros.Distro): def update_package_sources(self): self._runner.run("update-sources", self.package_command, ["update"], freq=PER_INSTANCE) + + def get_primary_arch(self): + (arch, _err) = util.subp(['dpkg', '--print-architecture']) + return str(arch).strip() diff --git 
a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 751bef4f..5c5f8bd7 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -132,7 +132,8 @@ class DataSourceCloudStack(sources.DataSource): def get_instance_id(self): return self.metadata['instance-id'] - def get_availability_zone(self): + @property + def availability_zone(self): return self.metadata['availability-zone'] diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index d9eb8f17..556dcafb 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -83,40 +83,6 @@ class DataSourceEc2(sources.DataSource): def get_availability_zone(self): return self.metadata['placement']['availability-zone'] - def get_local_mirror(self): - return self.get_mirror_from_availability_zone() - - def get_mirror_from_availability_zone(self, availability_zone=None): - # Return type None indicates there is no cloud specific mirror - # Availability is like 'us-west-1b' or 'eu-west-1a' - if availability_zone is None: - availability_zone = self.get_availability_zone() - - if self.is_vpc(): - return None - - if not availability_zone: - return None - - mirror_tpl = self.distro.get_option('package_mirror_ec2_template', - None) - - if mirror_tpl is None: - return None - - # in EC2, the 'region' is 'us-east-1' if 'zone' is 'us-east-1a' - tpl_params = { - 'zone': availability_zone.strip(), - 'region': availability_zone[:-1] - } - mirror_url = mirror_tpl % (tpl_params) - - found = util.search_for_mirror([mirror_url]) - if found is not None: - return mirror_url - - return None - def _get_url_settings(self): mcfg = self.ds_cfg if not mcfg: @@ -255,6 +221,12 @@ class DataSourceEc2(sources.DataSource): return True return False + @property + def availability_zone(self): + try: + return self.metadata['placement']['availability-zone'] + except KeyError: + return None # Used to match classes to dependencies datasources = [ diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index ca9f58e5..04296193 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -117,9 +117,9 @@ class DataSource(object): def get_locale(self): return 'en_US.UTF-8' - def get_local_mirror(self): - # ?? 
- return None + @property + def availability_zone(self): + return self.metadata.get('availability-zone') def get_instance_id(self): if not self.metadata or 'instance-id' not in self.metadata: @@ -166,6 +166,10 @@ class DataSource(object): else: return hostname + def get_package_mirror_info(self): + self.distro.get_package_mirror_info( + availability_zone=self.availability_zone) + def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list): ds_list = list_sources(cfg_list, ds_deps, pkg_list) diff --git a/config/cloud.cfg b/config/cloud.cfg index 700f3d7a..106ab01a 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -74,6 +74,18 @@ system_info: cloud_dir: /var/lib/cloud/ templates_dir: /etc/cloud/templates/ upstart_dir: /etc/init/ - package_mirror: http://archive.ubuntu.com/ubuntu - package_mirror_ec2_template: http://%(region)s.ec2.archive.ubuntu.com/ubuntu/ + package_mirrors: + - arches: [i386, amd64] + failsafe: + primary: http://archive.ubuntu.com/ubuntu + security: http://security.ubuntu.com/ubuntu + search: + primary: + - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/ + - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/ + security: [] + - arches: [armhf, armel, default] + failsafe: + primary: http://ports.ubuntu.com/ubuntu + security: http://ports.ubuntu.com/ubuntu ssh_svcname: ssh diff --git a/templates/sources.list.tmpl b/templates/sources.list.tmpl index f702025f..ce395b3d 100644 --- a/templates/sources.list.tmpl +++ b/templates/sources.list.tmpl @@ -52,9 +52,9 @@ deb-src $mirror $codename-updates universe # deb http://archive.canonical.com/ubuntu $codename partner # deb-src http://archive.canonical.com/ubuntu $codename partner -deb http://security.ubuntu.com/ubuntu $codename-security main -deb-src http://security.ubuntu.com/ubuntu $codename-security main -deb http://security.ubuntu.com/ubuntu $codename-security universe -deb-src http://security.ubuntu.com/ubuntu $codename-security universe -# deb http://security.ubuntu.com/ubuntu $codename-security multiverse -# deb-src http://security.ubuntu.com/ubuntu $codename-security multiverse +deb $security $codename-security main +deb-src $security $codename-security main +deb $security $codename-security universe +deb-src $security $codename-security universe +# deb $security $codename-security multiverse +# deb-src $security $codename-security multiverse -- cgit v1.2.3 From 26dd7461ce7ce9a6cba541ece94b802df772168b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 22 Aug 2012 11:38:20 -0400 Subject: return results from datasource.get_package_mirror_info --- cloudinit/sources/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 04296193..4719d254 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -167,7 +167,7 @@ class DataSource(object): return hostname def get_package_mirror_info(self): - self.distro.get_package_mirror_info( + return self.distro.get_package_mirror_info( availability_zone=self.availability_zone) -- cgit v1.2.3 From 3e3e9e90c7eefe2e0a14f9055d23856939aea269 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 22 Aug 2012 10:35:22 -0700 Subject: Fix the userdata being populated to a dict (incorrect) and let it instead be populated by the userdata processor with the raw userdata (either empty or datasource config provided). 
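With this fix the fallback values are read verbatim from the datasource config. A hypothetical system config exercising it, shown as the parsed dict (in /etc/cloud/cloud.cfg it would be the equivalent YAML under 'datasource'); the hostname and user-data values are made up for illustration:

    sys_cfg = {
        'datasource': {
            'None': {
                # Handed to the userdata processor as the raw userdata
                'userdata_raw': '#cloud-config\nruncmd: [ true ]',
                # Copied into the datasource's metadata as-is
                'metadata': {'local-hostname': 'fallback-host'},
            },
        },
    }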
--- cloudinit/sources/DataSourceNone.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py index b186113c..c2125bee 100644 --- a/cloudinit/sources/DataSourceNone.py +++ b/cloudinit/sources/DataSourceNone.py @@ -26,16 +26,14 @@ LOG = logging.getLogger(__name__) class DataSourceNone(sources.DataSource): def __init__(self, sys_cfg, distro, paths, ud_proc=None): sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc) - self.userdata = {} self.metadata = {} self.userdata_raw = '' def get_data(self): # If the datasource config has any provided 'fallback' # userdata or metadata, use it... - if 'userdata' in self.ds_cfg: - self.userdata = self.ds_cfg['userdata'] - self.userdata_raw = util.yaml_dumps(self.userdata) + if 'userdata_raw' in self.ds_cfg: + self.userdata_raw = self.ds_cfg['userdata_raw'] if 'metadata' in self.ds_cfg: self.metadata = self.ds_cfg['metadata'] return True -- cgit v1.2.3 From 451e48732ff7885502db2f8296777fa58b670f3b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 22 Aug 2012 14:12:32 -0400 Subject: fix pep8 complaints. make pep8 now is silent on precise's pep8 ( 0.6.1-2ubuntu2). --- cloudinit/config/cc_apt_pipelining.py | 4 +- cloudinit/config/cc_apt_update_upgrade.py | 4 +- cloudinit/config/cc_bootcmd.py | 2 +- cloudinit/config/cc_emit_upstart.py | 4 +- cloudinit/config/cc_puppet.py | 5 +- cloudinit/config/cc_resizefs.py | 8 +- cloudinit/config/cc_rightscale_userdata.py | 4 +- cloudinit/config/cc_ssh.py | 8 +- cloudinit/config/cc_ssh_authkey_fingerprints.py | 2 +- cloudinit/config/cc_update_etc_hosts.py | 2 +- cloudinit/config/cc_update_hostname.py | 2 +- cloudinit/config/cc_write_files.py | 4 +- cloudinit/distros/__init__.py | 2 +- cloudinit/distros/rhel.py | 4 +- cloudinit/handlers/__init__.py | 2 +- cloudinit/handlers/shell_script.py | 2 +- cloudinit/log.py | 2 +- cloudinit/sources/DataSourceCloudStack.py | 3 +- cloudinit/sources/DataSourceConfigDrive.py | 14 +-- cloudinit/stages.py | 4 +- cloudinit/user_data.py | 4 +- cloudinit/util.py | 17 ++-- tests/unittests/test__init__.py | 14 +-- tests/unittests/test_builtin_handlers.py | 4 +- tests/unittests/test_datasource/test_altcloud.py | 99 ++++++++++++---------- tests/unittests/test_datasource/test_maas.py | 22 ++--- .../test_handler/test_handler_ca_certs.py | 18 ++-- tests/unittests/test_userdata.py | 13 ++- tests/unittests/test_util.py | 4 +- tools/hacking.py | 4 +- tools/mock-meta.py | 24 +++--- 31 files changed, 157 insertions(+), 148 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index 3426099e..02056ee0 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -16,8 +16,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-from cloudinit import util from cloudinit.settings import PER_INSTANCE +from cloudinit import util frequency = PER_INSTANCE @@ -50,7 +50,7 @@ def handle(_name, cfg, cloud, log, _args): def write_apt_snippet(cloud, setting, log, f_name): - """ Writes f_name with apt pipeline depth 'setting' """ + """Writes f_name with apt pipeline depth 'setting'.""" file_contents = APT_PIPE_TPL % (setting) diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py index 1bffa47d..e60e1037 100644 --- a/cloudinit/config/cc_apt_update_upgrade.py +++ b/cloudinit/config/cc_apt_update_upgrade.py @@ -153,7 +153,7 @@ def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"): return olen = len(oprefix) for filename in glob.glob("%s_*" % oprefix): - # TODO use the cloud.paths.join... + # TODO(harlowja) use the cloud.paths.join... util.rename(filename, "%s%s" % (nprefix, filename[olen:])) @@ -232,7 +232,7 @@ def add_sources(cloud, srclist, template_params=None): def find_apt_mirror(cloud, cfg): - """ find an apt_mirror given the cloud and cfg provided """ + """find an apt_mirror given the cloud and cfg provided.""" mirror = None diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index bae1ea54..896cb4d0 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -20,8 +20,8 @@ import os -from cloudinit import util from cloudinit.settings import PER_ALWAYS +from cloudinit import util frequency = PER_ALWAYS diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index 68b86ff6..6d376184 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -20,8 +20,8 @@ import os -from cloudinit import util from cloudinit.settings import PER_ALWAYS +from cloudinit import util frequency = PER_ALWAYS @@ -44,5 +44,5 @@ def handle(name, _cfg, cloud, log, args): try: util.subp(cmd) except Exception as e: - # TODO, use log exception from utils?? + # TODO(harlowja), use log exception from utils?? log.warn("Emission of upstart event %s failed due to: %s", n, e) diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 467c1496..74ee18e1 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -48,7 +48,8 @@ def handle(name, cfg, cloud, log, _args): # Create object for reading puppet.conf values puppet_config = helpers.DefaultingConfigParser() # Read puppet.conf values from original file in order to be able to - # mix the rest up. First clean them up (TODO is this really needed??) + # mix the rest up. First clean them up + # (TODO(harlowja) is this really needed??) cleaned_lines = [i.lstrip() for i in contents.splitlines()] cleaned_contents = '\n'.join(cleaned_lines) puppet_config.readfp(StringIO(cleaned_contents), @@ -80,7 +81,7 @@ def handle(name, cfg, cloud, log, _args): for (o, v) in cfg.iteritems(): if o == 'certname': # Expand %f as the fqdn - # TODO should this use the cloud fqdn?? + # TODO(harlowja) should this use the cloud fqdn?? 
v = v.replace("%f", socket.getfqdn()) # Expand %i as the instance id v = v.replace("%i", cloud.get_instance_id()) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 256a194f..e7f27944 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -22,8 +22,8 @@ import os import stat import time -from cloudinit import util from cloudinit.settings import PER_ALWAYS +from cloudinit import util frequency = PER_ALWAYS @@ -72,12 +72,12 @@ def handle(name, cfg, cloud, log, args): log.debug("Skipping module named %s, resizing disabled", name) return - # TODO is the directory ok to be used?? + # TODO(harlowja) is the directory ok to be used?? resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run") resize_root_d = cloud.paths.join(False, resize_root_d) util.ensure_dir(resize_root_d) - # TODO: allow what is to be resized to be configurable?? + # TODO(harlowja): allow what is to be resized to be configurable?? resize_what = cloud.paths.join(False, "/") with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.", dir=resize_root_d, delete=True) as tfh: @@ -136,5 +136,5 @@ def do_resize(resize_cmd, log): raise tot_time = time.time() - start log.debug("Resizing took %.3f seconds", tot_time) - # TODO: Should we add a fsck check after this to make + # TODO(harlowja): Should we add a fsck check after this to make # sure we didn't corrupt anything? diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py index 45d41b3f..4bf18516 100644 --- a/cloudinit/config/cc_rightscale_userdata.py +++ b/cloudinit/config/cc_rightscale_userdata.py @@ -37,9 +37,9 @@ import os +from cloudinit.settings import PER_INSTANCE from cloudinit import url_helper as uhelp from cloudinit import util -from cloudinit.settings import PER_INSTANCE from urlparse import parse_qs @@ -72,7 +72,7 @@ def handle(name, _cfg, cloud, log, _args): captured_excps = [] # These will eventually be then ran by the cc_scripts_user - # TODO: maybe this should just be a new user data handler?? + # TODO(harlowja): maybe this should just be a new user data handler?? # Instead of a late module that acts like a user data handler? scripts_d = cloud.get_ipath_cur('scripts') urls = mdict[MY_HOOKNAME] diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 4019ae90..3431bd2a 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -18,11 +18,11 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import os import glob +import os -from cloudinit import util from cloudinit import ssh_util +from cloudinit import util DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding," "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" " @@ -76,7 +76,7 @@ def handle(_name, cfg, cloud, log, _args): pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0]) cmd = ['sh', '-xc', KEY_GEN_TPL % pair] try: - # TODO: Is this guard needed? + # TODO(harlowja): Is this guard needed? with util.SeLinuxGuard("/etc/ssh", recursive=True): util.subp(cmd, capture=False) log.debug("Generated a key for %s from %s", pair[0], pair[1]) @@ -94,7 +94,7 @@ def handle(_name, cfg, cloud, log, _args): if not os.path.exists(keyfile): cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile] try: - # TODO: Is this guard needed? + # TODO(harlowja): Is this guard needed? 
with util.SeLinuxGuard("/etc/ssh", recursive=True): util.subp(cmd, capture=False) except: diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 81d6e89e..23f5755a 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -28,7 +28,7 @@ from cloudinit import util def _split_hash(bin_hash): split_up = [] for i in xrange(0, len(bin_hash), 2): - split_up.append(bin_hash[i:i+2]) + split_up.append(bin_hash[i:i + 2]) return split_up diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index 38108da7..4d75000f 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -18,8 +18,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from cloudinit import util from cloudinit import templater +from cloudinit import util from cloudinit.settings import PER_ALWAYS diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index b84a1a06..1d6679ea 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -20,8 +20,8 @@ import os -from cloudinit import util from cloudinit.settings import PER_ALWAYS +from cloudinit import util frequency = PER_ALWAYS diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index 1bfa4c25..a73d6f4e 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -19,8 +19,8 @@ import base64 import os -from cloudinit import util from cloudinit.settings import PER_INSTANCE +from cloudinit import util frequency = PER_INSTANCE @@ -46,7 +46,7 @@ def canonicalize_extraction(encoding_type, log): return ['application/x-gzip'] if encoding_type in ['gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64']: return ['application/base64', 'application/x-gzip'] - # Yaml already encodes binary data as base64 if it is given to the + # Yaml already encodes binary data as base64 if it is given to the # yaml file as binary, so those will be automatically decoded for you. # But the above b64 is just for people that are more 'comfortable' # specifing it manually (which might be a possiblity) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index da4d0180..b9609b7a 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -28,7 +28,7 @@ from cloudinit import importer from cloudinit import log as logging from cloudinit import util -# TODO: Make this via config?? +# TODO(harlowja): Make this via config?? IFACE_ACTIONS = { 'up': ['ifup', '--all'], 'down': ['ifdown', '--all'], diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 7fa69f03..d81ee5fb 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -69,7 +69,7 @@ class Distro(distros.Distro): self.package_command('install', pkglist) def _write_network(self, settings): - # TODO fix this... since this is the ubuntu format + # TODO(harlowja) fix this... since this is the ubuntu format entries = translate_network(settings) LOG.debug("Translated ubuntu style network settings %s into %s", settings, entries) @@ -258,7 +258,7 @@ class QuotingConfigObj(ConfigObj): # This is a util function to translate a ubuntu /etc/network/interfaces 'blob' # to a rhel equiv. that can then be written to /etc/sysconfig/network-scripts/ -# TODO remove when we have python-netcf active... 
+# TODO(harlowja) remove when we have python-netcf active... def translate_network(settings): # Get the standard cmd, args from the ubuntu format entries = [] diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 6d1502f4..99caed1f 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -133,7 +133,7 @@ def walker_handle_handler(pdata, _ctype, _filename, payload): modfname = os.path.join(pdata['handlerdir'], "%s" % (modname)) if not modfname.endswith(".py"): modfname = "%s.py" % (modfname) - # TODO: Check if path exists?? + # TODO(harlowja): Check if path exists?? util.write_file(modfname, payload, 0600) handlers = pdata['handlers'] try: diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py index a9d8e544..6c5c11ca 100644 --- a/cloudinit/handlers/shell_script.py +++ b/cloudinit/handlers/shell_script.py @@ -43,7 +43,7 @@ class ShellScriptPartHandler(handlers.Handler): def _handle_part(self, _data, ctype, filename, payload, _frequency): if ctype in handlers.CONTENT_SIGNALS: - # TODO: maybe delete existing things here + # TODO(harlowja): maybe delete existing things here return filename = util.clean_filename(filename) diff --git a/cloudinit/log.py b/cloudinit/log.py index 819c85b6..2333e5ee 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -21,8 +21,8 @@ # along with this program. If not, see . import logging -import logging.handlers import logging.config +import logging.handlers import collections import os diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 751bef4f..8056dcfa 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -49,8 +49,7 @@ class DataSourceCloudStack(sources.DataSource): self.metadata_address = "http://%s/" % (gw_addr) def get_default_gateway(self): - """ Returns the default gateway ip address in the dotted format - """ + """Returns the default gateway ip address in the dotted format.""" lines = util.load_file("/proc/net/route").splitlines() for line in lines: items = line.split("\t") diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 320dd1d1..850b281c 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -124,12 +124,12 @@ class NonConfigDriveDir(Exception): def find_cfg_drive_device(): - """ Get the config drive device. Return a string like '/dev/vdb' - or None (if there is no non-root device attached). This does not - check the contents, only reports that if there *were* a config_drive - attached, it would be this device. - Note: per config_drive documentation, this is - "associated as the last available disk on the instance" + """Get the config drive device. Return a string like '/dev/vdb' + or None (if there is no non-root device attached). This does not + check the contents, only reports that if there *were* a config_drive + attached, it would be this device. + Note: per config_drive documentation, this is + "associated as the last available disk on the instance" """ # This seems to be for debugging?? @@ -160,7 +160,7 @@ def read_config_drive_dir(source_dir): string populated. If not a valid dir, raise a NonConfigDriveDir """ - # TODO: fix this for other operating systems... + # TODO(harlowja): fix this for other operating systems... # Ie: this is where https://fedorahosted.org/netcf/ or similar should # be hooked in... 
(or could be) found = {} diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 2f6a566c..c9634a90 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -326,7 +326,7 @@ class Init(object): 'paths': self.paths, 'datasource': self.datasource, } - # TODO Hmmm, should we dynamically import these?? + # TODO(harlowja) Hmmm, should we dynamically import these?? def_handlers = [ cc_part.CloudConfigPartHandler(**opts), ss_part.ShellScriptPartHandler(**opts), @@ -519,7 +519,7 @@ class Modules(object): " but not on %s distro. It may or may not work" " correctly."), name, worked_distros, d_name) # Use the configs logger and not our own - # TODO: possibly check the module + # TODO(harlowja): possibly check the module # for having a LOG attr and just give it back # its own logger? func_args = [name, self.cfg, diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index f5d01818..af98b488 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -23,9 +23,9 @@ import os import email +from email.mime.base import MIMEBase from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText -from email.mime.base import MIMEBase from cloudinit import handlers from cloudinit import log as logging @@ -159,7 +159,7 @@ class UserDataProcessor(object): if isinstance(ent, (str, basestring)): ent = {'content': ent} if not isinstance(ent, (dict)): - # TODO raise? + # TODO(harlowja) raise? continue content = ent.get('content', '') diff --git a/cloudinit/util.py b/cloudinit/util.py index a8c0cceb..825867a7 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -24,8 +24,8 @@ from StringIO import StringIO -import copy as obj_copy import contextlib +import copy as obj_copy import errno import glob import grp @@ -317,8 +317,9 @@ def multi_log(text, console=True, stderr=True, else: log.log(log_level, text) + def is_ipv4(instr): - """ determine if input string is a ipv4 address. return boolean""" + """determine if input string is a ipv4 address. return boolean.""" toks = instr.split('.') if len(toks) != 4: return False @@ -826,12 +827,12 @@ def get_cmdline_url(names=('cloud-config-url', 'url'), def is_resolvable(name): - """ determine if a url is resolvable, return a boolean + """determine if a url is resolvable, return a boolean This also attempts to be resilent against dns redirection. Note, that normal nsswitch resolution is used here. So in order to avoid any utilization of 'search' entries in /etc/resolv.conf - we have to append '.'. + we have to append '.'. The top level 'invalid' domain is invalid per RFC. And example.com should also not exist. 
The random entry will be resolved inside @@ -847,7 +848,7 @@ def is_resolvable(name): try: result = socket.getaddrinfo(iname, None, 0, 0, socket.SOCK_STREAM, socket.AI_CANONNAME) - badresults[iname] = [] + badresults[iname] = [] for (_fam, _stype, _proto, cname, sockaddr) in result: badresults[iname].append("%s: %s" % (cname, sockaddr[0])) badips.add(sockaddr[0]) @@ -856,7 +857,7 @@ def is_resolvable(name): _DNS_REDIRECT_IP = badips if badresults: LOG.debug("detected dns redirection: %s" % badresults) - + try: result = socket.getaddrinfo(name, None) # check first result's sockaddr field @@ -874,7 +875,7 @@ def get_hostname(): def is_resolvable_url(url): - """ determine if this url is resolvable (existing or ip) """ + """determine if this url is resolvable (existing or ip).""" return (is_resolvable(urlparse.urlparse(url).hostname)) @@ -1105,7 +1106,7 @@ def hash_blob(blob, routine, mlen=None): def rename(src, dest): LOG.debug("Renaming %s to %s", src, dest) - # TODO use a se guard here?? + # TODO(harlowja) use a se guard here?? os.rename(src, dest) diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 464c8c2f..ac082076 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -1,6 +1,6 @@ -import StringIO import logging import os +import StringIO import sys from mocker import MockerTestCase, ANY, ARGS, KWARGS @@ -61,14 +61,14 @@ class TestWalkerHandleHandler(MockerTestCase): import_mock(self.expected_module_name) self.mocker.result(self.module_fake) self.mocker.replay() - + handlers.walker_handle_handler(self.data, self.ctype, self.filename, self.payload) - + self.assertEqual(1, self.data["handlercount"]) - + def test_import_error(self): - """Module import errors are logged. No handler added to C{pdata}""" + """Module import errors are logged. No handler added to C{pdata}.""" import_mock = self.mocker.replace(importer.import_module, passthrough=False) import_mock(self.expected_module_name) @@ -81,7 +81,7 @@ class TestWalkerHandleHandler(MockerTestCase): self.assertEqual(0, self.data["handlercount"]) def test_attribute_error(self): - """Attribute errors are logged. No handler added to C{pdata}""" + """Attribute errors are logged. 
No handler added to C{pdata}.""" import_mock = self.mocker.replace(importer.import_module, passthrough=False) import_mock(self.expected_module_name) @@ -156,7 +156,7 @@ class TestHandlerHandlePart(MockerTestCase): self.payload, self.frequency) def test_no_handle_when_modfreq_once(self): - """C{handle_part} is not called if frequency is once""" + """C{handle_part} is not called if frequency is once.""" self.frequency = "once" mod_mock = self.mocker.mock() getattr(mod_mock, "frequency") diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index 5bba8bc9..ebc0bd51 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -1,4 +1,4 @@ -"""Tests of the built-in user data handlers""" +"""Tests of the built-in user data handlers.""" import os @@ -33,7 +33,7 @@ class TestBuiltins(MockerTestCase): None, None, None) self.assertEquals(0, len(os.listdir(up_root))) - def test_upstart_frequency_single(self): + def test_upstart_frequency_single(self): c_root = self.makeDir() up_root = self.makeDir() paths = helpers.Paths({ diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py index 54e152e9..bda61c7e 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/test_datasource/test_altcloud.py @@ -25,14 +25,15 @@ import os import shutil import tempfile -from unittest import TestCase from cloudinit import helpers +from unittest import TestCase # Get the cloudinit.sources.DataSourceAltCloud import items needed. import cloudinit.sources.DataSourceAltCloud from cloudinit.sources.DataSourceAltCloud import DataSourceAltCloud from cloudinit.sources.DataSourceAltCloud import read_user_data_callback + def _write_cloud_info_file(value): ''' Populate the CLOUD_INFO_FILE which would be populated @@ -44,12 +45,14 @@ def _write_cloud_info_file(value): cifile.close() os.chmod(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 0664) + def _remove_cloud_info_file(): ''' Remove the test CLOUD_INFO_FILE ''' os.remove(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE) + def _write_user_data_files(mount_dir, value): ''' Populate the deltacloud_user_data_file the user_data_file @@ -68,6 +71,7 @@ def _write_user_data_files(mount_dir, value): udfile.close() os.chmod(user_data_file, 0664) + def _remove_user_data_files(mount_dir, dc_file=True, non_dc_file=True): @@ -91,14 +95,15 @@ def _remove_user_data_files(mount_dir, except OSError: pass + class TestGetCloudType(TestCase): ''' - Test to exercise method: DataSourceAltCloud.get_cloud_type() + Test to exercise method: DataSourceAltCloud.get_cloud_type() ''' def setUp(self): - ''' Set up ''' - self.paths = helpers.Paths({ 'cloud_dir': '/tmp' }) + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) def tearDown(self): # Reset @@ -158,14 +163,15 @@ class TestGetCloudType(TestCase): self.assertEquals('UNKNOWN', \ dsrc.get_cloud_type()) + class TestGetDataCloudInfoFile(TestCase): ''' - Test to exercise method: DataSourceAltCloud.get_data() + Test to exercise method: DataSourceAltCloud.get_data() With a contrived CLOUD_INFO_FILE ''' def setUp(self): - ''' Set up ''' - self.paths = helpers.Paths({ 'cloud_dir': '/tmp' }) + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) self.cloud_info_file = tempfile.mkstemp()[1] cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \ self.cloud_info_file @@ -183,52 +189,53 @@ class TestGetDataCloudInfoFile(TestCase): '/etc/sysconfig/cloud-info' 
def test_rhev(self): - '''Success Test module get_data() forcing RHEV ''' + '''Success Test module get_data() forcing RHEV.''' _write_cloud_info_file('RHEV') dsrc = DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_rhevm = lambda : True + dsrc.user_data_rhevm = lambda: True self.assertEquals(True, dsrc.get_data()) def test_vsphere(self): - '''Success Test module get_data() forcing VSPHERE ''' + '''Success Test module get_data() forcing VSPHERE.''' _write_cloud_info_file('VSPHERE') dsrc = DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_vsphere = lambda : True + dsrc.user_data_vsphere = lambda: True self.assertEquals(True, dsrc.get_data()) def test_fail_rhev(self): - '''Failure Test module get_data() forcing RHEV ''' + '''Failure Test module get_data() forcing RHEV.''' _write_cloud_info_file('RHEV') dsrc = DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_rhevm = lambda : False + dsrc.user_data_rhevm = lambda: False self.assertEquals(False, dsrc.get_data()) def test_fail_vsphere(self): - '''Failure Test module get_data() forcing VSPHERE ''' + '''Failure Test module get_data() forcing VSPHERE.''' _write_cloud_info_file('VSPHERE') dsrc = DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_vsphere = lambda : False + dsrc.user_data_vsphere = lambda: False self.assertEquals(False, dsrc.get_data()) def test_unrecognized(self): - '''Failure Test module get_data() forcing unrecognized ''' + '''Failure Test module get_data() forcing unrecognized.''' _write_cloud_info_file('unrecognized') dsrc = DataSourceAltCloud({}, None, self.paths) self.assertEquals(False, dsrc.get_data()) + class TestGetDataNoCloudInfoFile(TestCase): ''' - Test to exercise method: DataSourceAltCloud.get_data() + Test to exercise method: DataSourceAltCloud.get_data() Without a CLOUD_INFO_FILE ''' def setUp(self): - ''' Set up ''' - self.paths = helpers.Paths({ 'cloud_dir': '/tmp' }) + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \ 'no such file' @@ -240,38 +247,39 @@ class TestGetDataNoCloudInfoFile(TestCase): ['dmidecode', '--string', 'system-product-name'] def test_rhev_no_cloud_file(self): - '''Test No cloud info file module get_data() forcing RHEV ''' + '''Test No cloud info file module get_data() forcing RHEV.''' cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \ ['echo', 'RHEV Hypervisor'] dsrc = DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_rhevm = lambda : True + dsrc.user_data_rhevm = lambda: True self.assertEquals(True, dsrc.get_data()) def test_vsphere_no_cloud_file(self): - '''Test No cloud info file module get_data() forcing VSPHERE ''' + '''Test No cloud info file module get_data() forcing VSPHERE.''' cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \ ['echo', 'VMware Virtual Platform'] dsrc = DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_vsphere = lambda : True + dsrc.user_data_vsphere = lambda: True self.assertEquals(True, dsrc.get_data()) def test_failure_no_cloud_file(self): - '''Test No cloud info file module get_data() forcing unrecognized ''' + '''Test No cloud info file module get_data() forcing unrecognized.''' cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \ ['echo', 'Unrecognized Platform'] dsrc = DataSourceAltCloud({}, None, self.paths) self.assertEquals(False, dsrc.get_data()) + class TestUserDataRhevm(TestCase): ''' - Test to exercise method: DataSourceAltCloud.user_data_rhevm() + Test to exercise method: DataSourceAltCloud.user_data_rhevm() ''' def 
setUp(self): - ''' Set up ''' - self.paths = helpers.Paths({ 'cloud_dir': '/tmp' }) + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) self.mount_dir = tempfile.mkdtemp() _write_user_data_files(self.mount_dir, 'test user data') @@ -295,7 +303,7 @@ class TestUserDataRhevm(TestCase): ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5'] def test_mount_cb_fails(self): - '''Test user_data_rhevm() where mount_cb fails''' + '''Test user_data_rhevm() where mount_cb fails.''' cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \ ['echo', 'modprobe floppy'] @@ -305,7 +313,7 @@ class TestUserDataRhevm(TestCase): self.assertEquals(False, dsrc.user_data_rhevm()) def test_modprobe_fails(self): - '''Test user_data_rhevm() where modprobe fails. ''' + '''Test user_data_rhevm() where modprobe fails.''' cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \ ['ls', 'modprobe floppy'] @@ -315,7 +323,7 @@ class TestUserDataRhevm(TestCase): self.assertEquals(False, dsrc.user_data_rhevm()) def test_no_modprobe_cmd(self): - '''Test user_data_rhevm() with no modprobe command. ''' + '''Test user_data_rhevm() with no modprobe command.''' cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \ ['bad command', 'modprobe floppy'] @@ -325,7 +333,7 @@ class TestUserDataRhevm(TestCase): self.assertEquals(False, dsrc.user_data_rhevm()) def test_udevadm_fails(self): - '''Test user_data_rhevm() where udevadm fails. ''' + '''Test user_data_rhevm() where udevadm fails.''' cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \ ['ls', 'udevadm floppy'] @@ -335,7 +343,7 @@ class TestUserDataRhevm(TestCase): self.assertEquals(False, dsrc.user_data_rhevm()) def test_no_udevadm_cmd(self): - '''Test user_data_rhevm() with no udevadm command. ''' + '''Test user_data_rhevm() with no udevadm command.''' cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \ ['bad command', 'udevadm floppy'] @@ -344,13 +352,14 @@ class TestUserDataRhevm(TestCase): self.assertEquals(False, dsrc.user_data_rhevm()) + class TestUserDataVsphere(TestCase): ''' - Test to exercise method: DataSourceAltCloud.user_data_vsphere() + Test to exercise method: DataSourceAltCloud.user_data_vsphere() ''' def setUp(self): - ''' Set up ''' - self.paths = helpers.Paths({ 'cloud_dir': '/tmp' }) + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) self.mount_dir = tempfile.mkdtemp() _write_user_data_files(self.mount_dir, 'test user data') @@ -370,7 +379,7 @@ class TestUserDataVsphere(TestCase): '/etc/sysconfig/cloud-info' def test_user_data_vsphere(self): - '''Test user_data_vsphere() where mount_cb fails''' + '''Test user_data_vsphere() where mount_cb fails.''' cloudinit.sources.DataSourceAltCloud.MEDIA_DIR = self.mount_dir @@ -378,13 +387,14 @@ class TestUserDataVsphere(TestCase): self.assertEquals(False, dsrc.user_data_vsphere()) + class TestReadUserDataCallback(TestCase): ''' - Test to exercise method: DataSourceAltCloud.read_user_data_callback() + Test to exercise method: DataSourceAltCloud.read_user_data_callback() ''' def setUp(self): - ''' Set up ''' - self.paths = helpers.Paths({ 'cloud_dir': '/tmp' }) + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) self.mount_dir = tempfile.mkdtemp() _write_user_data_files(self.mount_dir, 'test user data') @@ -400,15 +410,14 @@ class TestReadUserDataCallback(TestCase): except OSError: pass - def test_callback_both(self): - '''Test read_user_data_callback() with both files''' + '''Test read_user_data_callback() with both files.''' self.assertEquals('test user 
data', read_user_data_callback(self.mount_dir)) def test_callback_dc(self): - '''Test read_user_data_callback() with only DC file''' + '''Test read_user_data_callback() with only DC file.''' _remove_user_data_files(self.mount_dir, dc_file=False, @@ -418,7 +427,7 @@ class TestReadUserDataCallback(TestCase): read_user_data_callback(self.mount_dir)) def test_callback_non_dc(self): - '''Test read_user_data_callback() with only non-DC file''' + '''Test read_user_data_callback() with only non-DC file.''' _remove_user_data_files(self.mount_dir, dc_file=True, @@ -428,9 +437,9 @@ class TestReadUserDataCallback(TestCase): read_user_data_callback(self.mount_dir)) def test_callback_none(self): - '''Test read_user_data_callback() no files are found''' + '''Test read_user_data_callback() no files are found.''' - _remove_user_data_files(self.mount_dir) + _remove_user_data_files(self.mount_dir) self.assertEquals(None, read_user_data_callback(self.mount_dir)) # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 8a155f39..85e6add0 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -1,8 +1,8 @@ -import os from copy import copy +import os -from cloudinit import url_helper from cloudinit.sources import DataSourceMAAS +from cloudinit import url_helper from mocker import MockerTestCase @@ -15,7 +15,7 @@ class TestMAASDataSource(MockerTestCase): self.tmp = self.makeDir() def test_seed_dir_valid(self): - """Verify a valid seeddir is read as such""" + """Verify a valid seeddir is read as such.""" data = {'instance-id': 'i-valid01', 'local-hostname': 'valid01-hostname', @@ -35,7 +35,7 @@ class TestMAASDataSource(MockerTestCase): self.assertFalse(('user-data' in metadata)) def test_seed_dir_valid_extra(self): - """Verify extra files do not affect seed_dir validity """ + """Verify extra files do not affect seed_dir validity.""" data = {'instance-id': 'i-valid-extra', 'local-hostname': 'valid-extra-hostname', @@ -54,7 +54,7 @@ class TestMAASDataSource(MockerTestCase): self.assertFalse(('foo' in metadata)) def test_seed_dir_invalid(self): - """Verify that invalid seed_dir raises MAASSeedDirMalformed""" + """Verify that invalid seed_dir raises MAASSeedDirMalformed.""" valid = {'instance-id': 'i-instanceid', 'local-hostname': 'test-hostname', 'user-data': ''} @@ -78,20 +78,20 @@ class TestMAASDataSource(MockerTestCase): DataSourceMAAS.read_maas_seed_dir, my_d) def test_seed_dir_none(self): - """Verify that empty seed_dir raises MAASSeedDirNone""" + """Verify that empty seed_dir raises MAASSeedDirNone.""" my_d = os.path.join(self.tmp, "valid_empty") self.assertRaises(DataSourceMAAS.MAASSeedDirNone, DataSourceMAAS.read_maas_seed_dir, my_d) def test_seed_dir_missing(self): - """Verify that missing seed_dir raises MAASSeedDirNone""" - self.assertRaises(DataSourceMAAS.MAASSeedDirNone, + """Verify that missing seed_dir raises MAASSeedDirNone.""" + self.assertRaises(DataSourceMAAS.MAASSeedDirNone, DataSourceMAAS.read_maas_seed_dir, os.path.join(self.tmp, "nonexistantdirectory")) def test_seed_url_valid(self): - """Verify that valid seed_url is read as such""" + """Verify that valid seed_url is read as such.""" valid = {'meta-data/instance-id': 'i-instanceid', 'meta-data/local-hostname': 'test-hostname', 'meta-data/public-keys': 'test-hostname', @@ -129,11 +129,11 @@ class TestMAASDataSource(MockerTestCase): valid['meta-data/local-hostname']) def test_seed_url_invalid(self): - """Verify that 
invalid seed_url raises MAASSeedDirMalformed""" + """Verify that invalid seed_url raises MAASSeedDirMalformed.""" pass def test_seed_url_missing(self): - """Verify seed_url with no found entries raises MAASSeedDirNone""" + """Verify seed_url with no found entries raises MAASSeedDirNone.""" pass diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py index 948de4c4..d3df5c50 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/test_handler/test_handler_ca_certs.py @@ -1,8 +1,8 @@ from mocker import MockerTestCase -from cloudinit import util from cloudinit import cloud from cloudinit import helpers +from cloudinit import util from cloudinit.config import cc_ca_certs @@ -64,7 +64,7 @@ class TestConfig(MockerTestCase): cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) def test_empty_trusted_list(self): - """Test that no certificate are written if 'trusted' list is empty""" + """Test that no certificate are written if 'trusted' list is empty.""" config = {"ca-certs": {"trusted": []}} # No functions should be called @@ -74,7 +74,7 @@ class TestConfig(MockerTestCase): cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) def test_single_trusted(self): - """Test that a single cert gets passed to add_ca_certs""" + """Test that a single cert gets passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1"]}} self.mock_add(self.paths, ["CERT1"]) @@ -84,7 +84,7 @@ class TestConfig(MockerTestCase): cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) def test_multiple_trusted(self): - """Test that multiple certs get passed to add_ca_certs""" + """Test that multiple certs get passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}} self.mock_add(self.paths, ["CERT1", "CERT2"]) @@ -94,7 +94,7 @@ class TestConfig(MockerTestCase): cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) def test_remove_default_ca_certs(self): - """Test remove_defaults works as expected""" + """Test remove_defaults works as expected.""" config = {"ca-certs": {"remove-defaults": True}} self.mock_remove(self.paths) @@ -104,7 +104,7 @@ class TestConfig(MockerTestCase): cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) def test_no_remove_defaults_if_false(self): - """Test remove_defaults is not called when config value is False""" + """Test remove_defaults is not called when config value is False.""" config = {"ca-certs": {"remove-defaults": False}} self.mock_update() @@ -113,7 +113,7 @@ class TestConfig(MockerTestCase): cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) def test_correct_order_for_remove_then_add(self): - """Test remove_defaults is not called when config value is False""" + """Test remove_defaults is not called when config value is False.""" config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}} self.mock_remove(self.paths) @@ -139,7 +139,7 @@ class TestAddCaCerts(MockerTestCase): cc_ca_certs.add_ca_certs(self.paths, []) def test_single_cert(self): - """Test adding a single certificate to the trusted CAs""" + """Test adding a single certificate to the trusted CAs.""" cert = "CERT1\nLINE2\nLINE3" mock_write = self.mocker.replace(util.write_file, passthrough=False) @@ -152,7 +152,7 @@ class TestAddCaCerts(MockerTestCase): cc_ca_certs.add_ca_certs(self.paths, [cert]) def test_multiple_certs(self): - """Test adding multiple certificates to the trusted 
CAs""" + """Test adding multiple certificates to the trusted CAs.""" certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"] expected_cert_file = "\n".join(certs) diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py index fbbf07f2..82a4c555 100644 --- a/tests/unittests/test_userdata.py +++ b/tests/unittests/test_userdata.py @@ -1,4 +1,4 @@ -"""Tests for handling of userdata within cloud init""" +"""Tests for handling of userdata within cloud init.""" import StringIO @@ -54,7 +54,7 @@ class TestConsumeUserData(MockerTestCase): return log_file def test_unhandled_type_warning(self): - """Raw text without magic is ignored but shows warning""" + """Raw text without magic is ignored but shows warning.""" ci = stages.Init() data = "arbitrary text\n" ci.datasource = FakeDataSource(data) @@ -70,7 +70,7 @@ class TestConsumeUserData(MockerTestCase): log_file.getvalue()) def test_mime_text_plain(self): - """Mime message of type text/plain is ignored but shows warning""" + """Mime message of type text/plain is ignored but shows warning.""" ci = stages.Init() message = MIMEBase("text", "plain") message.set_payload("Just text") @@ -86,9 +86,8 @@ class TestConsumeUserData(MockerTestCase): "Unhandled unknown content-type (text/plain)", log_file.getvalue()) - def test_shellscript(self): - """Raw text starting #!/bin/sh is treated as script""" + """Raw text starting #!/bin/sh is treated as script.""" ci = stages.Init() script = "#!/bin/sh\necho hello\n" ci.datasource = FakeDataSource(script) @@ -104,7 +103,7 @@ class TestConsumeUserData(MockerTestCase): self.assertEqual("", log_file.getvalue()) def test_mime_text_x_shellscript(self): - """Mime message of type text/x-shellscript is treated as script""" + """Mime message of type text/x-shellscript is treated as script.""" ci = stages.Init() script = "#!/bin/sh\necho hello\n" message = MIMEBase("text", "x-shellscript") @@ -122,7 +121,7 @@ class TestConsumeUserData(MockerTestCase): self.assertEqual("", log_file.getvalue()) def test_mime_text_plain_shell(self): - """Mime type text/plain starting #!/bin/sh is treated as script""" + """Mime type text/plain starting #!/bin/sh is treated as script.""" ci = stages.Init() script = "#!/bin/sh\necho hello\n" message = MIMEBase("text", "plain") diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 19f66cc4..15fcbd26 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -1,11 +1,11 @@ import os import stat -from unittest import TestCase from mocker import MockerTestCase +from unittest import TestCase -from cloudinit import util from cloudinit import importer +from cloudinit import util class FakeSelinux(object): diff --git a/tools/hacking.py b/tools/hacking.py index a2e6e829..11163df3 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -100,7 +100,7 @@ def cloud_todo_format(physical_line): """ pos = physical_line.find('TODO') pos1 = physical_line.find('TODO(') - pos2 = physical_line.find('#') # make sure it's a comment + pos2 = physical_line.find('#') # make sure it's a comment if (pos != pos1 and pos2 >= 0 and pos2 < pos): return pos, "N101: Use TODO(NAME)" @@ -133,7 +133,6 @@ def cloud_docstring_multiline_end(physical_line): return (pos, "N403: multi line docstring end on new line") - current_file = "" @@ -169,4 +168,3 @@ if __name__ == "__main__": if len(_missingImport) > 0: print >> sys.stderr, ("%i imports missing in this test environment" % len(_missingImport)) - diff --git a/tools/mock-meta.py b/tools/mock-meta.py index 
78838f64..c79f0598 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -6,7 +6,7 @@ """ To use this to mimic the EC2 metadata service entirely, run it like: - # Where 'eth0' is *some* interface. + # Where 'eth0' is *some* interface. sudo ifconfig eth0:0 169.254.169.254 netmask 255.255.255.255 sudo ./mock-meta.py -a 169.254.169.254 -p 80 @@ -23,7 +23,7 @@ import json import logging import os import random -import string # pylint: disable=W0402 +import string # pylint: disable=W0402 import sys import yaml @@ -156,6 +156,8 @@ def traverse(keys, mp): ID_CHARS = [c for c in (string.ascii_uppercase + string.digits)] + + def id_generator(size=6, lower=False): txt = ''.join(random.choice(ID_CHARS) for x in range(size)) if lower: @@ -235,11 +237,11 @@ class MetaDataHandler(object): nparams = params[1:] # This is a weird kludge, why amazon why!!! # public-keys is messed up, list of /latest/meta-data/public-keys/ - # shows something like: '0=brickies' - # but a GET to /latest/meta-data/public-keys/0=brickies will fail - # you have to know to get '/latest/meta-data/public-keys/0', then - # from there you get a 'openssh-key', which you can get. - # this hunk of code just re-works the object for that. + # shows something like: '0=brickies' + # but a GET to /latest/meta-data/public-keys/0=brickies will fail + # you have to know to get '/latest/meta-data/public-keys/0', then + # from there you get a 'openssh-key', which you can get. + # this hunk of code just re-works the object for that. avail_keys = get_ssh_keys() key_ids = sorted(list(avail_keys.keys())) if nparams: @@ -255,7 +257,7 @@ class MetaDataHandler(object): "openssh-key": "\n".join(avail_keys[key_name]), }) if isinstance(result, (dict)): - # TODO: This might not be right?? + # TODO(harlowja): This might not be right?? result = "\n".join(sorted(result.keys())) if not result: result = '' @@ -304,13 +306,13 @@ class UserDataHandler(object): blob = "\n".join(lines) return blob.strip() - def get_data(self, params, who, **kwargs): # pylint: disable=W0613 + def get_data(self, params, who, **kwargs): # pylint: disable=W0613 if not params: return self._get_user_blob(who=who) return NOT_IMPL_RESPONSE -# Seem to need to use globals since can't pass +# Seem to need to use globals since can't pass # data into the request handlers instances... # Puke! meta_fetcher = None @@ -432,7 +434,7 @@ def setup_fetchers(opts): def run_server(): - # Using global here since it doesn't seem like we + # Using global here since it doesn't seem like we # can pass opts into a request handler constructor... opts = extract_opts() setup_logging(logging.DEBUG) -- cgit v1.2.3 From cb740b3018f55c21bda3a38b216d4ed61ab30d42 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 22 Aug 2012 14:36:02 -0400 Subject: fix for pep8 issues and seemingly functional now. 
--- cloudinit/config/cc_apt_update_upgrade.py | 9 +++++---- cloudinit/distros/__init__.py | 2 ++ 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py index 4b5f6a6d..49a93415 100644 --- a/cloudinit/config/cc_apt_update_upgrade.py +++ b/cloudinit/config/cc_apt_update_upgrade.py @@ -66,8 +66,8 @@ def handle(name, cfg, cloud, log, _args): 'apt_preserve_sources_list', False): generate_sources_list(release, mirrors, cloud, log) old_mirrors = cfg.get('apt_old_mirrors', - old_mirrors = {"primary": "archive.ubuntu.com/ubuntu", - "security": "security.ubuntu.com/ubuntu"}) + {"primary": "archive.ubuntu.com/ubuntu", + "security": "security.ubuntu.com/ubuntu"}) rename_apt_lists(old_mirrors, mirrors) # Set up any apt proxy @@ -166,6 +166,7 @@ def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"): for filename in glob.glob("%s_*" % oprefix): util.rename(filename, "%s%s" % (nprefix, filename[olen:])) + def get_release(): (stdout, _stderr) = util.subp(['lsb_release', '-cs']) return stdout.strip() @@ -276,8 +277,8 @@ def find_apt_mirror_info(cloud, cfg): mirror_list.append(mirrorfmt % (post)) mirror = util.search_for_mirror(mirror_list) - - mirror_info = cloud.get_package_mirror_info() + + mirror_info = cloud.datasource.get_package_mirror_info() # this is a bit strange. # if mirror is set, then one of the legacy options above set it diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 62728a53..dce8fd06 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -209,6 +209,7 @@ def _get_package_mirror_info(mirror_info, availability_zone=None, return results + def _get_arch_package_mirror_info(package_mirrors, arch): # pull out the specific arch from a 'package_mirrors' config option default = None @@ -220,6 +221,7 @@ def _get_arch_package_mirror_info(package_mirrors, arch): default = item return default + def fetch(name): locs = importer.find_module(name, ['', __name__], -- cgit v1.2.3 From 0781b86b414b73c8b5dd9123bda95b7dd74b0243 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 22 Aug 2012 15:18:01 -0400 Subject: improve the check for "uses unknown key" in mirror templates Instead of substituting and then checking for the presence of an unlikely-to-occur string, this only adds to the search list if no KeyError is raised. A standalone sketch of that behavior follows.
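For illustration only (not part of the original patch), a minimal sketch of the KeyError-based filtering this commit introduces; the template strings and zone value below are hypothetical stand-ins, not taken from the patch:

    # Keep a template only when every key it references is present in the
    # substitution map; '%' dict-formatting raises KeyError for missing keys.
    def filter_templates(searchlist, subst):
        mirrors = []
        for tmpl in searchlist:
            try:
                mirrors.append(tmpl % subst)
            except KeyError:
                # template referenced data we do not have; drop it
                pass
        return mirrors

    # Hypothetical inputs:
    searchlist = ["http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/",
                  "http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/"]
    print(filter_templates(searchlist, {'availability_zone': 'us-east-1b'}))
    # Only the availability_zone template survives; the ec2_region one is
    # dropped because 'ec2_region' is absent from the substitution map.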
--- cloudinit/distros/__init__.py | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index dce8fd06..357209a4 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -180,26 +180,24 @@ def _get_package_mirror_info(mirror_info, availability_zone=None, ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % "north|northeast|east|southeast|south|southwest|west|northwest") - unset_value = "_UNSET_VALUE_USED_" - azone = availability_zone + subst = {} + if availability_zone: + subst['availability_zone'] = availability_zone - if azone and re.match(ec2_az_re, azone): - ec2_region = "%s" % azone[0:-1] - elif azone: - ec2_region = unset_value - else: - azone = unset_value - ec2_region = unset_value + if availability_zone and re.match(ec2_az_re, availability_zone): + subst['ec2_region'] = "%s" % availability_zone[0:-1] results = {} for (name, mirror) in mirror_info.get('failsafe', {}).iteritems(): results[name] = mirror for (name, searchlist) in mirror_info.get('search', {}).iteritems(): - mirrors = [m % {'ec2_region': ec2_region, 'availability_zone': azone} - for m in searchlist] - # now filter out anything that used the unset availability zone - mirrors = [m for m in mirrors if m.find(unset_value) < 0] + mirrors = [] + for tmpl in searchlist: + try: + mirrors.append(tmpl % subst) + except KeyError: + pass found = mirror_filter(mirrors) if found: -- cgit v1.2.3 From 7b715270f0720c565af50e102d761cd83cb3569b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 22 Aug 2012 17:02:54 -0400 Subject: add apt_reboot_if_required to reboot if required If an upgrade or package installation forced a reboot (such as a kernel upgrade), then we can optionally reboot at that point. This allows the user to end up on the newest available kernel without needing to reboot on their own. LP: #1038108 --- ChangeLog | 2 ++ cloudinit/config/cc_apt_update_upgrade.py | 15 +++++++++++++++ 2 files changed, 17 insertions(+) (limited to 'cloudinit') diff --git a/ChangeLog b/ChangeLog index e9e88318..91f3834a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,6 @@ 0.7.0: + - add apt_reboot_if_required to reboot if an upgrade or package installation + forced the need for one (LP: #1038108) - allow distro mirror selection to include availability-zone (LP: #1037727) - allow arch specific mirror selection (select ports.ubuntu.com on arm) LP: #1028501 diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py index 49a93415..356bb98d 100644 --- a/cloudinit/config/cc_apt_update_upgrade.py +++ b/cloudinit/config/cc_apt_update_upgrade.py @@ -20,6 +20,7 @@ import glob import os +import time from cloudinit import templater from cloudinit import util @@ -125,6 +126,20 @@ def handle(name, cfg, cloud, log, _args): util.logexc(log, "Failed to install packages: %s ", pkglist) errors.append(e) + # kernel and openssl (possibly some other packages) + # write a file /var/run/reboot-required after upgrading.
+ # if that file exists and this is configured, then just stop right now and reboot + # TODO(smoser): handle this less violently + reboot_file = "/var/run/reboot-required" + if ((upgrade or pkglist) and cfg.get("apt_reboot_if_required", False) and + os.path.isfile(reboot_file)): + log.warn("rebooting after upgrade or install per %s" % reboot_file) + time.sleep(1) # give the warning time to get out + util.subp(["/sbin/reboot"]) + time.sleep(60) + log.warn("requested reboot did not happen!") + errors.append(Exception("requested reboot did not happen!")) + if len(errors): log.warn("%s failed with exceptions, re-raising the last one", len(errors)) -- cgit v1.2.3
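As a usage illustration (not part of the patch), user-data along these lines would exercise the new flag; apt_update and apt_upgrade are the pre-existing options this module already reads, and only apt_reboot_if_required is new here:

    #cloud-config
    apt_update: true
    apt_upgrade: true
    apt_reboot_if_required: true

With this set, an upgrade that drops /var/run/reboot-required (for example a kernel update) causes the instance to reboot once at the end of package handling instead of waiting for a manual reboot.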