author    | Ben Howard <ben.howard@canonical.com> | 2012-08-22 16:35:11 -0600
committer | Ben Howard <ben.howard@canonical.com> | 2012-08-22 16:35:11 -0600
commit    | 6564861d44f843bb4e339db5691021ec7a95c511 (patch)
tree      | d3f9f2fbc6e8e01bcab073f77b6f3d673a1da6be
parent    | a6752e739a0bb9052585b9b043ce1964bd77bb42 (diff)
parent    | 56979d20b9c56c45bfbcaf93bc5f93fa505ece50 (diff)
download  | vyos-cloud-init-6564861d44f843bb4e339db5691021ec7a95c511.tar.gz
          | vyos-cloud-init-6564861d44f843bb4e339db5691021ec7a95c511.zip
Merge with lp:cloud-init
43 files changed, 704 insertions(+), 291 deletions(-)
@@ -1,4 +1,14 @@ 0.7.0: + - add apt_reboot_if_required to reboot if an upgrade or package installation + forced the need for one (LP: #1038108) + - allow distro mirror selection to include availability-zone (LP: #1037727) + - allow arch specific mirror selection (select ports.ubuntu.com on arm) + LP: #1028501 + - allow specification of security mirrors (LP: #1006963) + - add the 'None' datasource (LP: #906669), which will allow jobs + to run even if there is no "real" datasource found. + - write ssh authorized keys to console, ssh_authkey_fingerprints + config module [Joshua Harlow] (LP: #1010582) - Added RHEVm and vSphere support as source AltCloud [Joseph VLcek] - add write-files module (LP: #1012854) - Add setuptools + cheetah to debian package build dependencies (LP: #1022101) diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index 22d9167e..620b3c07 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -82,9 +82,6 @@ class Cloud(object): def get_locale(self): return self.datasource.get_locale() - def get_local_mirror(self): - return self.datasource.get_local_mirror() - def get_hostname(self, fqdn=False): return self.datasource.get_hostname(fqdn=fqdn) diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index 3426099e..02056ee0 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -16,8 +16,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -from cloudinit import util from cloudinit.settings import PER_INSTANCE +from cloudinit import util frequency = PER_INSTANCE @@ -50,7 +50,7 @@ def handle(_name, cfg, cloud, log, _args): def write_apt_snippet(cloud, setting, log, f_name): - """ Writes f_name with apt pipeline depth 'setting' """ + """Writes f_name with apt pipeline depth 'setting'.""" file_contents = APT_PIPE_TPL % (setting) diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py index 1bffa47d..356bb98d 100644 --- a/cloudinit/config/cc_apt_update_upgrade.py +++ b/cloudinit/config/cc_apt_update_upgrade.py @@ -20,6 +20,7 @@ import glob import os +import time from cloudinit import templater from cloudinit import util @@ -50,20 +51,25 @@ def handle(name, cfg, cloud, log, _args): upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False) release = get_release() - mirror = find_apt_mirror(cloud, cfg) - if not mirror: + mirrors = find_apt_mirror_info(cloud, cfg) + if not mirrors or "primary" not in mirrors: log.debug(("Skipping module named %s," " no package 'mirror' located"), name) return - log.debug("Selected mirror at: %s" % mirror) + # backwards compatibility + mirror = mirrors["primary"] + mirrors["mirror"] = mirror + + log.debug("mirror info: %s" % mirrors) if not util.get_cfg_option_bool(cfg, 'apt_preserve_sources_list', False): - generate_sources_list(release, mirror, cloud, log) - old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror', - "archive.ubuntu.com/ubuntu") - rename_apt_lists(old_mir, mirror) + generate_sources_list(release, mirrors, cloud, log) + old_mirrors = cfg.get('apt_old_mirrors', + {"primary": "archive.ubuntu.com/ubuntu", + "security": "security.ubuntu.com/ubuntu"}) + rename_apt_lists(old_mirrors, mirrors) # Set up any apt proxy proxy = cfg.get("apt_proxy", None) @@ -81,8 +87,10 @@ def handle(name, cfg, cloud, log, _args): # Process 'apt_sources' if 'apt_sources' in cfg: - errors = add_sources(cloud, cfg['apt_sources'], - {'MIRROR': 
mirror, 'RELEASE': release}) + params = mirrors + params['RELEASE'] = release + params['MIRROR'] = mirror + errors = add_sources(cloud, cfg['apt_sources'], params) for e in errors: log.warn("Source Error: %s", ':'.join(e)) @@ -118,6 +126,20 @@ def handle(name, cfg, cloud, log, _args): util.logexc(log, "Failed to install packages: %s ", pkglist) errors.append(e) + # kernel and openssl (possibly some other packages) + # write a file /var/run/reboot-required after upgrading. + # if that file exists and configured, then just stop right now and reboot + # TODO(smoser): handle this less voilently + reboot_file = "/var/run/reboot-required" + if ((upgrade or pkglist) and cfg.get("apt_reboot_if_required", False) and + os.path.isfile(reboot_file)): + log.warn("rebooting after upgrade or install per %s" % reboot_file) + time.sleep(1) # give the warning time to get out + util.subp(["/sbin/reboot"]) + time.sleep(60) + log.warn("requested reboot did not happen!") + errors.append(Exception("requested reboot did not happen!")) + if len(errors): log.warn("%s failed with exceptions, re-raising the last one", len(errors)) @@ -146,15 +168,18 @@ def mirror2lists_fileprefix(mirror): return string -def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"): - oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) - nprefix = os.path.join(lists_d, mirror2lists_fileprefix(new_mirror)) - if oprefix == nprefix: - return - olen = len(oprefix) - for filename in glob.glob("%s_*" % oprefix): - # TODO use the cloud.paths.join... - util.rename(filename, "%s%s" % (nprefix, filename[olen:])) +def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"): + for (name, omirror) in old_mirrors.iteritems(): + nmirror = new_mirrors.get(name) + if not nmirror: + continue + oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) + nprefix = os.path.join(lists_d, mirror2lists_fileprefix(nmirror)) + if oprefix == nprefix: + continue + olen = len(oprefix) + for filename in glob.glob("%s_*" % oprefix): + util.rename(filename, "%s%s" % (nprefix, filename[olen:])) def get_release(): @@ -162,14 +187,17 @@ def get_release(): return stdout.strip() -def generate_sources_list(codename, mirror, cloud, log): +def generate_sources_list(codename, mirrors, cloud, log): template_fn = cloud.get_template_filename('sources.list') - if template_fn: - params = {'mirror': mirror, 'codename': codename} - out_fn = cloud.paths.join(False, '/etc/apt/sources.list') - templater.render_to_file(template_fn, out_fn, params) - else: + if not template_fn: log.warn("No template found, not rendering /etc/apt/sources.list") + return + + params = {'codename': codename} + for k in mirrors: + params[k] = mirrors[k] + out_fn = cloud.paths.join(False, '/etc/apt/sources.list') + templater.render_to_file(template_fn, out_fn, params) def add_sources(cloud, srclist, template_params=None): @@ -231,43 +259,47 @@ def add_sources(cloud, srclist, template_params=None): return errorlist -def find_apt_mirror(cloud, cfg): - """ find an apt_mirror given the cloud and cfg provided """ +def find_apt_mirror_info(cloud, cfg): + """find an apt_mirror given the cloud and cfg provided.""" mirror = None - cfg_mirror = cfg.get("apt_mirror", None) - if cfg_mirror: - mirror = cfg["apt_mirror"] - elif "apt_mirror_search" in cfg: - mirror = util.search_for_mirror(cfg['apt_mirror_search']) - else: - mirror = cloud.get_local_mirror() + # this is less preferred way of specifying mirror preferred would be to + # use the distro's search or 
package_mirror. + mirror = cfg.get("apt_mirror", None) - mydom = "" + search = cfg.get("apt_mirror_search", None) + if not mirror and search: + mirror = util.search_for_mirror(search) + if (not mirror and + util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)): + mydom = "" doms = [] - if not mirror: - # if we have a fqdn, then search its domain portion first - (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) - mydom = ".".join(fqdn.split(".")[1:]) - if mydom: - doms.append(".%s" % mydom) + # if we have a fqdn, then search its domain portion first + (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) + mydom = ".".join(fqdn.split(".")[1:]) + if mydom: + doms.append(".%s" % mydom) + + doms.extend((".localdomain", "",)) - if (not mirror and - util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)): - doms.extend((".localdomain", "",)) + mirror_list = [] + distro = cloud.distro.name + mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) + for post in doms: + mirror_list.append(mirrorfmt % (post)) - mirror_list = [] - distro = cloud.distro.name - mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) - for post in doms: - mirror_list.append(mirrorfmt % (post)) + mirror = util.search_for_mirror(mirror_list) - mirror = util.search_for_mirror(mirror_list) + mirror_info = cloud.datasource.get_package_mirror_info() - if not mirror: - mirror = cloud.distro.get_package_mirror() + # this is a bit strange. + # if mirror is set, then one of the legacy options above set it + # but they do not cover security. so we need to get that from + # get_package_mirror_info + if mirror: + mirror_info.update({'primary': mirror}) - return mirror + return mirror_info diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index bae1ea54..896cb4d0 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -20,8 +20,8 @@ import os -from cloudinit import util from cloudinit.settings import PER_ALWAYS +from cloudinit import util frequency = PER_ALWAYS diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index 68b86ff6..6d376184 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -20,8 +20,8 @@ import os -from cloudinit import util from cloudinit.settings import PER_ALWAYS +from cloudinit import util frequency = PER_ALWAYS @@ -44,5 +44,5 @@ def handle(name, _cfg, cloud, log, args): try: util.subp(cmd) except Exception as e: - # TODO, use log exception from utils?? + # TODO(harlowja), use log exception from utils?? log.warn("Emission of upstart event %s failed due to: %s", n, e) diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index aff03c4e..6b864fda 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -28,7 +28,7 @@ frequency = PER_ALWAYS # Cheetah formated default message FINAL_MESSAGE_DEF = ("Cloud-init v. ${version} finished at ${timestamp}." - " Up ${uptime} seconds.") + " Datasource ${datasource}. 
Up ${uptime} seconds") def handle(_name, cfg, cloud, log, args): @@ -51,6 +51,7 @@ def handle(_name, cfg, cloud, log, args): 'uptime': uptime, 'timestamp': ts, 'version': cver, + 'datasource': str(cloud.datasource), } util.multi_log("%s\n" % (templater.render_string(msg_in, subs)), console=False, stderr=True) @@ -63,3 +64,6 @@ def handle(_name, cfg, cloud, log, args): util.write_file(boot_fin_fn, contents) except: util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn) + + if cloud.datasource.is_disconnected: + log.warn("Used fallback datasource") diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 467c1496..74ee18e1 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -48,7 +48,8 @@ def handle(name, cfg, cloud, log, _args): # Create object for reading puppet.conf values puppet_config = helpers.DefaultingConfigParser() # Read puppet.conf values from original file in order to be able to - # mix the rest up. First clean them up (TODO is this really needed??) + # mix the rest up. First clean them up + # (TODO(harlowja) is this really needed??) cleaned_lines = [i.lstrip() for i in contents.splitlines()] cleaned_contents = '\n'.join(cleaned_lines) puppet_config.readfp(StringIO(cleaned_contents), @@ -80,7 +81,7 @@ def handle(name, cfg, cloud, log, _args): for (o, v) in cfg.iteritems(): if o == 'certname': # Expand %f as the fqdn - # TODO should this use the cloud fqdn?? + # TODO(harlowja) should this use the cloud fqdn?? v = v.replace("%f", socket.getfqdn()) # Expand %i as the instance id v = v.replace("%i", cloud.get_instance_id()) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 256a194f..e7f27944 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -22,8 +22,8 @@ import os import stat import time -from cloudinit import util from cloudinit.settings import PER_ALWAYS +from cloudinit import util frequency = PER_ALWAYS @@ -72,12 +72,12 @@ def handle(name, cfg, cloud, log, args): log.debug("Skipping module named %s, resizing disabled", name) return - # TODO is the directory ok to be used?? + # TODO(harlowja) is the directory ok to be used?? resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run") resize_root_d = cloud.paths.join(False, resize_root_d) util.ensure_dir(resize_root_d) - # TODO: allow what is to be resized to be configurable?? + # TODO(harlowja): allow what is to be resized to be configurable?? resize_what = cloud.paths.join(False, "/") with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.", dir=resize_root_d, delete=True) as tfh: @@ -136,5 +136,5 @@ def do_resize(resize_cmd, log): raise tot_time = time.time() - start log.debug("Resizing took %.3f seconds", tot_time) - # TODO: Should we add a fsck check after this to make + # TODO(harlowja): Should we add a fsck check after this to make # sure we didn't corrupt anything? 
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py index 45d41b3f..4bf18516 100644 --- a/cloudinit/config/cc_rightscale_userdata.py +++ b/cloudinit/config/cc_rightscale_userdata.py @@ -37,9 +37,9 @@ import os +from cloudinit.settings import PER_INSTANCE from cloudinit import url_helper as uhelp from cloudinit import util -from cloudinit.settings import PER_INSTANCE from urlparse import parse_qs @@ -72,7 +72,7 @@ def handle(name, _cfg, cloud, log, _args): captured_excps = [] # These will eventually be then ran by the cc_scripts_user - # TODO: maybe this should just be a new user data handler?? + # TODO(harlowja): maybe this should just be a new user data handler?? # Instead of a late module that acts like a user data handler? scripts_d = cloud.get_ipath_cur('scripts') urls = mdict[MY_HOOKNAME] diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 4019ae90..3431bd2a 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -18,11 +18,11 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -import os import glob +import os -from cloudinit import util from cloudinit import ssh_util +from cloudinit import util DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding," "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" " @@ -76,7 +76,7 @@ def handle(_name, cfg, cloud, log, _args): pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0]) cmd = ['sh', '-xc', KEY_GEN_TPL % pair] try: - # TODO: Is this guard needed? + # TODO(harlowja): Is this guard needed? with util.SeLinuxGuard("/etc/ssh", recursive=True): util.subp(cmd, capture=False) log.debug("Generated a key for %s from %s", pair[0], pair[1]) @@ -94,7 +94,7 @@ def handle(_name, cfg, cloud, log, _args): if not os.path.exists(keyfile): cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile] try: - # TODO: Is this guard needed? + # TODO(harlowja): Is this guard needed? with util.SeLinuxGuard("/etc/ssh", recursive=True): util.subp(cmd, capture=False) except: diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py new file mode 100644 index 00000000..23f5755a --- /dev/null +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -0,0 +1,96 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow <harlowja@yahoo-inc.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import base64 +import hashlib + +from prettytable import PrettyTable + +from cloudinit import ssh_util +from cloudinit import util + + +def _split_hash(bin_hash): + split_up = [] + for i in xrange(0, len(bin_hash), 2): + split_up.append(bin_hash[i:i + 2]) + return split_up + + +def _gen_fingerprint(b64_text, hash_meth='md5'): + if not b64_text: + return '' + # TBD(harlowja): Maybe we should feed this into 'ssh -lf'? 
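Taken in isolation, the fingerprinting in the new `cc_ssh_authkey_fingerprints` module reduces to hashing the base64-decoded key body and rendering the hex digest as colon-joined pairs. A runnable sketch (the key bytes below are a made-up stand-in, not real key material):

```python
import base64
import hashlib

def gen_fingerprint(b64_text, hash_meth='md5'):
    hasher = hashlib.new(hash_meth)
    hasher.update(base64.b64decode(b64_text))
    digest = hasher.hexdigest()
    # split the hex digest into two-character groups: 'ab:cd:...'
    return ":".join(digest[i:i + 2] for i in range(0, len(digest), 2))

fake_key = base64.b64encode(b"not-a-real-ssh-key")
print(gen_fingerprint(fake_key))
```

The module wraps this in a `try/except TypeError` so malformed base64 prints as '?' instead of aborting the boot.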
+ try: + hasher = hashlib.new(hash_meth) + hasher.update(base64.b64decode(b64_text)) + return ":".join(_split_hash(hasher.hexdigest())) + except TypeError: + # Raised when b64 not really b64... + return '?' + + +def _is_printable_key(entry): + if any([entry.keytype, entry.base64, entry.comment, entry.options]): + if (entry.keytype and + entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']): + return True + return False + + +def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', + prefix='ci-info: '): + if not key_entries: + message = ("%sno authorized ssh keys fingerprints found for user %s." + % (prefix, user)) + util.multi_log(message) + return + tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options', + 'Comment'] + tbl = PrettyTable(tbl_fields) + for entry in key_entries: + if _is_printable_key(entry): + row = [] + row.append(entry.keytype or '-') + row.append(_gen_fingerprint(entry.base64, hash_meth) or '-') + row.append(entry.options or '-') + row.append(entry.comment or '-') + tbl.add_row(row) + authtbl_s = tbl.get_string() + authtbl_lines = authtbl_s.splitlines() + max_len = len(max(authtbl_lines, key=len)) + lines = [ + util.center("Authorized keys from %s for user %s" % + (key_fn, user), "+", max_len), + ] + lines.extend(authtbl_lines) + for line in lines: + util.multi_log(text="%s%s\n" % (prefix, line), + stderr=False, console=True) + + +def handle(name, cfg, cloud, log, _args): + if 'no_ssh_fingerprints' in cfg: + log.debug(("Skipping module named %s, " + "logging of ssh fingerprints disabled"), name) + + user_name = util.get_cfg_option_str(cfg, "user", "ubuntu") + hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") + extract = ssh_util.extract_authorized_keys + (auth_key_fn, auth_key_entries) = extract(user_name, cloud.paths) + _pprint_key_entries(user_name, auth_key_fn, auth_key_entries, hash_meth) diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index 38108da7..4d75000f 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -18,8 +18,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -from cloudinit import util from cloudinit import templater +from cloudinit import util from cloudinit.settings import PER_ALWAYS diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index b84a1a06..1d6679ea 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -20,8 +20,8 @@ import os -from cloudinit import util from cloudinit.settings import PER_ALWAYS +from cloudinit import util frequency = PER_ALWAYS diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index 1bfa4c25..a73d6f4e 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -19,8 +19,8 @@ import base64 import os -from cloudinit import util from cloudinit.settings import PER_INSTANCE +from cloudinit import util frequency = PER_INSTANCE @@ -46,7 +46,7 @@ def canonicalize_extraction(encoding_type, log): return ['application/x-gzip'] if encoding_type in ['gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64']: return ['application/base64', 'application/x-gzip'] - # Yaml already encodes binary data as base64 if it is given to the + # Yaml already encodes binary data as base64 if it is given to the # yaml file as binary, so those will be automatically decoded for you. 
# But the above b64 is just for people that are more 'comfortable' # specifing it manually (which might be a possiblity) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 614545f2..2dfb1409 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -24,15 +24,17 @@ from StringIO import StringIO import abc -import pwd import grp import os +import pwd +import re + from cloudinit import importer from cloudinit import log as logging from cloudinit import util from cloudinit import ssh_util -# TODO: Make this via config?? +# TODO(harlowja): Make this via config?? IFACE_ACTIONS = { 'up': ['ifup', '--all'], 'down': ['ifdown', '--all'], @@ -84,8 +86,26 @@ class Distro(object): def update_package_sources(self): raise NotImplementedError() - def get_package_mirror(self): - return self.get_option('package_mirror') + def get_primary_arch(self): + arch = os.uname[4] + if arch in ("i386", "i486", "i586", "i686"): + return "i386" + return arch + + def _get_arch_package_mirror_info(self, arch=None): + mirror_info = self.get_option("package_mirrors", None) + if arch == None: + arch = self.get_primary_arch() + return _get_arch_package_mirror_info(mirror_info, arch) + + def get_package_mirror_info(self, arch=None, + availability_zone=None): + # this resolves the package_mirrors config option + # down to a single dict of {mirror_name: mirror_url} + arch_info = self._get_arch_package_mirror_info(arch) + + return _get_package_mirror_info(availability_zone=availability_zone, + mirror_info=arch_info) def apply_network(self, settings, bring_up=True): # Write it out @@ -337,6 +357,55 @@ class Distro(object): LOG.info("Added user '%s' to group '%s'" % (member, name)) +def _get_package_mirror_info(mirror_info, availability_zone=None, + mirror_filter=util.search_for_mirror): + # given a arch specific 'mirror_info' entry (from package_mirrors) + # search through the 'search' entries, and fallback appropriately + # return a dict with only {name: mirror} entries. 
+ + ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % + "north|northeast|east|southeast|south|southwest|west|northwest") + + subst = {} + if availability_zone: + subst['availability_zone'] = availability_zone + + if availability_zone and re.match(ec2_az_re, availability_zone): + subst['ec2_region'] = "%s" % availability_zone[0:-1] + + results = {} + for (name, mirror) in mirror_info.get('failsafe', {}).iteritems(): + results[name] = mirror + + for (name, searchlist) in mirror_info.get('search', {}).iteritems(): + mirrors = [] + for tmpl in searchlist: + try: + mirrors.append(tmpl % subst) + except KeyError: + pass + + found = mirror_filter(mirrors) + if found: + results[name] = found + + LOG.debug("filtered distro mirror info: %s" % results) + + return results + + +def _get_arch_package_mirror_info(package_mirrors, arch): + # pull out the specific arch from a 'package_mirrors' config option + default = None + for item in package_mirrors: + arches = item.get("arches") + if arch in arches: + return item + if "default" in arches: + default = item + return default + + def fetch(name): locs = importer.find_module(name, ['', __name__], diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 3247d7ce..da8c1a5b 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -147,3 +147,7 @@ class Distro(distros.Distro): def update_package_sources(self): self._runner.run("update-sources", self.package_command, ["update"], freq=PER_INSTANCE) + + def get_primary_arch(self): + (arch, _err) = util.subp(['dpkg', '--print-architecture']) + return str(arch).strip() diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 7fa69f03..d81ee5fb 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -69,7 +69,7 @@ class Distro(distros.Distro): self.package_command('install', pkglist) def _write_network(self, settings): - # TODO fix this... since this is the ubuntu format + # TODO(harlowja) fix this... since this is the ubuntu format entries = translate_network(settings) LOG.debug("Translated ubuntu style network settings %s into %s", settings, entries) @@ -258,7 +258,7 @@ class QuotingConfigObj(ConfigObj): # This is a util function to translate a ubuntu /etc/network/interfaces 'blob' # to a rhel equiv. that can then be written to /etc/sysconfig/network-scripts/ -# TODO remove when we have python-netcf active... +# TODO(harlowja) remove when we have python-netcf active... def translate_network(settings): # Get the standard cmd, args from the ubuntu format entries = [] diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 6d1502f4..99caed1f 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -133,7 +133,7 @@ def walker_handle_handler(pdata, _ctype, _filename, payload): modfname = os.path.join(pdata['handlerdir'], "%s" % (modname)) if not modfname.endswith(".py"): modfname = "%s.py" % (modfname) - # TODO: Check if path exists?? + # TODO(harlowja): Check if path exists?? 
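Back in `_get_package_mirror_info` above, the search templates are expanded with whatever substitutions could be derived from the availability zone; a template that needs an unavailable key is skipped rather than failing. Roughly:

```python
import re

# same region pattern as the patch: matches 'us-east-1a' style EC2 zones
ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" %
             "north|northeast|east|southeast|south|southwest|west|northwest")

availability_zone = "us-east-1a"
subst = {'availability_zone': availability_zone}
if re.match(ec2_az_re, availability_zone):
    subst['ec2_region'] = availability_zone[:-1]  # 'us-east-1a' -> 'us-east-1'

search = ["http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/",
          "http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/"]
candidates = []
for tmpl in search:
    try:
        candidates.append(tmpl % subst)
    except KeyError:
        pass  # e.g. a non-EC2 zone cannot fill %(ec2_region)s
print(candidates)
# the real code then hands these to util.search_for_mirror(), falling
# back to the arch's 'failsafe' entry when nothing responds
```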
util.write_file(modfname, payload, 0600) handlers = pdata['handlers'] try: diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py index a9d8e544..6c5c11ca 100644 --- a/cloudinit/handlers/shell_script.py +++ b/cloudinit/handlers/shell_script.py @@ -43,7 +43,7 @@ class ShellScriptPartHandler(handlers.Handler): def _handle_part(self, _data, ctype, filename, payload, _frequency): if ctype in handlers.CONTENT_SIGNALS: - # TODO: maybe delete existing things here + # TODO(harlowja): maybe delete existing things here return filename = util.clean_filename(filename) diff --git a/cloudinit/log.py b/cloudinit/log.py index 819c85b6..2333e5ee 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -21,8 +21,8 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. import logging -import logging.handlers import logging.config +import logging.handlers import collections import os diff --git a/cloudinit/settings.py b/cloudinit/settings.py index cdfc31ae..8cc9e3b4 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -35,7 +35,9 @@ CFG_BUILTIN = { 'OVF', 'MAAS', 'Ec2', - 'CloudStack' + 'CloudStack', + # At the end to act as a 'catch' when none of the above work... + 'None', ], 'def_log_file': '/var/log/cloud-init.log', 'log_cfgs': [], diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 751bef4f..f7ffa7cb 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -49,8 +49,7 @@ class DataSourceCloudStack(sources.DataSource): self.metadata_address = "http://%s/" % (gw_addr) def get_default_gateway(self): - """ Returns the default gateway ip address in the dotted format - """ + """Returns the default gateway ip address in the dotted format.""" lines = util.load_file("/proc/net/route").splitlines() for line in lines: items = line.split("\t") @@ -132,7 +131,8 @@ class DataSourceCloudStack(sources.DataSource): def get_instance_id(self): return self.metadata['instance-id'] - def get_availability_zone(self): + @property + def availability_zone(self): return self.metadata['availability-zone'] diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 320dd1d1..850b281c 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -124,12 +124,12 @@ class NonConfigDriveDir(Exception): def find_cfg_drive_device(): - """ Get the config drive device. Return a string like '/dev/vdb' - or None (if there is no non-root device attached). This does not - check the contents, only reports that if there *were* a config_drive - attached, it would be this device. - Note: per config_drive documentation, this is - "associated as the last available disk on the instance" + """Get the config drive device. Return a string like '/dev/vdb' + or None (if there is no non-root device attached). This does not + check the contents, only reports that if there *were* a config_drive + attached, it would be this device. + Note: per config_drive documentation, this is + "associated as the last available disk on the instance" """ # This seems to be for debugging?? @@ -160,7 +160,7 @@ def read_config_drive_dir(source_dir): string populated. If not a valid dir, raise a NonConfigDriveDir """ - # TODO: fix this for other operating systems... + # TODO(harlowja): fix this for other operating systems... # Ie: this is where https://fedorahosted.org/netcf/ or similar should # be hooked in... 
(or could be) found = {} diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index d9eb8f17..556dcafb 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -83,40 +83,6 @@ class DataSourceEc2(sources.DataSource): def get_availability_zone(self): return self.metadata['placement']['availability-zone'] - def get_local_mirror(self): - return self.get_mirror_from_availability_zone() - - def get_mirror_from_availability_zone(self, availability_zone=None): - # Return type None indicates there is no cloud specific mirror - # Availability is like 'us-west-1b' or 'eu-west-1a' - if availability_zone is None: - availability_zone = self.get_availability_zone() - - if self.is_vpc(): - return None - - if not availability_zone: - return None - - mirror_tpl = self.distro.get_option('package_mirror_ec2_template', - None) - - if mirror_tpl is None: - return None - - # in EC2, the 'region' is 'us-east-1' if 'zone' is 'us-east-1a' - tpl_params = { - 'zone': availability_zone.strip(), - 'region': availability_zone[:-1] - } - mirror_url = mirror_tpl % (tpl_params) - - found = util.search_for_mirror([mirror_url]) - if found is not None: - return mirror_url - - return None - def _get_url_settings(self): mcfg = self.ds_cfg if not mcfg: @@ -255,6 +221,12 @@ class DataSourceEc2(sources.DataSource): return True return False + @property + def availability_zone(self): + try: + return self.metadata['placement']['availability-zone'] + except KeyError: + return None # Used to match classes to dependencies datasources = [ diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py new file mode 100644 index 00000000..c2125bee --- /dev/null +++ b/cloudinit/sources/DataSourceNone.py @@ -0,0 +1,61 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow <harlowja@yahoo-inc.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +class DataSourceNone(sources.DataSource): + def __init__(self, sys_cfg, distro, paths, ud_proc=None): + sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc) + self.metadata = {} + self.userdata_raw = '' + + def get_data(self): + # If the datasource config has any provided 'fallback' + # userdata or metadata, use it... 
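As the comment above notes, the new None datasource can carry operator-provided fallback data. Illustratively, the relevant datasource config is just a dict (the values here are made up):

```python
# hypothetical ds_cfg for DataSourceNone; get_data() copies these
# straight onto the datasource and returns True unconditionally
ds_cfg = {
    'userdata_raw': "#cloud-config\nruncmd:\n - [sh, -c, 'echo fallback']",
    'metadata': {'local-hostname': 'fallback-host'},
}
```

Because `is_disconnected` is True for this source, the `cc_final_message` change earlier in this commit logs "Used fallback datasource", so falling through to None stays visible on the console.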
+ if 'userdata_raw' in self.ds_cfg: + self.userdata_raw = self.ds_cfg['userdata_raw'] + if 'metadata' in self.ds_cfg: + self.metadata = self.ds_cfg['metadata'] + return True + + def get_instance_id(self): + return 'iid-datasource-none' + + def __str__(self): + return util.obj_name(self) + + @property + def is_disconnected(self): + return True + + +# Used to match classes to dependencies +datasources = [ + (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceNone, []), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index b25724a5..4719d254 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -65,6 +65,10 @@ class DataSource(object): self.userdata = self.ud_proc.process(raw_data) return self.userdata + @property + def is_disconnected(self): + return False + def get_userdata_raw(self): return self.userdata_raw @@ -113,9 +117,9 @@ class DataSource(object): def get_locale(self): return 'en_US.UTF-8' - def get_local_mirror(self): - # ?? - return None + @property + def availability_zone(self): + return self.metadata.get('availability-zone') def get_instance_id(self): if not self.metadata or 'instance-id' not in self.metadata: @@ -162,6 +166,10 @@ class DataSource(object): else: return hostname + def get_package_mirror_info(self): + return self.distro.get_package_mirror_info( + availability_zone=self.availability_zone) + def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list): ds_list = list_sources(cfg_list, ds_deps, pkg_list) diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index e0a2f0ca..88a11a1a 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -181,12 +181,11 @@ def parse_authorized_keys(fname): return contents -def update_authorized_keys(fname, keys): - entries = parse_authorized_keys(fname) +def update_authorized_keys(old_entries, keys): to_add = list(keys) - for i in range(0, len(entries)): - ent = entries[i] + for i in range(0, len(old_entries)): + ent = old_entries[i] if ent.empty() or not ent.base64: continue # Replace those with the same base64 @@ -199,66 +198,81 @@ def update_authorized_keys(fname, keys): # Don't add it later if k in to_add: to_add.remove(k) - entries[i] = ent + old_entries[i] = ent # Now append any entries we did not match above for key in to_add: - entries.append(key) + old_entries.append(key) # Now format them back to strings... 
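The net effect of the refactored `update_authorized_keys` is a pure merge over already-parsed entries: entries sharing a base64 body are replaced in place, everything else is appended, and the result ends with a newline. A simplified stand-in using plain strings (comparing the middle token instead of parsed `base64` attributes):

```python
def key_b64(entry):
    # stand-in for entry.base64: middle token of 'keytype base64 comment'
    return entry.split()[1]

def merge_keys(old_entries, new_keys):
    to_add = list(new_keys)
    for i, ent in enumerate(old_entries):
        for k in new_keys:
            if key_b64(k) == key_b64(ent):
                old_entries[i] = k      # replace the matching entry
                if k in to_add:
                    to_add.remove(k)    # don't append it again below
    old_entries.extend(to_add)
    return '\n'.join([str(b) for b in old_entries] + [''])

print(merge_keys(["ssh-rsa AAAB old@host"],
                 ["ssh-rsa AAAB new@host", "ssh-rsa AAAC extra@host"]))
# the AAAB entry is replaced in place, AAAC appended, newline kept
```

Splitting reading (`extract_authorized_keys`) from merging (`update_authorized_keys`) presumably also makes the merge step testable without touching `/etc/ssh` or a user's home directory.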
- lines = [str(b) for b in entries] + lines = [str(b) for b in old_entries] # Ensure it ends with a newline lines.append('') return '\n'.join(lines) -def setup_user_keys(keys, user, key_prefix, paths): - # Make sure the users .ssh dir is setup accordingly - pwent = pwd.getpwnam(user) - ssh_dir = os.path.join(pwent.pw_dir, '.ssh') - ssh_dir = paths.join(False, ssh_dir) - if not os.path.exists(ssh_dir): - util.ensure_dir(ssh_dir, mode=0700) - util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid) +def users_ssh_info(username, paths): + pw_ent = pwd.getpwnam(username) + if not pw_ent: + raise RuntimeError("Unable to get ssh info for user %r" % (username)) + ssh_dir = paths.join(False, os.path.join(pw_ent.pw_dir, '.ssh')) + return (ssh_dir, pw_ent) - # Turn the keys given into actual entries - parser = AuthKeyLineParser() - key_entries = [] - for k in keys: - key_entries.append(parser.parse(str(k), def_opt=key_prefix)) +def extract_authorized_keys(username, paths): + (ssh_dir, pw_ent) = users_ssh_info(username, paths) sshd_conf_fn = paths.join(True, DEF_SSHD_CFG) + auth_key_fn = None with util.SeLinuxGuard(ssh_dir, recursive=True): try: - # AuthorizedKeysFile may contain tokens + # The 'AuthorizedKeysFile' may contain tokens # of the form %T which are substituted during connection set-up. # The following tokens are defined: %% is replaced by a literal # '%', %h is replaced by the home directory of the user being # authenticated and %u is replaced by the username of that user. ssh_cfg = parse_ssh_config_map(sshd_conf_fn) - akeys = ssh_cfg.get("authorizedkeysfile", '') - akeys = akeys.strip() - if not akeys: - akeys = "%h/.ssh/authorized_keys" - akeys = akeys.replace("%h", pwent.pw_dir) - akeys = akeys.replace("%u", user) - akeys = akeys.replace("%%", '%') - if not akeys.startswith('/'): - akeys = os.path.join(pwent.pw_dir, akeys) - authorized_keys = paths.join(False, akeys) + auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip() + if not auth_key_fn: + auth_key_fn = "%h/.ssh/authorized_keys" + auth_key_fn = auth_key_fn.replace("%h", pw_ent.pw_dir) + auth_key_fn = auth_key_fn.replace("%u", username) + auth_key_fn = auth_key_fn.replace("%%", '%') + if not auth_key_fn.startswith('/'): + auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn) + auth_key_fn = paths.join(False, auth_key_fn) except (IOError, OSError): - authorized_keys = os.path.join(ssh_dir, 'authorized_keys') + # Give up and use a default key filename + auth_key_fn = os.path.join(ssh_dir, 'authorized_keys') util.logexc(LOG, ("Failed extracting 'AuthorizedKeysFile'" " in ssh config" - " from %s, using 'AuthorizedKeysFile' file" - " %s instead"), - sshd_conf_fn, authorized_keys) - - content = update_authorized_keys(authorized_keys, key_entries) - util.ensure_dir(os.path.dirname(authorized_keys), mode=0700) - util.write_file(authorized_keys, content, mode=0600) - util.chownbyid(authorized_keys, pwent.pw_uid, pwent.pw_gid) + " from %r, using 'AuthorizedKeysFile' file" + " %r instead"), + sshd_conf_fn, auth_key_fn) + auth_key_entries = parse_authorized_keys(auth_key_fn) + return (auth_key_fn, auth_key_entries) + + +def setup_user_keys(keys, username, key_prefix, paths): + # Make sure the users .ssh dir is setup accordingly + (ssh_dir, pwent) = users_ssh_info(username, paths) + if not os.path.isdir(ssh_dir): + util.ensure_dir(ssh_dir, mode=0700) + util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid) + + # Turn the 'update' keys given into actual entries + parser = AuthKeyLineParser() + key_entries = [] + for k in keys: + 
key_entries.append(parser.parse(str(k), def_opt=key_prefix)) + + # Extract the old and make the new + (auth_key_fn, auth_key_entries) = extract_authorized_keys(username, paths) + with util.SeLinuxGuard(ssh_dir, recursive=True): + content = update_authorized_keys(auth_key_entries, key_entries) + util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700) + util.write_file(auth_key_fn, content, mode=0600) + util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid) class SshdConfigLine(object): diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 2f6a566c..c9634a90 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -326,7 +326,7 @@ class Init(object): 'paths': self.paths, 'datasource': self.datasource, } - # TODO Hmmm, should we dynamically import these?? + # TODO(harlowja) Hmmm, should we dynamically import these?? def_handlers = [ cc_part.CloudConfigPartHandler(**opts), ss_part.ShellScriptPartHandler(**opts), @@ -519,7 +519,7 @@ class Modules(object): " but not on %s distro. It may or may not work" " correctly."), name, worked_distros, d_name) # Use the configs logger and not our own - # TODO: possibly check the module + # TODO(harlowja): possibly check the module # for having a LOG attr and just give it back # its own logger? func_args = [name, self.cfg, diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index f5d01818..af98b488 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -23,9 +23,9 @@ import os import email +from email.mime.base import MIMEBase from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText -from email.mime.base import MIMEBase from cloudinit import handlers from cloudinit import log as logging @@ -159,7 +159,7 @@ class UserDataProcessor(object): if isinstance(ent, (str, basestring)): ent = {'content': ent} if not isinstance(ent, (dict)): - # TODO raise? + # TODO(harlowja) raise? continue content = ent.get('content', '') diff --git a/cloudinit/util.py b/cloudinit/util.py index a7d72d59..7d56e8be 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -24,8 +24,8 @@ from StringIO import StringIO -import copy as obj_copy import contextlib +import copy as obj_copy import errno import glob import grp @@ -317,8 +317,9 @@ def multi_log(text, console=True, stderr=True, else: log.log(log_level, text) + def is_ipv4(instr): - """ determine if input string is a ipv4 address. return boolean""" + """determine if input string is a ipv4 address. return boolean.""" toks = instr.split('.') if len(toks) != 4: return False @@ -826,12 +827,12 @@ def get_cmdline_url(names=('cloud-config-url', 'url'), def is_resolvable(name): - """ determine if a url is resolvable, return a boolean + """determine if a url is resolvable, return a boolean This also attempts to be resilent against dns redirection. Note, that normal nsswitch resolution is used here. So in order to avoid any utilization of 'search' entries in /etc/resolv.conf - we have to append '.'. + we have to append '.'. The top level 'invalid' domain is invalid per RFC. And example.com should also not exist. 
The random entry will be resolved inside @@ -847,7 +848,7 @@ def is_resolvable(name): try: result = socket.getaddrinfo(iname, None, 0, 0, socket.SOCK_STREAM, socket.AI_CANONNAME) - badresults[iname] = [] + badresults[iname] = [] for (_fam, _stype, _proto, cname, sockaddr) in result: badresults[iname].append("%s: %s" % (cname, sockaddr[0])) badips.add(sockaddr[0]) @@ -856,7 +857,7 @@ def is_resolvable(name): _DNS_REDIRECT_IP = badips if badresults: LOG.debug("detected dns redirection: %s" % badresults) - + try: result = socket.getaddrinfo(name, None) # check first result's sockaddr field @@ -874,7 +875,7 @@ def get_hostname(): def is_resolvable_url(url): - """ determine if this url is resolvable (existing or ip) """ + """determine if this url is resolvable (existing or ip).""" return (is_resolvable(urlparse.urlparse(url).hostname)) @@ -1105,7 +1106,7 @@ def hash_blob(blob, routine, mlen=None): def rename(src, dest): LOG.debug("Renaming %s to %s", src, dest) - # TODO use a se guard here?? + # TODO(harlowja) use a se guard here?? os.rename(src, dest) diff --git a/config/cloud.cfg b/config/cloud.cfg index 7933b4ce..2744c940 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -61,6 +61,7 @@ cloud_final_modules: - scripts-per-boot - scripts-per-instance - scripts-user + - ssh-authkey-fingerprints - keys-to-console - phone-home - final-message @@ -75,6 +76,18 @@ system_info: cloud_dir: /var/lib/cloud/ templates_dir: /etc/cloud/templates/ upstart_dir: /etc/init/ - package_mirror: http://archive.ubuntu.com/ubuntu - package_mirror_ec2_template: http://%(region)s.ec2.archive.ubuntu.com/ubuntu/ + package_mirrors: + - arches: [i386, amd64] + failsafe: + primary: http://archive.ubuntu.com/ubuntu + security: http://security.ubuntu.com/ubuntu + search: + primary: + - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/ + - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/ + security: [] + - arches: [armhf, armel, default] + failsafe: + primary: http://ports.ubuntu.com/ubuntu + security: http://ports.ubuntu.com/ubuntu ssh_svcname: ssh diff --git a/templates/sources.list.tmpl b/templates/sources.list.tmpl index f702025f..ce395b3d 100644 --- a/templates/sources.list.tmpl +++ b/templates/sources.list.tmpl @@ -52,9 +52,9 @@ deb-src $mirror $codename-updates universe # deb http://archive.canonical.com/ubuntu $codename partner # deb-src http://archive.canonical.com/ubuntu $codename partner -deb http://security.ubuntu.com/ubuntu $codename-security main -deb-src http://security.ubuntu.com/ubuntu $codename-security main -deb http://security.ubuntu.com/ubuntu $codename-security universe -deb-src http://security.ubuntu.com/ubuntu $codename-security universe -# deb http://security.ubuntu.com/ubuntu $codename-security multiverse -# deb-src http://security.ubuntu.com/ubuntu $codename-security multiverse +deb $security $codename-security main +deb-src $security $codename-security main +deb $security $codename-security universe +deb-src $security $codename-security universe +# deb $security $codename-security multiverse +# deb-src $security $codename-security multiverse diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 464c8c2f..ac082076 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -1,6 +1,6 @@ -import StringIO import logging import os +import StringIO import sys from mocker import MockerTestCase, ANY, ARGS, KWARGS @@ -61,14 +61,14 @@ class TestWalkerHandleHandler(MockerTestCase): import_mock(self.expected_module_name) 
self.mocker.result(self.module_fake) self.mocker.replay() - + handlers.walker_handle_handler(self.data, self.ctype, self.filename, self.payload) - + self.assertEqual(1, self.data["handlercount"]) - + def test_import_error(self): - """Module import errors are logged. No handler added to C{pdata}""" + """Module import errors are logged. No handler added to C{pdata}.""" import_mock = self.mocker.replace(importer.import_module, passthrough=False) import_mock(self.expected_module_name) @@ -81,7 +81,7 @@ class TestWalkerHandleHandler(MockerTestCase): self.assertEqual(0, self.data["handlercount"]) def test_attribute_error(self): - """Attribute errors are logged. No handler added to C{pdata}""" + """Attribute errors are logged. No handler added to C{pdata}.""" import_mock = self.mocker.replace(importer.import_module, passthrough=False) import_mock(self.expected_module_name) @@ -156,7 +156,7 @@ class TestHandlerHandlePart(MockerTestCase): self.payload, self.frequency) def test_no_handle_when_modfreq_once(self): - """C{handle_part} is not called if frequency is once""" + """C{handle_part} is not called if frequency is once.""" self.frequency = "once" mod_mock = self.mocker.mock() getattr(mod_mock, "frequency") diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index 5bba8bc9..ebc0bd51 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -1,4 +1,4 @@ -"""Tests of the built-in user data handlers""" +"""Tests of the built-in user data handlers.""" import os @@ -33,7 +33,7 @@ class TestBuiltins(MockerTestCase): None, None, None) self.assertEquals(0, len(os.listdir(up_root))) - def test_upstart_frequency_single(self): + def test_upstart_frequency_single(self): c_root = self.makeDir() up_root = self.makeDir() paths = helpers.Paths({ diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py index 54e152e9..bda61c7e 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/test_datasource/test_altcloud.py @@ -25,14 +25,15 @@ import os import shutil import tempfile -from unittest import TestCase from cloudinit import helpers +from unittest import TestCase # Get the cloudinit.sources.DataSourceAltCloud import items needed. 
import cloudinit.sources.DataSourceAltCloud from cloudinit.sources.DataSourceAltCloud import DataSourceAltCloud from cloudinit.sources.DataSourceAltCloud import read_user_data_callback + def _write_cloud_info_file(value): ''' Populate the CLOUD_INFO_FILE which would be populated @@ -44,12 +45,14 @@ def _write_cloud_info_file(value): cifile.close() os.chmod(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 0664) + def _remove_cloud_info_file(): ''' Remove the test CLOUD_INFO_FILE ''' os.remove(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE) + def _write_user_data_files(mount_dir, value): ''' Populate the deltacloud_user_data_file the user_data_file @@ -68,6 +71,7 @@ def _write_user_data_files(mount_dir, value): udfile.close() os.chmod(user_data_file, 0664) + def _remove_user_data_files(mount_dir, dc_file=True, non_dc_file=True): @@ -91,14 +95,15 @@ def _remove_user_data_files(mount_dir, except OSError: pass + class TestGetCloudType(TestCase): ''' - Test to exercise method: DataSourceAltCloud.get_cloud_type() + Test to exercise method: DataSourceAltCloud.get_cloud_type() ''' def setUp(self): - ''' Set up ''' - self.paths = helpers.Paths({ 'cloud_dir': '/tmp' }) + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) def tearDown(self): # Reset @@ -158,14 +163,15 @@ class TestGetCloudType(TestCase): self.assertEquals('UNKNOWN', \ dsrc.get_cloud_type()) + class TestGetDataCloudInfoFile(TestCase): ''' - Test to exercise method: DataSourceAltCloud.get_data() + Test to exercise method: DataSourceAltCloud.get_data() With a contrived CLOUD_INFO_FILE ''' def setUp(self): - ''' Set up ''' - self.paths = helpers.Paths({ 'cloud_dir': '/tmp' }) + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) self.cloud_info_file = tempfile.mkstemp()[1] cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \ self.cloud_info_file @@ -183,52 +189,53 @@ class TestGetDataCloudInfoFile(TestCase): '/etc/sysconfig/cloud-info' def test_rhev(self): - '''Success Test module get_data() forcing RHEV ''' + '''Success Test module get_data() forcing RHEV.''' _write_cloud_info_file('RHEV') dsrc = DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_rhevm = lambda : True + dsrc.user_data_rhevm = lambda: True self.assertEquals(True, dsrc.get_data()) def test_vsphere(self): - '''Success Test module get_data() forcing VSPHERE ''' + '''Success Test module get_data() forcing VSPHERE.''' _write_cloud_info_file('VSPHERE') dsrc = DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_vsphere = lambda : True + dsrc.user_data_vsphere = lambda: True self.assertEquals(True, dsrc.get_data()) def test_fail_rhev(self): - '''Failure Test module get_data() forcing RHEV ''' + '''Failure Test module get_data() forcing RHEV.''' _write_cloud_info_file('RHEV') dsrc = DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_rhevm = lambda : False + dsrc.user_data_rhevm = lambda: False self.assertEquals(False, dsrc.get_data()) def test_fail_vsphere(self): - '''Failure Test module get_data() forcing VSPHERE ''' + '''Failure Test module get_data() forcing VSPHERE.''' _write_cloud_info_file('VSPHERE') dsrc = DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_vsphere = lambda : False + dsrc.user_data_vsphere = lambda: False self.assertEquals(False, dsrc.get_data()) def test_unrecognized(self): - '''Failure Test module get_data() forcing unrecognized ''' + '''Failure Test module get_data() forcing unrecognized.''' _write_cloud_info_file('unrecognized') dsrc = DataSourceAltCloud({}, None, self.paths) 
self.assertEquals(False, dsrc.get_data()) + class TestGetDataNoCloudInfoFile(TestCase): ''' - Test to exercise method: DataSourceAltCloud.get_data() + Test to exercise method: DataSourceAltCloud.get_data() Without a CLOUD_INFO_FILE ''' def setUp(self): - ''' Set up ''' - self.paths = helpers.Paths({ 'cloud_dir': '/tmp' }) + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \ 'no such file' @@ -240,38 +247,39 @@ class TestGetDataNoCloudInfoFile(TestCase): ['dmidecode', '--string', 'system-product-name'] def test_rhev_no_cloud_file(self): - '''Test No cloud info file module get_data() forcing RHEV ''' + '''Test No cloud info file module get_data() forcing RHEV.''' cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \ ['echo', 'RHEV Hypervisor'] dsrc = DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_rhevm = lambda : True + dsrc.user_data_rhevm = lambda: True self.assertEquals(True, dsrc.get_data()) def test_vsphere_no_cloud_file(self): - '''Test No cloud info file module get_data() forcing VSPHERE ''' + '''Test No cloud info file module get_data() forcing VSPHERE.''' cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \ ['echo', 'VMware Virtual Platform'] dsrc = DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_vsphere = lambda : True + dsrc.user_data_vsphere = lambda: True self.assertEquals(True, dsrc.get_data()) def test_failure_no_cloud_file(self): - '''Test No cloud info file module get_data() forcing unrecognized ''' + '''Test No cloud info file module get_data() forcing unrecognized.''' cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \ ['echo', 'Unrecognized Platform'] dsrc = DataSourceAltCloud({}, None, self.paths) self.assertEquals(False, dsrc.get_data()) + class TestUserDataRhevm(TestCase): ''' - Test to exercise method: DataSourceAltCloud.user_data_rhevm() + Test to exercise method: DataSourceAltCloud.user_data_rhevm() ''' def setUp(self): - ''' Set up ''' - self.paths = helpers.Paths({ 'cloud_dir': '/tmp' }) + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) self.mount_dir = tempfile.mkdtemp() _write_user_data_files(self.mount_dir, 'test user data') @@ -295,7 +303,7 @@ class TestUserDataRhevm(TestCase): ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5'] def test_mount_cb_fails(self): - '''Test user_data_rhevm() where mount_cb fails''' + '''Test user_data_rhevm() where mount_cb fails.''' cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \ ['echo', 'modprobe floppy'] @@ -305,7 +313,7 @@ class TestUserDataRhevm(TestCase): self.assertEquals(False, dsrc.user_data_rhevm()) def test_modprobe_fails(self): - '''Test user_data_rhevm() where modprobe fails. ''' + '''Test user_data_rhevm() where modprobe fails.''' cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \ ['ls', 'modprobe floppy'] @@ -315,7 +323,7 @@ class TestUserDataRhevm(TestCase): self.assertEquals(False, dsrc.user_data_rhevm()) def test_no_modprobe_cmd(self): - '''Test user_data_rhevm() with no modprobe command. ''' + '''Test user_data_rhevm() with no modprobe command.''' cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \ ['bad command', 'modprobe floppy'] @@ -325,7 +333,7 @@ class TestUserDataRhevm(TestCase): self.assertEquals(False, dsrc.user_data_rhevm()) def test_udevadm_fails(self): - '''Test user_data_rhevm() where udevadm fails. 
''' + '''Test user_data_rhevm() where udevadm fails.''' cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \ ['ls', 'udevadm floppy'] @@ -335,7 +343,7 @@ class TestUserDataRhevm(TestCase): self.assertEquals(False, dsrc.user_data_rhevm()) def test_no_udevadm_cmd(self): - '''Test user_data_rhevm() with no udevadm command. ''' + '''Test user_data_rhevm() with no udevadm command.''' cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \ ['bad command', 'udevadm floppy'] @@ -344,13 +352,14 @@ class TestUserDataRhevm(TestCase): self.assertEquals(False, dsrc.user_data_rhevm()) + class TestUserDataVsphere(TestCase): ''' - Test to exercise method: DataSourceAltCloud.user_data_vsphere() + Test to exercise method: DataSourceAltCloud.user_data_vsphere() ''' def setUp(self): - ''' Set up ''' - self.paths = helpers.Paths({ 'cloud_dir': '/tmp' }) + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) self.mount_dir = tempfile.mkdtemp() _write_user_data_files(self.mount_dir, 'test user data') @@ -370,7 +379,7 @@ class TestUserDataVsphere(TestCase): '/etc/sysconfig/cloud-info' def test_user_data_vsphere(self): - '''Test user_data_vsphere() where mount_cb fails''' + '''Test user_data_vsphere() where mount_cb fails.''' cloudinit.sources.DataSourceAltCloud.MEDIA_DIR = self.mount_dir @@ -378,13 +387,14 @@ class TestUserDataVsphere(TestCase): self.assertEquals(False, dsrc.user_data_vsphere()) + class TestReadUserDataCallback(TestCase): ''' - Test to exercise method: DataSourceAltCloud.read_user_data_callback() + Test to exercise method: DataSourceAltCloud.read_user_data_callback() ''' def setUp(self): - ''' Set up ''' - self.paths = helpers.Paths({ 'cloud_dir': '/tmp' }) + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) self.mount_dir = tempfile.mkdtemp() _write_user_data_files(self.mount_dir, 'test user data') @@ -400,15 +410,14 @@ class TestReadUserDataCallback(TestCase): except OSError: pass - def test_callback_both(self): - '''Test read_user_data_callback() with both files''' + '''Test read_user_data_callback() with both files.''' self.assertEquals('test user data', read_user_data_callback(self.mount_dir)) def test_callback_dc(self): - '''Test read_user_data_callback() with only DC file''' + '''Test read_user_data_callback() with only DC file.''' _remove_user_data_files(self.mount_dir, dc_file=False, @@ -418,7 +427,7 @@ class TestReadUserDataCallback(TestCase): read_user_data_callback(self.mount_dir)) def test_callback_non_dc(self): - '''Test read_user_data_callback() with only non-DC file''' + '''Test read_user_data_callback() with only non-DC file.''' _remove_user_data_files(self.mount_dir, dc_file=True, @@ -428,9 +437,9 @@ class TestReadUserDataCallback(TestCase): read_user_data_callback(self.mount_dir)) def test_callback_none(self): - '''Test read_user_data_callback() no files are found''' + '''Test read_user_data_callback() no files are found.''' - _remove_user_data_files(self.mount_dir) + _remove_user_data_files(self.mount_dir) self.assertEquals(None, read_user_data_callback(self.mount_dir)) # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 8a155f39..85e6add0 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -1,8 +1,8 @@ -import os from copy import copy +import os -from cloudinit import url_helper from cloudinit.sources import DataSourceMAAS +from cloudinit import url_helper from mocker import MockerTestCase @@ -15,7 
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index 8a155f39..85e6add0 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -1,8 +1,8 @@
-import os
 from copy import copy
+import os
 
-from cloudinit import url_helper
 from cloudinit.sources import DataSourceMAAS
+from cloudinit import url_helper
 
 from mocker import MockerTestCase
 
@@ -15,7 +15,7 @@ class TestMAASDataSource(MockerTestCase):
         self.tmp = self.makeDir()
 
     def test_seed_dir_valid(self):
-        """Verify a valid seeddir is read as such"""
+        """Verify a valid seeddir is read as such."""
 
         data = {'instance-id': 'i-valid01',
             'local-hostname': 'valid01-hostname',
@@ -35,7 +35,7 @@ class TestMAASDataSource(MockerTestCase):
         self.assertFalse(('user-data' in metadata))
 
     def test_seed_dir_valid_extra(self):
-        """Verify extra files do not affect seed_dir validity """
+        """Verify extra files do not affect seed_dir validity."""
 
         data = {'instance-id': 'i-valid-extra',
             'local-hostname': 'valid-extra-hostname',
@@ -54,7 +54,7 @@ class TestMAASDataSource(MockerTestCase):
         self.assertFalse(('foo' in metadata))
 
     def test_seed_dir_invalid(self):
-        """Verify that invalid seed_dir raises MAASSeedDirMalformed"""
+        """Verify that invalid seed_dir raises MAASSeedDirMalformed."""
 
         valid = {'instance-id': 'i-instanceid',
             'local-hostname': 'test-hostname', 'user-data': ''}
@@ -78,20 +78,20 @@ class TestMAASDataSource(MockerTestCase):
             DataSourceMAAS.read_maas_seed_dir, my_d)
 
     def test_seed_dir_none(self):
-        """Verify that empty seed_dir raises MAASSeedDirNone"""
+        """Verify that empty seed_dir raises MAASSeedDirNone."""
 
         my_d = os.path.join(self.tmp, "valid_empty")
         self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
             DataSourceMAAS.read_maas_seed_dir, my_d)
 
     def test_seed_dir_missing(self):
-        """Verify that missing seed_dir raises MAASSeedDirNone"""
-        self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
+        """Verify that missing seed_dir raises MAASSeedDirNone."""
+        self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
             DataSourceMAAS.read_maas_seed_dir,
             os.path.join(self.tmp, "nonexistantdirectory"))
 
     def test_seed_url_valid(self):
-        """Verify that valid seed_url is read as such"""
+        """Verify that valid seed_url is read as such."""
         valid = {'meta-data/instance-id': 'i-instanceid',
             'meta-data/local-hostname': 'test-hostname',
             'meta-data/public-keys': 'test-hostname',
@@ -129,11 +129,11 @@ class TestMAASDataSource(MockerTestCase):
             valid['meta-data/local-hostname'])
 
     def test_seed_url_invalid(self):
-        """Verify that invalid seed_url raises MAASSeedDirMalformed"""
+        """Verify that invalid seed_url raises MAASSeedDirMalformed."""
         pass
 
     def test_seed_url_missing(self):
-        """Verify seed_url with no found entries raises MAASSeedDirNone"""
+        """Verify seed_url with no found entries raises MAASSeedDirNone."""
         pass
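For context: a MAAS seed directory, as exercised above, is nothing more than one small file per metadata key, with the file content as the value. A minimal, hypothetical fixture builder in that spirit (the helper the tests actually use is not shown in this hunk):

    import os


    def populate_seed_dir(path, files):
        # One file per key; read_maas_seed_dir() then raises
        # MAASSeedDirNone for an empty or missing directory and
        # MAASSeedDirMalformed for a directory with the wrong shape,
        # per the tests above.
        os.makedirs(path)
        for name, content in files.items():
            with open(os.path.join(path, name), 'w') as fp:
                fp.write(content)


    populate_seed_dir('/tmp/maas-seed', {
        'instance-id': 'i-valid01',
        'local-hostname': 'valid01-hostname',
        'user-data': 'valid01-userdata',
    })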
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
new file mode 100644
index 00000000..2df4c2f0
--- /dev/null
+++ b/tests/unittests/test_distros/test_generic.py
@@ -0,0 +1,121 @@
+from mocker import MockerTestCase
+
+from cloudinit import distros
+
+unknown_arch_info = {
+    'arches': ['default'],
+    'failsafe': {'primary': 'http://fs-primary-default',
+                 'security': 'http://fs-security-default'}
+}
+
+package_mirrors = [
+    {'arches': ['i386', 'amd64'],
+     'failsafe': {'primary': 'http://fs-primary-intel',
+                  'security': 'http://fs-security-intel'},
+     'search': {
+         'primary': ['http://%(ec2_region)s.ec2/',
+                     'http://%(availability_zone)s.clouds/'],
+         'security': ['http://security-mirror1-intel',
+                      'http://security-mirror2-intel']}},
+    {'arches': ['armhf', 'armel'],
+     'failsafe': {'primary': 'http://fs-primary-arm',
+                  'security': 'http://fs-security-arm'}},
+    unknown_arch_info
+]
+
+gpmi = distros._get_package_mirror_info  # pylint: disable=W0212
+gapmi = distros._get_arch_package_mirror_info  # pylint: disable=W0212
+
+
+class TestGenericDistro(MockerTestCase):
+
+    def return_first(self, mlist):
+        if not mlist:
+            return None
+        return mlist[0]
+
+    def return_second(self, mlist):
+        if not mlist:
+            return None
+        return mlist[1]
+
+    def return_none(self, _mlist):
+        return None
+
+    def return_last(self, mlist):
+        if not mlist:
+            return None
+        return mlist[-1]
+
+    def setUp(self):
+        super(TestGenericDistro, self).setUp()
+        # Make a temp directory for tests to use.
+        self.tmp = self.makeDir()
+
+    def test_arch_package_mirror_info_unknown(self):
+        """For an unknown arch, the entry with arch 'default' is used."""
+        arch_mirrors = gapmi(package_mirrors, arch="unknown")
+        self.assertEqual(unknown_arch_info, arch_mirrors)
+
+    def test_arch_package_mirror_info_known(self):
+        arch_mirrors = gapmi(package_mirrors, arch="amd64")
+        self.assertEqual(package_mirrors[0], arch_mirrors)
+
+    def test_get_package_mirror_info_az_ec2(self):
+        arch_mirrors = gapmi(package_mirrors, arch="amd64")
+
+        results = gpmi(arch_mirrors, availability_zone="us-east-1a",
+                       mirror_filter=self.return_first)
+        self.assertEqual(results,
+                         {'primary': 'http://us-east-1.ec2/',
+                          'security': 'http://security-mirror1-intel'})
+
+        results = gpmi(arch_mirrors, availability_zone="us-east-1a",
+                       mirror_filter=self.return_second)
+        self.assertEqual(results,
+                         {'primary': 'http://us-east-1a.clouds/',
+                          'security': 'http://security-mirror2-intel'})
+
+        results = gpmi(arch_mirrors, availability_zone="us-east-1a",
+                       mirror_filter=self.return_none)
+        self.assertEqual(results, package_mirrors[0]['failsafe'])
+
+    def test_get_package_mirror_info_az_non_ec2(self):
+        arch_mirrors = gapmi(package_mirrors, arch="amd64")
+
+        results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor",
+                       mirror_filter=self.return_first)
+        self.assertEqual(results,
+                         {'primary': 'http://nova.cloudvendor.clouds/',
+                          'security': 'http://security-mirror1-intel'})
+
+        results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor",
+                       mirror_filter=self.return_last)
+        self.assertEqual(results,
+                         {'primary': 'http://nova.cloudvendor.clouds/',
+                          'security': 'http://security-mirror2-intel'})
+
+    def test_get_package_mirror_info_none(self):
+        arch_mirrors = gapmi(package_mirrors, arch="amd64")
+
+        # Because both search entries here do replacement based on
+        # availability-zone, the filter will be called with an empty
+        # list and the failsafe mirror should be used.
+        results = gpmi(arch_mirrors, availability_zone=None,
+                       mirror_filter=self.return_first)
+        self.assertEqual(results,
+                         {'primary': 'http://fs-primary-intel',
+                          'security': 'http://security-mirror1-intel'})
+
+        results = gpmi(arch_mirrors, availability_zone=None,
+                       mirror_filter=self.return_last)
+        self.assertEqual(results,
+                         {'primary': 'http://fs-primary-intel',
+                          'security': 'http://security-mirror2-intel'})
+
+
+# vi: ts=4 expandtab
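For context: the behavior these new tests pin down is availability-zone template substitution for mirror selection. The function below is illustrative only, not the cloud-init implementation, and its EC2 zone regex is an assumption, but it reproduces the expectations asserted above: an EC2-style zone supplies both an ec2_region and an availability_zone key, a non-EC2 zone supplies only the latter, and no zone at all yields an empty candidate list so the failsafe mirror wins.

    import re


    def expand_mirror_templates(availability_zone, templates):
        # Illustrative sketch: expand %()s-style mirror templates the
        # way the tests above expect.
        subst = {}
        if availability_zone:
            subst['availability_zone'] = availability_zone
            # Treat 'us-east-1a'-style names as EC2 zones; the region
            # is the zone minus its trailing letter.  (Assumed pattern,
            # not the actual cloud-init regex.)
            if re.match(r"^[a-z]{2}-[a-z]+-[0-9][a-z]$", availability_zone):
                subst['ec2_region'] = availability_zone[:-1]
        expanded = []
        for tmpl in templates:
            try:
                expanded.append(tmpl % subst)
            except KeyError:
                pass  # template needs a key this zone cannot supply
        return expanded


    templates = ['http://%(ec2_region)s.ec2/',
                 'http://%(availability_zone)s.clouds/']
    print(expand_mirror_templates('us-east-1a', templates))
    # -> ['http://us-east-1.ec2/', 'http://us-east-1a.clouds/']
    print(expand_mirror_templates('nova.cloudvendor', templates))
    # -> ['http://nova.cloudvendor.clouds/']
    print(expand_mirror_templates(None, templates))
    # -> [] (the mirror_filter then finds nothing; failsafe is used)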
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
index 948de4c4..d3df5c50 100644
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler/test_handler_ca_certs.py
@@ -1,8 +1,8 @@
 from mocker import MockerTestCase
 
-from cloudinit import util
 from cloudinit import cloud
 from cloudinit import helpers
+from cloudinit import util
 
 from cloudinit.config import cc_ca_certs
 
@@ -64,7 +64,7 @@ class TestConfig(MockerTestCase):
         cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
 
     def test_empty_trusted_list(self):
-        """Test that no certificate are written if 'trusted' list is empty"""
+        """Test that no certificates are written if 'trusted' list is empty."""
         config = {"ca-certs": {"trusted": []}}
 
         # No functions should be called
@@ -74,7 +74,7 @@ class TestConfig(MockerTestCase):
         cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
 
     def test_single_trusted(self):
-        """Test that a single cert gets passed to add_ca_certs"""
+        """Test that a single cert gets passed to add_ca_certs."""
         config = {"ca-certs": {"trusted": ["CERT1"]}}
 
         self.mock_add(self.paths, ["CERT1"])
@@ -84,7 +84,7 @@ class TestConfig(MockerTestCase):
         cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
 
     def test_multiple_trusted(self):
-        """Test that multiple certs get passed to add_ca_certs"""
+        """Test that multiple certs get passed to add_ca_certs."""
         config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}}
 
         self.mock_add(self.paths, ["CERT1", "CERT2"])
@@ -94,7 +94,7 @@ class TestConfig(MockerTestCase):
         cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
 
     def test_remove_default_ca_certs(self):
-        """Test remove_defaults works as expected"""
+        """Test remove_defaults works as expected."""
         config = {"ca-certs": {"remove-defaults": True}}
 
         self.mock_remove(self.paths)
@@ -104,7 +104,7 @@ class TestConfig(MockerTestCase):
         cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
 
     def test_no_remove_defaults_if_false(self):
-        """Test remove_defaults is not called when config value is False"""
+        """Test remove_defaults is not called when config value is False."""
         config = {"ca-certs": {"remove-defaults": False}}
 
         self.mock_update()
@@ -113,7 +113,7 @@ class TestConfig(MockerTestCase):
         cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
 
     def test_correct_order_for_remove_then_add(self):
-        """Test remove_defaults is not called when config value is False"""
+        """Test defaults are removed before trusted certs are added."""
         config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}}
 
         self.mock_remove(self.paths)
@@ -139,7 +139,7 @@ class TestAddCaCerts(MockerTestCase):
         cc_ca_certs.add_ca_certs(self.paths, [])
 
     def test_single_cert(self):
-        """Test adding a single certificate to the trusted CAs"""
+        """Test adding a single certificate to the trusted CAs."""
         cert = "CERT1\nLINE2\nLINE3"
 
         mock_write = self.mocker.replace(util.write_file, passthrough=False)
@@ -152,7 +152,7 @@ class TestAddCaCerts(MockerTestCase):
         cc_ca_certs.add_ca_certs(self.paths, [cert])
 
     def test_multiple_certs(self):
-        """Test adding multiple certificates to the trusted CAs"""
+        """Test adding multiple certificates to the trusted CAs."""
         certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"]
         expected_cert_file = "\n".join(certs)
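For context: test_correct_order_for_remove_then_add exists because ordering matters here; removing the distro default certificates after adding the 'trusted' ones would wipe out the additions. A runnable toy version of the flow the tests assert, with hypothetical print-stubs standing in for the real helpers that the mocks replace:

    def remove_default_ca_certs():
        print("removing distro default CA certs")


    def add_ca_certs(certs):
        print("appending %d cert(s) to the trusted store" % len(certs))


    def update_ca_certs():
        print("refreshing the system CA bundle")


    def handle_ca_certs(cfg):
        # Remove first, then add, then update: the order the tests assert.
        ca_cfg = cfg.get('ca-certs', {})
        if ca_cfg.get('remove-defaults', False):
            remove_default_ca_certs()
        trusted = ca_cfg.get('trusted', [])
        if trusted:
            add_ca_certs(trusted)
        update_ca_certs()


    handle_ca_certs({'ca-certs': {'remove-defaults': True,
                                  'trusted': ['CERT1']}})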
diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py
index fbbf07f2..82a4c555 100644
--- a/tests/unittests/test_userdata.py
+++ b/tests/unittests/test_userdata.py
@@ -1,4 +1,4 @@
-"""Tests for handling of userdata within cloud init"""
+"""Tests for handling of userdata within cloud init."""
 
 import StringIO
 
@@ -54,7 +54,7 @@ class TestConsumeUserData(MockerTestCase):
         return log_file
 
     def test_unhandled_type_warning(self):
-        """Raw text without magic is ignored but shows warning"""
+        """Raw text without magic is ignored but shows warning."""
         ci = stages.Init()
         data = "arbitrary text\n"
         ci.datasource = FakeDataSource(data)
@@ -70,7 +70,7 @@ class TestConsumeUserData(MockerTestCase):
             log_file.getvalue())
 
     def test_mime_text_plain(self):
-        """Mime message of type text/plain is ignored but shows warning"""
+        """Mime message of type text/plain is ignored but shows warning."""
         ci = stages.Init()
         message = MIMEBase("text", "plain")
         message.set_payload("Just text")
@@ -86,9 +86,8 @@ class TestConsumeUserData(MockerTestCase):
             "Unhandled unknown content-type (text/plain)",
             log_file.getvalue())
 
-
     def test_shellscript(self):
-        """Raw text starting #!/bin/sh is treated as script"""
+        """Raw text starting #!/bin/sh is treated as script."""
         ci = stages.Init()
         script = "#!/bin/sh\necho hello\n"
         ci.datasource = FakeDataSource(script)
@@ -104,7 +103,7 @@ class TestConsumeUserData(MockerTestCase):
         self.assertEqual("", log_file.getvalue())
 
     def test_mime_text_x_shellscript(self):
-        """Mime message of type text/x-shellscript is treated as script"""
+        """Mime message of type text/x-shellscript is treated as script."""
         ci = stages.Init()
         script = "#!/bin/sh\necho hello\n"
         message = MIMEBase("text", "x-shellscript")
@@ -122,7 +121,7 @@ class TestConsumeUserData(MockerTestCase):
         self.assertEqual("", log_file.getvalue())
 
     def test_mime_text_plain_shell(self):
-        """Mime type text/plain starting #!/bin/sh is treated as script"""
+        """Mime type text/plain starting #!/bin/sh is treated as script."""
         ci = stages.Init()
         script = "#!/bin/sh\necho hello\n"
         message = MIMEBase("text", "plain")
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 19f66cc4..15fcbd26 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,11 +1,11 @@
 import os
 import stat
-from unittest import TestCase
 
 from mocker import MockerTestCase
+from unittest import TestCase
 
-from cloudinit import util
 from cloudinit import importer
+from cloudinit import util
 
 
 class FakeSelinux(object):
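For context: the user-data tests above all build their payloads the same way; only the MIME type (or a leading '#!') changes, and that is what decides whether cloud-init runs the part as a script or ignores it with a warning. Building such payloads takes only the standard library:

    from email.mime.base import MIMEBase

    script = "#!/bin/sh\necho hello\n"

    # text/x-shellscript is handled as a script ...
    handled = MIMEBase("text", "x-shellscript")
    handled.set_payload(script)

    # ... while text/plain without a '#!' payload is ignored with an
    # 'Unhandled unknown content-type (text/plain)' warning.
    ignored = MIMEBase("text", "plain")
    ignored.set_payload("Just text")

    print(handled.as_string())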
current_file = "" @@ -169,4 +168,3 @@ if __name__ == "__main__": if len(_missingImport) > 0: print >> sys.stderr, ("%i imports missing in this test environment" % len(_missingImport)) - diff --git a/tools/mock-meta.py b/tools/mock-meta.py index 78838f64..c79f0598 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -6,7 +6,7 @@ """ To use this to mimic the EC2 metadata service entirely, run it like: - # Where 'eth0' is *some* interface. + # Where 'eth0' is *some* interface. sudo ifconfig eth0:0 169.254.169.254 netmask 255.255.255.255 sudo ./mock-meta.py -a 169.254.169.254 -p 80 @@ -23,7 +23,7 @@ import json import logging import os import random -import string # pylint: disable=W0402 +import string # pylint: disable=W0402 import sys import yaml @@ -156,6 +156,8 @@ def traverse(keys, mp): ID_CHARS = [c for c in (string.ascii_uppercase + string.digits)] + + def id_generator(size=6, lower=False): txt = ''.join(random.choice(ID_CHARS) for x in range(size)) if lower: @@ -235,11 +237,11 @@ class MetaDataHandler(object): nparams = params[1:] # This is a weird kludge, why amazon why!!! # public-keys is messed up, list of /latest/meta-data/public-keys/ - # shows something like: '0=brickies' - # but a GET to /latest/meta-data/public-keys/0=brickies will fail - # you have to know to get '/latest/meta-data/public-keys/0', then - # from there you get a 'openssh-key', which you can get. - # this hunk of code just re-works the object for that. + # shows something like: '0=brickies' + # but a GET to /latest/meta-data/public-keys/0=brickies will fail + # you have to know to get '/latest/meta-data/public-keys/0', then + # from there you get a 'openssh-key', which you can get. + # this hunk of code just re-works the object for that. avail_keys = get_ssh_keys() key_ids = sorted(list(avail_keys.keys())) if nparams: @@ -255,7 +257,7 @@ class MetaDataHandler(object): "openssh-key": "\n".join(avail_keys[key_name]), }) if isinstance(result, (dict)): - # TODO: This might not be right?? + # TODO(harlowja): This might not be right?? result = "\n".join(sorted(result.keys())) if not result: result = '' @@ -304,13 +306,13 @@ class UserDataHandler(object): blob = "\n".join(lines) return blob.strip() - def get_data(self, params, who, **kwargs): # pylint: disable=W0613 + def get_data(self, params, who, **kwargs): # pylint: disable=W0613 if not params: return self._get_user_blob(who=who) return NOT_IMPL_RESPONSE -# Seem to need to use globals since can't pass +# Seem to need to use globals since can't pass # data into the request handlers instances... # Puke! meta_fetcher = None @@ -432,7 +434,7 @@ def setup_fetchers(opts): def run_server(): - # Using global here since it doesn't seem like we + # Using global here since it doesn't seem like we # can pass opts into a request handler constructor... opts = extract_opts() setup_logging(logging.DEBUG) |