From ea21cecc94b613b063d17588d4cd6612bc56753b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 15:15:48 -0700 Subject: Add in a configuration module that can write out the yum.repo format for those that want to hook into different repos for installing. --- cloudinit/config/cc_yum_add_repo.py | 93 +++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 cloudinit/config/cc_yum_add_repo.py diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py new file mode 100644 index 00000000..41dc72a0 --- /dev/null +++ b/cloudinit/config/cc_yum_add_repo.py @@ -0,0 +1,93 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2011 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# +# Author: Scott Moser +# Author: Juerg Haefliger +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this + +import os + +from cloudinit import templater +from cloudinit import util + +import configobj + + +def _canonicalize_id(repo_id): + repo_id = repo_id.lower().replace("-", "_") + repo_id = repo_id.replace(" ", "_") + return repo_id + + +## TODO(harlowja): move to distro? +# See man yum.conf +def _format_repository_config(repo_id, repo_config): + to_be = configobj.ConfigObj() + to_be[repo_id] = {} + # Do basic translation of + for (k, v) in repo_config.items(): + if isinstance(v, bool): + if v: + v = '1' + else: + v = '0' + elif isinstance(v, (tuple, list)): + v = "\n ".join(v) + # For now assume that peopel using this know + # the format of yum and don't verify further + to_be[repo_id][k] = v + lines = to_be.write() + return "\n".join(lines) + + +def handle(name, cfg, cloud, log, _args): + repos = cfg.get('yum_repos') + if not repos: + log.debug(("Skipping module named %s," + " no 'yum_repos' configuration found"), name) + return + repo_base_path = util.get_cfg_option_str(cfg, 'yum_repo_dir', + '/etc/yum.repos.d/') + repo_locations = {} + repo_configs = {} + for (repo_id, repo_config) in repos.items(): + canon_repo_id = _canonicalize_id(repo_id) + repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id)) + if os.path.exists(repo_fn_pth): + log.info("Skipping repo %s, file %s already exists!", + repo_id, repo_fn_pth) + continue + elif canon_repo_id in repo_locations: + log.info("Skipping repo %s, file %s already pending!", + repo_id, repo_fn_pth) + continue + if not repo_config: + repo_config = {} + # Do some basic sanity checks/cleaning + n_repo_config = {} + for (k, v) in repo_config.items(): + k = k.lower().strip().replace("-", "_") + if k: + n_repo_config[k] = v + repo_config = n_repo_config + if not 'baseurl' in repo_config: + log.warn("Repository %s does not contain a baseurl address", repo_id) + else: + repo_configs[canon_repo_id] = repo_config + repo_locations[canon_repo_id] = repo_fn_pth + for (c_repo_id, path) in repo_locations.items(): + repo_blob = _format_repository_config(c_repo_id, repo_configs.get(c_repo_id)) + util.write_file(path, repo_blob) -- cgit v1.2.3 From 19c640ebfd3832ec1582c6c134ea68efac95588c Mon Sep 17 00:00:00 2001 
From: Joshua Harlow Date: Tue, 16 Oct 2012 15:17:20 -0700 Subject: Remove some pylint buggies. --- cloudinit/config/cc_yum_add_repo.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 41dc72a0..10c423b5 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -20,7 +20,6 @@ import os -from cloudinit import templater from cloudinit import util import configobj @@ -53,7 +52,7 @@ def _format_repository_config(repo_id, repo_config): return "\n".join(lines) -def handle(name, cfg, cloud, log, _args): +def handle(name, cfg, _cloud, log, _args): repos = cfg.get('yum_repos') if not repos: log.debug(("Skipping module named %s," @@ -84,10 +83,12 @@ def handle(name, cfg, cloud, log, _args): n_repo_config[k] = v repo_config = n_repo_config if not 'baseurl' in repo_config: - log.warn("Repository %s does not contain a baseurl address", repo_id) + log.warn("Repository %s does not contain a baseurl address", + repo_id) else: repo_configs[canon_repo_id] = repo_config repo_locations[canon_repo_id] = repo_fn_pth for (c_repo_id, path) in repo_locations.items(): - repo_blob = _format_repository_config(c_repo_id, repo_configs.get(c_repo_id)) + repo_blob = _format_repository_config(c_repo_id, + repo_configs.get(c_repo_id)) util.write_file(path, repo_blob) -- cgit v1.2.3 From e8371b113c078f3017c44804662960da9a04abf2 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 15:19:14 -0700 Subject: Fix copyright. --- cloudinit/config/cc_yum_add_repo.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 10c423b5..f0487773 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -1,10 +1,8 @@ # vi: ts=4 expandtab # -# Copyright (C) 2011 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012 Yahoo! Inc. # -# Author: Scott Moser -# Author: Juerg Haefliger +# Author: Joshua Harlow # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -16,7 +14,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this +# along with this program. If not, see . import os -- cgit v1.2.3 From cf80962e51a925d5b895d0ef90c2a15ad99778b3 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 15:24:15 -0700 Subject: Update comments as to why format is the way it is. 
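To illustrate the behaviour these comments describe, here is a small standalone sketch (not code from this series; the repo id, URL and option values are invented): booleans become yum's 1/0, lists become indented continuation lines, and configobj renders the section, just as the handler does.

    # Standalone sketch of the translation described above; the repo id,
    # URL and option values are invented.  'configobj' is the same
    # third-party module the handler imports, and its write() returns the
    # rendered lines when no file object is given.
    import configobj


    def format_repo_value(val):
        if isinstance(val, bool):
            # yum expects 1/0 rather than True/False
            return str(int(val))
        if isinstance(val, (list, tuple)):
            # multi-valued options continue on indented lines
            return "\n    ".join([format_repo_value(v) for v in val])
        return str(val)


    repo = configobj.ConfigObj()
    repo['epel_testing'] = {}
    repo['epel_testing']['baseurl'] = format_repo_value(
        'http://mirror.example.com/epel/testing/5/$basearch')
    repo['epel_testing']['enabled'] = format_repo_value(False)
    repo['epel_testing']['gpgcheck'] = format_repo_value(True)
    print("\n".join(repo.write()))
    # [epel_testing]
    # baseurl = http://mirror.example.com/epel/testing/5/$basearch
    # enabled = 0
    # gpgcheck = 1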
--- cloudinit/config/cc_yum_add_repo.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index f0487773..fda3236f 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -34,14 +34,17 @@ def _canonicalize_id(repo_id): def _format_repository_config(repo_id, repo_config): to_be = configobj.ConfigObj() to_be[repo_id] = {} - # Do basic translation of + # Do basic translation of the items -> values for (k, v) in repo_config.items(): if isinstance(v, bool): + # Seems like yum prefers 1/0 if v: v = '1' else: v = '0' elif isinstance(v, (tuple, list)): + # Can handle 'lists' in certain cases + # See: http://bit.ly/Qqrf1t v = "\n ".join(v) # For now assume that peopel using this know # the format of yum and don't verify further -- cgit v1.2.3 From 7029732d496181233f2115dbfd65b13d20aceca7 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 19:18:15 -0700 Subject: Add a more generic package install mechansim that removes some of the code in apt_update_upgrade to do upgrades and installs and places it in a generic package module and adjusts some of the reboot backoffs and log flushing/sleeping that was happening there. --- cloudinit/config/cc_apt_configure.py | 258 +++++++++++++++++ cloudinit/config/cc_apt_update_upgrade.py | 305 --------------------- .../config/cc_package_update_upgrade_install.py | 110 ++++++++ 3 files changed, 368 insertions(+), 305 deletions(-) create mode 100644 cloudinit/config/cc_apt_configure.py delete mode 100644 cloudinit/config/cc_apt_update_upgrade.py create mode 100644 cloudinit/config/cc_package_update_upgrade_install.py diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py new file mode 100644 index 00000000..2cf65ecc --- /dev/null +++ b/cloudinit/config/cc_apt_configure.py @@ -0,0 +1,258 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2009-2010 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# +# Author: Scott Moser +# Author: Juerg Haefliger +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import glob +import os + +from cloudinit import templater +from cloudinit import util + +distros = ['ubuntu', 'debian'] + +PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n" +PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy" + +# A temporary shell program to get a given gpg key +# from a given keyserver +EXPORT_GPG_KEYID = """ + k=${1} ks=${2}; + exec 2>/dev/null + [ -n "$k" ] || exit 1; + armour=$(gpg --list-keys --armour "${k}") + if [ -z "${armour}" ]; then + gpg --keyserver ${ks} --recv $k >/dev/null && + armour=$(gpg --export --armour "${k}") && + gpg --batch --yes --delete-keys "${k}" + fi + [ -n "${armour}" ] && echo "${armour}" +""" + + +def handle(name, cfg, cloud, log, _args): + release = get_release() + mirrors = find_apt_mirror_info(cloud, cfg) + if not mirrors or "primary" not in mirrors: + log.debug(("Skipping module named %s," + " no package 'mirror' located"), name) + return + + # backwards compatibility + mirror = mirrors["primary"] + mirrors["mirror"] = mirror + + log.debug("Mirror info: %s" % mirrors) + + if not util.get_cfg_option_bool(cfg, + 'apt_preserve_sources_list', False): + generate_sources_list(release, mirrors, cloud, log) + old_mirrors = cfg.get('apt_old_mirrors', + {"primary": "archive.ubuntu.com/ubuntu", + "security": "security.ubuntu.com/ubuntu"}) + rename_apt_lists(old_mirrors, mirrors) + + # Set up any apt proxy + proxy = cfg.get("apt_proxy", None) + proxy_filename = PROXY_FN + if proxy: + try: + # See man 'apt.conf' + contents = PROXY_TPL % (proxy) + util.write_file(cloud.paths.join(False, proxy_filename), + contents) + except Exception as e: + util.logexc(log, "Failed to write proxy to %s", proxy_filename) + elif os.path.isfile(proxy_filename): + util.del_file(proxy_filename) + + # Process 'apt_sources' + if 'apt_sources' in cfg: + params = mirrors + params['RELEASE'] = release + params['MIRROR'] = mirror + errors = add_sources(cloud, cfg['apt_sources'], params) + for e in errors: + log.warn("Add source error: %s", ':'.join(e)) + + dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False) + if dconf_sel: + log.debug("Setting debconf selections per cloud config") + try: + util.subp(('debconf-set-selections', '-'), dconf_sel) + except Exception: + util.logexc(log, "Failed to run debconf-set-selections") + + +# get gpg keyid from keyserver +def getkeybyid(keyid, keyserver): + with util.ExtendedTemporaryFile(suffix='.sh') as fh: + fh.write(EXPORT_GPG_KEYID) + fh.flush() + cmd = ['/bin/sh', fh.name, keyid, keyserver] + (stdout, _stderr) = util.subp(cmd) + return stdout.strip() + + +def mirror2lists_fileprefix(mirror): + string = mirror + # take off http:// or ftp:// + if string.endswith("/"): + string = string[0:-1] + pos = string.find("://") + if pos >= 0: + string = string[pos + 3:] + string = string.replace("/", "_") + return string + + +def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"): + for (name, omirror) in old_mirrors.iteritems(): + nmirror = new_mirrors.get(name) + if not nmirror: + continue + oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) + nprefix = os.path.join(lists_d, mirror2lists_fileprefix(nmirror)) + if oprefix == nprefix: + continue + olen = len(oprefix) + for filename in glob.glob("%s_*" % oprefix): + util.rename(filename, "%s%s" % (nprefix, filename[olen:])) + + +def get_release(): + (stdout, _stderr) = util.subp(['lsb_release', '-cs']) + return stdout.strip() + + +def generate_sources_list(codename, mirrors, cloud, log): + template_fn = cloud.get_template_filename('sources.list') 
+ if not template_fn: + log.warn("No template found, not rendering /etc/apt/sources.list") + return + + params = {'codename': codename} + for k in mirrors: + params[k] = mirrors[k] + out_fn = cloud.paths.join(False, '/etc/apt/sources.list') + templater.render_to_file(template_fn, out_fn, params) + + +def add_sources(cloud, srclist, template_params=None): + """ + add entries in /etc/apt/sources.list.d for each abbreviated + sources.list entry in 'srclist'. When rendering template, also + include the values in dictionary searchList + """ + if template_params is None: + template_params = {} + + errorlist = [] + for ent in srclist: + if 'source' not in ent: + errorlist.append(["", "missing source"]) + continue + + source = ent['source'] + if source.startswith("ppa:"): + try: + util.subp(["add-apt-repository", source]) + except: + errorlist.append([source, "add-apt-repository failed"]) + continue + + source = templater.render_string(source, template_params) + + if 'filename' not in ent: + ent['filename'] = 'cloud_config_sources.list' + + if not ent['filename'].startswith("/"): + ent['filename'] = os.path.join("/etc/apt/sources.list.d/", + ent['filename']) + + if ('keyid' in ent and 'key' not in ent): + ks = "keyserver.ubuntu.com" + if 'keyserver' in ent: + ks = ent['keyserver'] + try: + ent['key'] = getkeybyid(ent['keyid'], ks) + except: + errorlist.append([source, "failed to get key from %s" % ks]) + continue + + if 'key' in ent: + try: + util.subp(('apt-key', 'add', '-'), ent['key']) + except: + errorlist.append([source, "failed add key"]) + + try: + contents = "%s\n" % (source) + util.write_file(cloud.paths.join(False, ent['filename']), + contents, omode="ab") + except: + errorlist.append([source, + "failed write to file %s" % ent['filename']]) + + return errorlist + + +def find_apt_mirror_info(cloud, cfg): + """find an apt_mirror given the cloud and cfg provided.""" + + mirror = None + + # this is less preferred way of specifying mirror preferred would be to + # use the distro's search or package_mirror. + mirror = cfg.get("apt_mirror", None) + + search = cfg.get("apt_mirror_search", None) + if not mirror and search: + mirror = util.search_for_mirror(search) + + if (not mirror and + util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)): + mydom = "" + doms = [] + + # if we have a fqdn, then search its domain portion first + (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) + mydom = ".".join(fqdn.split(".")[1:]) + if mydom: + doms.append(".%s" % mydom) + + doms.extend((".localdomain", "",)) + + mirror_list = [] + distro = cloud.distro.name + mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) + for post in doms: + mirror_list.append(mirrorfmt % (post)) + + mirror = util.search_for_mirror(mirror_list) + + mirror_info = cloud.datasource.get_package_mirror_info() + + # this is a bit strange. + # if mirror is set, then one of the legacy options above set it + # but they do not cover security. so we need to get that from + # get_package_mirror_info + if mirror: + mirror_info.update({'primary': mirror}) + + return mirror_info diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py deleted file mode 100644 index 356bb98d..00000000 --- a/cloudinit/config/cc_apt_update_upgrade.py +++ /dev/null @@ -1,305 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. 
-# -# Author: Scott Moser -# Author: Juerg Haefliger -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import glob -import os -import time - -from cloudinit import templater -from cloudinit import util - -distros = ['ubuntu', 'debian'] - -PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n" -PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy" - -# A temporary shell program to get a given gpg key -# from a given keyserver -EXPORT_GPG_KEYID = """ - k=${1} ks=${2}; - exec 2>/dev/null - [ -n "$k" ] || exit 1; - armour=$(gpg --list-keys --armour "${k}") - if [ -z "${armour}" ]; then - gpg --keyserver ${ks} --recv $k >/dev/null && - armour=$(gpg --export --armour "${k}") && - gpg --batch --yes --delete-keys "${k}" - fi - [ -n "${armour}" ] && echo "${armour}" -""" - - -def handle(name, cfg, cloud, log, _args): - update = util.get_cfg_option_bool(cfg, 'apt_update', False) - upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False) - - release = get_release() - mirrors = find_apt_mirror_info(cloud, cfg) - if not mirrors or "primary" not in mirrors: - log.debug(("Skipping module named %s," - " no package 'mirror' located"), name) - return - - # backwards compatibility - mirror = mirrors["primary"] - mirrors["mirror"] = mirror - - log.debug("mirror info: %s" % mirrors) - - if not util.get_cfg_option_bool(cfg, - 'apt_preserve_sources_list', False): - generate_sources_list(release, mirrors, cloud, log) - old_mirrors = cfg.get('apt_old_mirrors', - {"primary": "archive.ubuntu.com/ubuntu", - "security": "security.ubuntu.com/ubuntu"}) - rename_apt_lists(old_mirrors, mirrors) - - # Set up any apt proxy - proxy = cfg.get("apt_proxy", None) - proxy_filename = PROXY_FN - if proxy: - try: - # See man 'apt.conf' - contents = PROXY_TPL % (proxy) - util.write_file(cloud.paths.join(False, proxy_filename), - contents) - except Exception as e: - util.logexc(log, "Failed to write proxy to %s", proxy_filename) - elif os.path.isfile(proxy_filename): - util.del_file(proxy_filename) - - # Process 'apt_sources' - if 'apt_sources' in cfg: - params = mirrors - params['RELEASE'] = release - params['MIRROR'] = mirror - errors = add_sources(cloud, cfg['apt_sources'], params) - for e in errors: - log.warn("Source Error: %s", ':'.join(e)) - - dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False) - if dconf_sel: - log.debug("setting debconf selections per cloud config") - try: - util.subp(('debconf-set-selections', '-'), dconf_sel) - except: - util.logexc(log, "Failed to run debconf-set-selections") - - pkglist = util.get_cfg_option_list(cfg, 'packages', []) - - errors = [] - if update or len(pkglist) or upgrade: - try: - cloud.distro.update_package_sources() - except Exception as e: - util.logexc(log, "Package update failed") - errors.append(e) - - if upgrade: - try: - cloud.distro.package_command("upgrade") - except Exception as e: - util.logexc(log, "Package upgrade failed") - errors.append(e) - - if len(pkglist): - try: - cloud.distro.install_packages(pkglist) - except Exception as e: - util.logexc(log, "Failed to 
install packages: %s ", pkglist) - errors.append(e) - - # kernel and openssl (possibly some other packages) - # write a file /var/run/reboot-required after upgrading. - # if that file exists and configured, then just stop right now and reboot - # TODO(smoser): handle this less voilently - reboot_file = "/var/run/reboot-required" - if ((upgrade or pkglist) and cfg.get("apt_reboot_if_required", False) and - os.path.isfile(reboot_file)): - log.warn("rebooting after upgrade or install per %s" % reboot_file) - time.sleep(1) # give the warning time to get out - util.subp(["/sbin/reboot"]) - time.sleep(60) - log.warn("requested reboot did not happen!") - errors.append(Exception("requested reboot did not happen!")) - - if len(errors): - log.warn("%s failed with exceptions, re-raising the last one", - len(errors)) - raise errors[-1] - - -# get gpg keyid from keyserver -def getkeybyid(keyid, keyserver): - with util.ExtendedTemporaryFile(suffix='.sh') as fh: - fh.write(EXPORT_GPG_KEYID) - fh.flush() - cmd = ['/bin/sh', fh.name, keyid, keyserver] - (stdout, _stderr) = util.subp(cmd) - return stdout.strip() - - -def mirror2lists_fileprefix(mirror): - string = mirror - # take off http:// or ftp:// - if string.endswith("/"): - string = string[0:-1] - pos = string.find("://") - if pos >= 0: - string = string[pos + 3:] - string = string.replace("/", "_") - return string - - -def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"): - for (name, omirror) in old_mirrors.iteritems(): - nmirror = new_mirrors.get(name) - if not nmirror: - continue - oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) - nprefix = os.path.join(lists_d, mirror2lists_fileprefix(nmirror)) - if oprefix == nprefix: - continue - olen = len(oprefix) - for filename in glob.glob("%s_*" % oprefix): - util.rename(filename, "%s%s" % (nprefix, filename[olen:])) - - -def get_release(): - (stdout, _stderr) = util.subp(['lsb_release', '-cs']) - return stdout.strip() - - -def generate_sources_list(codename, mirrors, cloud, log): - template_fn = cloud.get_template_filename('sources.list') - if not template_fn: - log.warn("No template found, not rendering /etc/apt/sources.list") - return - - params = {'codename': codename} - for k in mirrors: - params[k] = mirrors[k] - out_fn = cloud.paths.join(False, '/etc/apt/sources.list') - templater.render_to_file(template_fn, out_fn, params) - - -def add_sources(cloud, srclist, template_params=None): - """ - add entries in /etc/apt/sources.list.d for each abbreviated - sources.list entry in 'srclist'. 
When rendering template, also - include the values in dictionary searchList - """ - if template_params is None: - template_params = {} - - errorlist = [] - for ent in srclist: - if 'source' not in ent: - errorlist.append(["", "missing source"]) - continue - - source = ent['source'] - if source.startswith("ppa:"): - try: - util.subp(["add-apt-repository", source]) - except: - errorlist.append([source, "add-apt-repository failed"]) - continue - - source = templater.render_string(source, template_params) - - if 'filename' not in ent: - ent['filename'] = 'cloud_config_sources.list' - - if not ent['filename'].startswith("/"): - ent['filename'] = os.path.join("/etc/apt/sources.list.d/", - ent['filename']) - - if ('keyid' in ent and 'key' not in ent): - ks = "keyserver.ubuntu.com" - if 'keyserver' in ent: - ks = ent['keyserver'] - try: - ent['key'] = getkeybyid(ent['keyid'], ks) - except: - errorlist.append([source, "failed to get key from %s" % ks]) - continue - - if 'key' in ent: - try: - util.subp(('apt-key', 'add', '-'), ent['key']) - except: - errorlist.append([source, "failed add key"]) - - try: - contents = "%s\n" % (source) - util.write_file(cloud.paths.join(False, ent['filename']), - contents, omode="ab") - except: - errorlist.append([source, - "failed write to file %s" % ent['filename']]) - - return errorlist - - -def find_apt_mirror_info(cloud, cfg): - """find an apt_mirror given the cloud and cfg provided.""" - - mirror = None - - # this is less preferred way of specifying mirror preferred would be to - # use the distro's search or package_mirror. - mirror = cfg.get("apt_mirror", None) - - search = cfg.get("apt_mirror_search", None) - if not mirror and search: - mirror = util.search_for_mirror(search) - - if (not mirror and - util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)): - mydom = "" - doms = [] - - # if we have a fqdn, then search its domain portion first - (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) - mydom = ".".join(fqdn.split(".")[1:]) - if mydom: - doms.append(".%s" % mydom) - - doms.extend((".localdomain", "",)) - - mirror_list = [] - distro = cloud.distro.name - mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) - for post in doms: - mirror_list.append(mirrorfmt % (post)) - - mirror = util.search_for_mirror(mirror_list) - - mirror_info = cloud.datasource.get_package_mirror_info() - - # this is a bit strange. - # if mirror is set, then one of the legacy options above set it - # but they do not cover security. so we need to get that from - # get_package_mirror_info - if mirror: - mirror_info.update({'primary': mirror}) - - return mirror_info diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py new file mode 100644 index 00000000..e319e147 --- /dev/null +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -0,0 +1,110 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from logging import StreamHandler +import os +import time + +from cloudinit import util + +REBOOT_FILE = "/var/run/reboot-required" +REBOOT_CMD = ["/sbin/reboot"] + + +def _multi_cfg_bool_get(cfg, *keys): + for k in keys: + if util.get_cfg_option_bool(cfg, k, False): + return True + return False + + +def _flush_loggers(root): + for h in root.handlers: + if isinstance(h, (StreamHandler)): + try: + h.flush() + except IOError: + pass + if root.parent: + _flush_loggers(root.parent) + + +def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2): + util.subp(REBOOT_CMD) + start = time.time() + wait_time = initial_sleep + for _i in range(0, wait_attempts): + time.sleep(wait_time) + wait_time *= backoff + elapsed = time.time() - start + log.debug("Rebooted, but still running after %s seconds", int(elapsed)) + # If we got here, not good + elapsed = time.time() - start + raise RuntimeError(("Reboot did not happen" + " after %s seconds!") % (int(elapsed))) + + +def handle(_name, cfg, cloud, log, _args): + # Handle the old style + new config names + update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update') + upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade') + reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required', + 'package_reboot_if_required') + pkglist = util.get_cfg_option_list(cfg, 'packages', []) + + errors = [] + if update or len(pkglist) or upgrade: + try: + cloud.distro.update_package_sources() + except Exception as e: + util.logexc(log, "Package update failed") + errors.append(e) + + if upgrade: + try: + cloud.distro.package_command("upgrade") + except Exception as e: + util.logexc(log, "Package upgrade failed") + errors.append(e) + + if len(pkglist): + try: + cloud.distro.install_packages(pkglist) + except Exception as e: + util.logexc(log, "Failed to install packages: %s", pkglist) + errors.append(e) + + # TODO(smoser): handle this less violently + # kernel and openssl (possibly some other packages) + # write a file /var/run/reboot-required after upgrading. + # if that file exists and configured, then just stop right now and reboot + reboot_fn_exists = os.path.isfile(REBOOT_FILE) + if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists: + try: + log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE) + # Flush the above warning + anything else out... + _flush_loggers(log) + _fire_reboot(log) + except Exception as e: + util.logexc(log, "Requested reboot did not happen!") + errors.append(e) + + if len(errors): + log.warn("%s failed with exceptions, re-raising the last one", + len(errors)) + raise errors[-1] -- cgit v1.2.3 From 85a412c172044ae89d381f69ddb309ce8b3cea6e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 21:14:51 -0700 Subject: Move the recursive flushing to the log module. --- cloudinit/config/cc_package_update_upgrade_install.py | 15 ++------------- cloudinit/log.py | 12 ++++++++++++ 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py index e319e147..73b0e30d 100644 --- a/cloudinit/config/cc_package_update_upgrade_install.py +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -16,10 +16,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-from logging import StreamHandler import os import time +from cloudinit import log as logging from cloudinit import util REBOOT_FILE = "/var/run/reboot-required" @@ -33,17 +33,6 @@ def _multi_cfg_bool_get(cfg, *keys): return False -def _flush_loggers(root): - for h in root.handlers: - if isinstance(h, (StreamHandler)): - try: - h.flush() - except IOError: - pass - if root.parent: - _flush_loggers(root.parent) - - def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2): util.subp(REBOOT_CMD) start = time.time() @@ -98,7 +87,7 @@ def handle(_name, cfg, cloud, log, _args): try: log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE) # Flush the above warning + anything else out... - _flush_loggers(log) + logging.flushLoggers(log) _fire_reboot(log) except Exception as e: util.logexc(log, "Requested reboot did not happen!") diff --git a/cloudinit/log.py b/cloudinit/log.py index 2333e5ee..da6c2851 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -53,6 +53,18 @@ def setupBasicLogging(): root.setLevel(DEBUG) +def flushLoggers(root): + if not root: + return + for h in root.handlers: + if isinstance(h, (logging.StreamHandler)): + try: + h.flush() + except IOError: + pass + flushLoggers(root.parent) + + def setupLogging(cfg=None): # See if the config provides any logging conf... if not cfg: -- cgit v1.2.3 From 8e38c908da002b350d26845ffb5931b4efce1ca8 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 21:20:19 -0700 Subject: Add in a created by cloud-init header. --- cloudinit/config/cc_yum_add_repo.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index fda3236f..ee117bcd 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -50,6 +50,7 @@ def _format_repository_config(repo_id, repo_config): # the format of yum and don't verify further to_be[repo_id][k] = v lines = to_be.write() + lines.insert(0, util.make_header()) return "\n".join(lines) -- cgit v1.2.3 From 0b73122b6a7c63d4ab1b117c6c0c136876fefd16 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 17 Oct 2012 09:32:35 -0700 Subject: Make the yum value 'string' translation a function. --- cloudinit/config/cc_yum_add_repo.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index ee117bcd..123f7827 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -29,6 +29,19 @@ def _canonicalize_id(repo_id): return repo_id +def _format_repo_value(val): + if isinstance(val, (bool)): + # Seems like yum prefers 1/0 + return str(int(val)) + if isinstance(val, (list, tuple)): + # Can handle 'lists' in certain cases + # See: http://bit.ly/Qqrf1t + return "\n ".join([_format_repo_value(v) for v in val]) + if not isinstance(val, (basestring, str)): + return str(val) + return val + + ## TODO(harlowja): move to distro? 
# See man yum.conf def _format_repository_config(repo_id, repo_config): @@ -36,19 +49,9 @@ def _format_repository_config(repo_id, repo_config): to_be[repo_id] = {} # Do basic translation of the items -> values for (k, v) in repo_config.items(): - if isinstance(v, bool): - # Seems like yum prefers 1/0 - if v: - v = '1' - else: - v = '0' - elif isinstance(v, (tuple, list)): - # Can handle 'lists' in certain cases - # See: http://bit.ly/Qqrf1t - v = "\n ".join(v) - # For now assume that peopel using this know - # the format of yum and don't verify further - to_be[repo_id][k] = v + # For now assume that people using this know + # the format of yum and don't verify keys/values further + to_be[repo_id][k] = _format_repo_value(v) lines = to_be.write() lines.insert(0, util.make_header()) return "\n".join(lines) -- cgit v1.2.3 From 04f52eb593e4f5114626c74fd8f3c5a9a8d440bd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 17 Oct 2012 09:42:22 -0700 Subject: More cleanups, fix header function. --- cloudinit/config/cc_yum_add_repo.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 123f7827..5c273825 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -53,7 +53,7 @@ def _format_repository_config(repo_id, repo_config): # the format of yum and don't verify keys/values further to_be[repo_id][k] = _format_repo_value(v) lines = to_be.write() - lines.insert(0, util.make_header()) + lines.insert(0, "# Created by cloud-init on %s" % (util.time_rfc2822())) return "\n".join(lines) @@ -87,12 +87,19 @@ def handle(name, cfg, _cloud, log, _args): if k: n_repo_config[k] = v repo_config = n_repo_config - if not 'baseurl' in repo_config: - log.warn("Repository %s does not contain a baseurl address", - repo_id) - else: + missing_required = 0 + for req_field in ['baseurl']: + if not req_field in repo_config: + log.warn(("Repository %s does not contain a %s" + " configuration 'required' entry"), + repo_id, req_field) + missing_required += 1 + if not missing_required: repo_configs[canon_repo_id] = repo_config repo_locations[canon_repo_id] = repo_fn_pth + else: + log.warn("Repository %s is missing %s required fields, skipping!", + repo_id, missing_required) for (c_repo_id, path) in repo_locations.items(): repo_blob = _format_repository_config(c_repo_id, repo_configs.get(c_repo_id)) -- cgit v1.2.3 From aa8b51a48a30e3a3c863ca0ddb8bc4667026d57a Mon Sep 17 00:00:00 2001 From: harlowja Date: Sat, 27 Oct 2012 19:25:48 -0700 Subject: Helpful cleanups. 1. Remove the usage of the path.join function now that all code should be going through the util file methods (and they can be mocked out as needed). 2. Adjust all occurences of the above join function to either not use it or replace it with the standard os.path.join (which can also be mocked out as needed) 3. Fix pylint from complaining about the tests folder 'helpers.py' not being found 4. Add a pylintrc file that is used instead of the options hidden in the 'run_pylint' tool. 
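The motivation behind points 1 and 2 is easiest to see from the test side: once writes go through cloudinit.util, a unit test only has to patch util.write_file instead of wiring a fake 'paths' object through every handler. A rough sketch of such a test (assumed test shape, not part of this commit), written against the cc_apt_pipelining handler changed below and using the standalone 'mock' package (unittest.mock on later Pythons):

    import unittest

    import mock  # standalone 'mock' package; unittest.mock on python 3

    from cloudinit.config import cc_apt_pipelining


    class TestAptPipelining(unittest.TestCase):

        @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file')
        def test_false_disables_pipelining(self, m_write_file):
            # 'false' should produce exactly one write, to the module's
            # DEFAULT_FILE; no cloud/paths object is required any more.
            cc_apt_pipelining.handle('apt_pipelining',
                                     {'apt_pipelining': 'false'},
                                     None, mock.Mock(), [])
            self.assertEqual(1, m_write_file.call_count)
            self.assertEqual(cc_apt_pipelining.DEFAULT_FILE,
                             m_write_file.call_args[0][0])


    if __name__ == '__main__':
        unittest.main()

The same patching pattern applies to the other handlers converted in this commit.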
--- Makefile | 8 +-- cloudinit/config/cc_apt_pipelining.py | 12 ++-- cloudinit/config/cc_apt_update_upgrade.py | 13 ++-- cloudinit/config/cc_ca_certs.py | 24 ++++---- cloudinit/config/cc_chef.py | 30 +++++----- cloudinit/config/cc_landscape.py | 14 ++--- cloudinit/config/cc_mcollective.py | 22 +++---- cloudinit/config/cc_mounts.py | 9 ++- cloudinit/config/cc_phone_home.py | 4 +- cloudinit/config/cc_puppet.py | 70 ++++++++++------------ cloudinit/config/cc_resizefs.py | 5 +- cloudinit/config/cc_rsyslog.py | 3 +- cloudinit/config/cc_runcmd.py | 2 +- cloudinit/config/cc_salt_minion.py | 6 +- cloudinit/config/cc_set_passwords.py | 6 +- cloudinit/config/cc_ssh.py | 16 +++-- cloudinit/config/cc_ssh_authkey_fingerprints.py | 7 +-- cloudinit/config/cc_update_etc_hosts.py | 3 +- cloudinit/distros/__init__.py | 8 +-- cloudinit/distros/debian.py | 26 +++----- cloudinit/helpers.py | 29 +-------- cloudinit/sources/__init__.py | 2 - cloudinit/ssh_util.py | 26 ++++---- pylintrc | 19 ++++++ tests/__init__.py | 0 tests/unittests/__init__.py | 0 tests/unittests/test_datasource/__init__.py | 0 tests/unittests/test_distros/__init__.py | 0 tests/unittests/test_filters/__init__.py | 0 tests/unittests/test_filters/test_launch_index.py | 10 +--- tests/unittests/test_handler/__init__.py | 0 .../test_handler/test_handler_ca_certs.py | 18 +++--- tests/unittests/test_runs/__init__.py | 0 tests/unittests/test_runs/test_simple_run.py | 10 +--- tools/run-pylint | 19 ++---- 35 files changed, 170 insertions(+), 251 deletions(-) create mode 100644 pylintrc create mode 100644 tests/__init__.py create mode 100644 tests/unittests/__init__.py create mode 100644 tests/unittests/test_datasource/__init__.py create mode 100644 tests/unittests/test_distros/__init__.py create mode 100644 tests/unittests/test_filters/__init__.py create mode 100644 tests/unittests/test_handler/__init__.py create mode 100644 tests/unittests/test_runs/__init__.py diff --git a/Makefile b/Makefile index 49324ca0..8f5646b7 100644 --- a/Makefile +++ b/Makefile @@ -1,20 +1,20 @@ CWD=$(shell pwd) -PY_FILES=$(shell find cloudinit bin tests tools -name "*.py") +PY_FILES=$(shell find cloudinit bin tests tools -type f -name "*.py") PY_FILES+="bin/cloud-init" all: test pep8: - $(CWD)/tools/run-pep8 $(PY_FILES) + @$(CWD)/tools/run-pep8 $(PY_FILES) pylint: - $(CWD)/tools/run-pylint $(PY_FILES) + @$(CWD)/tools/run-pylint $(PY_FILES) pyflakes: pyflakes $(PY_FILES) test: - nosetests $(noseopts) tests/unittests/ + @nosetests $(noseopts) tests/ 2to3: 2to3 $(PY_FILES) diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index 02056ee0..e5629175 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -34,26 +34,24 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" # on TCP connections - otherwise data corruption will occur. 
-def handle(_name, cfg, cloud, log, _args): +def handle(_name, cfg, _cloud, log, _args): apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False) apt_pipe_value_s = str(apt_pipe_value).lower().strip() if apt_pipe_value_s == "false": - write_apt_snippet(cloud, "0", log, DEFAULT_FILE) + write_apt_snippet("0", log, DEFAULT_FILE) elif apt_pipe_value_s in ("none", "unchanged", "os"): return elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]: - write_apt_snippet(cloud, apt_pipe_value_s, log, DEFAULT_FILE) + write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE) else: log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value) -def write_apt_snippet(cloud, setting, log, f_name): +def write_apt_snippet(setting, log, f_name): """Writes f_name with apt pipeline depth 'setting'.""" file_contents = APT_PIPE_TPL % (setting) - - util.write_file(cloud.paths.join(False, f_name), file_contents) - + util.write_file(f_name, file_contents) log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting) diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py index 356bb98d..59c34b59 100644 --- a/cloudinit/config/cc_apt_update_upgrade.py +++ b/cloudinit/config/cc_apt_update_upgrade.py @@ -78,8 +78,7 @@ def handle(name, cfg, cloud, log, _args): try: # See man 'apt.conf' contents = PROXY_TPL % (proxy) - util.write_file(cloud.paths.join(False, proxy_filename), - contents) + util.write_file(proxy_filename, contents) except Exception as e: util.logexc(log, "Failed to write proxy to %s", proxy_filename) elif os.path.isfile(proxy_filename): @@ -90,7 +89,7 @@ def handle(name, cfg, cloud, log, _args): params = mirrors params['RELEASE'] = release params['MIRROR'] = mirror - errors = add_sources(cloud, cfg['apt_sources'], params) + errors = add_sources(cfg['apt_sources'], params) for e in errors: log.warn("Source Error: %s", ':'.join(e)) @@ -196,11 +195,10 @@ def generate_sources_list(codename, mirrors, cloud, log): params = {'codename': codename} for k in mirrors: params[k] = mirrors[k] - out_fn = cloud.paths.join(False, '/etc/apt/sources.list') - templater.render_to_file(template_fn, out_fn, params) + templater.render_to_file(template_fn, '/etc/apt/sources.list', params) -def add_sources(cloud, srclist, template_params=None): +def add_sources(srclist, template_params=None): """ add entries in /etc/apt/sources.list.d for each abbreviated sources.list entry in 'srclist'. When rendering template, also @@ -250,8 +248,7 @@ def add_sources(cloud, srclist, template_params=None): try: contents = "%s\n" % (source) - util.write_file(cloud.paths.join(False, ent['filename']), - contents, omode="ab") + util.write_file(ent['filename'], contents, omode="ab") except: errorlist.append([source, "failed write to file %s" % ent['filename']]) diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index dc046bda..20f24357 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -22,6 +22,7 @@ CA_CERT_PATH = "/usr/share/ca-certificates/" CA_CERT_FILENAME = "cloud-init-ca-certs.crt" CA_CERT_CONFIG = "/etc/ca-certificates.conf" CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/" +CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) distros = ['ubuntu', 'debian'] @@ -33,7 +34,7 @@ def update_ca_certs(): util.subp(["update-ca-certificates"], capture=False) -def add_ca_certs(paths, certs): +def add_ca_certs(certs): """ Adds certificates to the system. 
To actually apply the new certificates you must also call L{update_ca_certs}. @@ -43,27 +44,24 @@ def add_ca_certs(paths, certs): if certs: # First ensure they are strings... cert_file_contents = "\n".join([str(c) for c in certs]) - cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) - cert_file_fullpath = paths.join(False, cert_file_fullpath) - util.write_file(cert_file_fullpath, cert_file_contents, mode=0644) + util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0644) # Append cert filename to CA_CERT_CONFIG file. - util.write_file(paths.join(False, CA_CERT_CONFIG), - "\n%s" % CA_CERT_FILENAME, omode="ab") + util.write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="ab") -def remove_default_ca_certs(paths): +def remove_default_ca_certs(): """ Removes all default trusted CA certificates from the system. To actually apply the change you must also call L{update_ca_certs}. """ - util.delete_dir_contents(paths.join(False, CA_CERT_PATH)) - util.delete_dir_contents(paths.join(False, CA_CERT_SYSTEM_PATH)) - util.write_file(paths.join(False, CA_CERT_CONFIG), "", mode=0644) + util.delete_dir_contents(CA_CERT_PATH) + util.delete_dir_contents(CA_CERT_SYSTEM_PATH) + util.write_file(CA_CERT_CONFIG, "", mode=0644) debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no" util.subp(('debconf-set-selections', '-'), debconf_sel) -def handle(name, cfg, cloud, log, _args): +def handle(name, cfg, _cloud, log, _args): """ Call to handle ca-cert sections in cloud-config file. @@ -85,14 +83,14 @@ def handle(name, cfg, cloud, log, _args): # default trusted CA certs first. if ca_cert_cfg.get("remove-defaults", False): log.debug("Removing default certificates") - remove_default_ca_certs(cloud.paths) + remove_default_ca_certs() # If we are given any new trusted CA certs to add, add them. if "trusted" in ca_cert_cfg: trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted") if trusted_certs: log.debug("Adding %d certificates" % len(trusted_certs)) - add_ca_certs(cloud.paths, trusted_certs) + add_ca_certs(trusted_certs) # Update the system with the new cert configuration. log.debug("Updating certificates") diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 6f568261..7a3d6a31 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -26,6 +26,15 @@ from cloudinit import util RUBY_VERSION_DEFAULT = "1.8" +CHEF_DIRS = [ + '/etc/chef', + '/var/log/chef', + '/var/lib/chef', + '/var/cache/chef', + '/var/backups/chef', + '/var/run/chef', +] + def handle(name, cfg, cloud, log, _args): @@ -37,24 +46,15 @@ def handle(name, cfg, cloud, log, _args): chef_cfg = cfg['chef'] # Ensure the chef directories we use exist - c_dirs = [ - '/etc/chef', - '/var/log/chef', - '/var/lib/chef', - '/var/cache/chef', - '/var/backups/chef', - '/var/run/chef', - ] - for d in c_dirs: - util.ensure_dir(cloud.paths.join(False, d)) + for d in CHEF_DIRS: + util.ensure_dir(d) # Set the validation key based on the presence of either 'validation_key' # or 'validation_cert'. 
In the case where both exist, 'validation_key' # takes precedence for key in ('validation_key', 'validation_cert'): if key in chef_cfg and chef_cfg[key]: - v_fn = cloud.paths.join(False, '/etc/chef/validation.pem') - util.write_file(v_fn, chef_cfg[key]) + util.write_file('/etc/chef/validation.pem', chef_cfg[key]) break # Create the chef config from template @@ -68,8 +68,7 @@ def handle(name, cfg, cloud, log, _args): '_default'), 'validation_name': chef_cfg['validation_name'] } - out_fn = cloud.paths.join(False, '/etc/chef/client.rb') - templater.render_to_file(template_fn, out_fn, params) + templater.render_to_file(template_fn, '/etc/chef/client.rb', params) else: log.warn("No template found, not rendering to /etc/chef/client.rb") @@ -81,8 +80,7 @@ def handle(name, cfg, cloud, log, _args): initial_attributes = chef_cfg['initial_attributes'] for k in list(initial_attributes.keys()): initial_json[k] = initial_attributes[k] - firstboot_fn = cloud.paths.join(False, '/etc/chef/firstboot.json') - util.write_file(firstboot_fn, json.dumps(initial_json)) + util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json)) # If chef is not installed, we install chef based on 'install_type' if not os.path.isfile('/usr/bin/chef-client'): diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 56ab0ce3..02610dd0 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -66,22 +66,16 @@ def handle(_name, cfg, cloud, log, _args): merge_data = [ LSC_BUILTIN_CFG, - cloud.paths.join(True, LSC_CLIENT_CFG_FILE), + LSC_CLIENT_CFG_FILE, ls_cloudcfg, ] merged = merge_together(merge_data) - - lsc_client_fn = cloud.paths.join(False, LSC_CLIENT_CFG_FILE) - lsc_dir = cloud.paths.join(False, os.path.dirname(lsc_client_fn)) - if not os.path.isdir(lsc_dir): - util.ensure_dir(lsc_dir) - contents = StringIO() merged.write(contents) - contents.flush() - util.write_file(lsc_client_fn, contents.getvalue()) - log.debug("Wrote landscape config file to %s", lsc_client_fn) + util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE)) + util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue()) + log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE) util.write_file(LS_DEFAULT_FILE, "RUN=1\n") util.subp(["service", "landscape-client", "restart"]) diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py index 2acdbc6f..b670390d 100644 --- a/cloudinit/config/cc_mcollective.py +++ b/cloudinit/config/cc_mcollective.py @@ -29,6 +29,7 @@ from cloudinit import util PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem" PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem" +SERVER_CFG = '/etc/mcollective/server.cfg' def handle(name, cfg, cloud, log, _args): @@ -48,26 +49,23 @@ def handle(name, cfg, cloud, log, _args): if 'conf' in mcollective_cfg: # Read server.cfg values from the # original file in order to be able to mix the rest up - server_cfg_fn = cloud.paths.join(True, '/etc/mcollective/server.cfg') - mcollective_config = ConfigObj(server_cfg_fn) + mcollective_config = ConfigObj(SERVER_CFG) # See: http://tiny.cc/jh9agw for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems(): if cfg_name == 'public-cert': - pubcert_fn = cloud.paths.join(True, PUBCERT_FILE) - util.write_file(pubcert_fn, cfg, mode=0644) - mcollective_config['plugin.ssl_server_public'] = pubcert_fn + util.write_file(PUBCERT_FILE, cfg, mode=0644) + mcollective_config['plugin.ssl_server_public'] = PUBCERT_FILE mcollective_config['securityprovider'] = 'ssl' elif 
cfg_name == 'private-cert': - pricert_fn = cloud.paths.join(True, PRICERT_FILE) - util.write_file(pricert_fn, cfg, mode=0600) - mcollective_config['plugin.ssl_server_private'] = pricert_fn + util.write_file(PRICERT_FILE, cfg, mode=0600) + mcollective_config['plugin.ssl_server_private'] = PRICERT_FILE mcollective_config['securityprovider'] = 'ssl' else: if isinstance(cfg, (basestring, str)): # Just set it in the 'main' section mcollective_config[cfg_name] = cfg elif isinstance(cfg, (dict)): - # Iterate throug the config items, create a section + # Iterate through the config items, create a section # if it is needed and then add/or create items as needed if cfg_name not in mcollective_config.sections: mcollective_config[cfg_name] = {} @@ -78,14 +76,12 @@ def handle(name, cfg, cloud, log, _args): mcollective_config[cfg_name] = str(cfg) # We got all our config as wanted we'll rename # the previous server.cfg and create our new one - old_fn = cloud.paths.join(False, '/etc/mcollective/server.cfg.old') - util.rename(server_cfg_fn, old_fn) + util.rename(SERVER_CFG, "%s.old" % (SERVER_CFG)) # Now we got the whole file, write to disk... contents = StringIO() mcollective_config.write(contents) contents = contents.getvalue() - server_cfg_rw = cloud.paths.join(False, '/etc/mcollective/server.cfg') - util.write_file(server_cfg_rw, contents, mode=0644) + util.write_file(SERVER_CFG, contents, mode=0644) # Start mcollective util.subp(['service', 'mcollective', 'start'], capture=False) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 14c965bb..cb772c86 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -28,6 +28,7 @@ from cloudinit import util SHORTNAME_FILTER = r"^[x]{0,1}[shv]d[a-z][0-9]*$" SHORTNAME = re.compile(SHORTNAME_FILTER) WS = re.compile("[%s]+" % (whitespace)) +FSTAB_PATH = "/etc/fstab" def is_mdname(name): @@ -167,8 +168,7 @@ def handle(_name, cfg, cloud, log, _args): cc_lines.append('\t'.join(line)) fstab_lines = [] - fstab = util.load_file(cloud.paths.join(True, "/etc/fstab")) - for line in fstab.splitlines(): + for line in util.load_file(FSTAB_PATH).splitlines(): try: toks = WS.split(line) if toks[3].find(comment) != -1: @@ -179,7 +179,7 @@ def handle(_name, cfg, cloud, log, _args): fstab_lines.extend(cc_lines) contents = "%s\n" % ('\n'.join(fstab_lines)) - util.write_file(cloud.paths.join(False, "/etc/fstab"), contents) + util.write_file(FSTAB_PATH, contents) if needswap: try: @@ -188,9 +188,8 @@ def handle(_name, cfg, cloud, log, _args): util.logexc(log, "Activating swap via 'swapon -a' failed") for d in dirs: - real_dir = cloud.paths.join(False, d) try: - util.ensure_dir(real_dir) + util.ensure_dir(d) except: util.logexc(log, "Failed to make '%s' config-mount", d) diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index ae1349eb..886487f8 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -84,10 +84,10 @@ def handle(name, cfg, cloud, log, args): for (n, path) in pubkeys.iteritems(): try: - all_keys[n] = util.load_file(cloud.paths.join(True, path)) + all_keys[n] = util.load_file(path) except: util.logexc(log, ("%s: failed to open, can not" - " phone home that data"), path) + " phone home that data!"), path) submit_keys = {} for k in post_list: diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 74ee18e1..8fe3af57 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -21,12 +21,32 @@ from StringIO 
import StringIO import os -import pwd import socket from cloudinit import helpers from cloudinit import util +PUPPET_CONF_PATH = '/etc/puppet/puppet.conf' +PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/' +PUPPET_SSL_DIR = '/var/lib/puppet/ssl' +PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem' + + +def _autostart_puppet(log): + # Set puppet to automatically start + if os.path.exists('/etc/default/puppet'): + util.subp(['sed', '-i', + '-e', 's/^START=.*/START=yes/', + '/etc/default/puppet'], capture=False) + elif os.path.exists('/bin/systemctl'): + util.subp(['/bin/systemctl', 'enable', 'puppet.service'], + capture=False) + elif os.path.exists('/sbin/chkconfig'): + util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) + else: + log.warn(("Sorry we do not know how to enable" + " puppet services on this system")) + def handle(name, cfg, cloud, log, _args): # If there isn't a puppet key in the configuration don't do anything @@ -43,8 +63,7 @@ def handle(name, cfg, cloud, log, _args): # ... and then update the puppet configuration if 'conf' in puppet_cfg: # Add all sections from the conf object to puppet.conf - puppet_conf_fn = cloud.paths.join(True, '/etc/puppet/puppet.conf') - contents = util.load_file(puppet_conf_fn) + contents = util.load_file(PUPPET_CONF_PATH) # Create object for reading puppet.conf values puppet_config = helpers.DefaultingConfigParser() # Read puppet.conf values from original file in order to be able to @@ -53,28 +72,19 @@ def handle(name, cfg, cloud, log, _args): cleaned_lines = [i.lstrip() for i in contents.splitlines()] cleaned_contents = '\n'.join(cleaned_lines) puppet_config.readfp(StringIO(cleaned_contents), - filename=puppet_conf_fn) + filename=PUPPET_CONF_PATH) for (cfg_name, cfg) in puppet_cfg['conf'].iteritems(): # Cert configuration is a special case # Dump the puppet master ca certificate in the correct place if cfg_name == 'ca_cert': # Puppet ssl sub-directory isn't created yet # Create it with the proper permissions and ownership - pp_ssl_dir = cloud.paths.join(False, '/var/lib/puppet/ssl') - util.ensure_dir(pp_ssl_dir, 0771) - util.chownbyid(pp_ssl_dir, - pwd.getpwnam('puppet').pw_uid, 0) - pp_ssl_certs = cloud.paths.join(False, - '/var/lib/puppet/ssl/certs/') - util.ensure_dir(pp_ssl_certs) - util.chownbyid(pp_ssl_certs, - pwd.getpwnam('puppet').pw_uid, 0) - pp_ssl_ca_certs = cloud.paths.join(False, - ('/var/lib/puppet/' - 'ssl/certs/ca.pem')) - util.write_file(pp_ssl_ca_certs, cfg) - util.chownbyid(pp_ssl_ca_certs, - pwd.getpwnam('puppet').pw_uid, 0) + util.ensure_dir(PUPPET_SSL_DIR, 0771) + util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root') + util.ensure_dir(PUPPET_SSL_CERT_DIR) + util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root') + util.write_file(PUPPET_SSL_CERT_PATH, str(cfg)) + util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root') else: # Iterate throug the config items, we'll use ConfigParser.set # to overwrite or create new items as needed @@ -90,25 +100,11 @@ def handle(name, cfg, cloud, log, _args): puppet_config.set(cfg_name, o, v) # We got all our config as wanted we'll rename # the previous puppet.conf and create our new one - conf_old_fn = cloud.paths.join(False, - '/etc/puppet/puppet.conf.old') - util.rename(puppet_conf_fn, conf_old_fn) - puppet_conf_rw = cloud.paths.join(False, '/etc/puppet/puppet.conf') - util.write_file(puppet_conf_rw, puppet_config.stringify()) + util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH)) + util.write_file(PUPPET_CONF_PATH, puppet_config.stringify()) - # Set puppet to 
automatically start - if os.path.exists('/etc/default/puppet'): - util.subp(['sed', '-i', - '-e', 's/^START=.*/START=yes/', - '/etc/default/puppet'], capture=False) - elif os.path.exists('/bin/systemctl'): - util.subp(['/bin/systemctl', 'enable', 'puppet.service'], - capture=False) - elif os.path.exists('/sbin/chkconfig'): - util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) - else: - log.warn(("Sorry we do not know how to enable" - " puppet services on this system")) + # Set it up so it autostarts + _autostart_puppet(log) # Start puppetd util.subp(['service', 'puppet', 'start'], capture=False) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index e7f27944..b958f332 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -62,7 +62,7 @@ def get_fs_type(st_dev, path, log): raise -def handle(name, cfg, cloud, log, args): +def handle(name, cfg, _cloud, log, args): if len(args) != 0: resize_root = args[0] else: @@ -74,11 +74,10 @@ def handle(name, cfg, cloud, log, args): # TODO(harlowja) is the directory ok to be used?? resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run") - resize_root_d = cloud.paths.join(False, resize_root_d) util.ensure_dir(resize_root_d) # TODO(harlowja): allow what is to be resized to be configurable?? - resize_what = cloud.paths.join(False, "/") + resize_what = "/" with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.", dir=resize_root_d, delete=True) as tfh: devpth = tfh.name diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index 78327526..0c2c6880 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -71,8 +71,7 @@ def handle(name, cfg, cloud, log, _args): try: contents = "%s\n" % (content) - util.write_file(cloud.paths.join(False, filename), - contents, omode=omode) + util.write_file(filename, contents, omode=omode) except Exception: util.logexc(log, "Failed to write to %s", filename) diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py index 65064cfb..598c3a3e 100644 --- a/cloudinit/config/cc_runcmd.py +++ b/cloudinit/config/cc_runcmd.py @@ -33,6 +33,6 @@ def handle(name, cfg, cloud, log, _args): cmd = cfg["runcmd"] try: content = util.shellify(cmd) - util.write_file(cloud.paths.join(False, out_fn), content, 0700) + util.write_file(out_fn, content, 0700) except: util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn) diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py index 8a1440d9..f3eede18 100644 --- a/cloudinit/config/cc_salt_minion.py +++ b/cloudinit/config/cc_salt_minion.py @@ -34,8 +34,7 @@ def handle(name, cfg, cloud, log, _args): cloud.distro.install_packages(["salt-minion"]) # Ensure we can configure files at the right dir - config_dir = cloud.paths.join(False, salt_cfg.get("config_dir", - '/etc/salt')) + config_dir = salt_cfg.get("config_dir", '/etc/salt') util.ensure_dir(config_dir) # ... and then update the salt configuration @@ -47,8 +46,7 @@ def handle(name, cfg, cloud, log, _args): # ... 
copy the key pair if specified if 'public_key' in salt_cfg and 'private_key' in salt_cfg: - pki_dir = cloud.paths.join(False, salt_cfg.get('pki_dir', - '/etc/salt/pki')) + pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki') with util.umask(077): util.ensure_dir(pki_dir) pub_name = os.path.join(pki_dir, 'minion.pub') diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 26c558ad..c6bf62fd 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -114,8 +114,7 @@ def handle(_name, cfg, cloud, log, args): replaced_auth = False # See: man sshd_config - conf_fn = cloud.paths.join(True, ssh_util.DEF_SSHD_CFG) - old_lines = ssh_util.parse_ssh_config(conf_fn) + old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG) new_lines = [] i = 0 for (i, line) in enumerate(old_lines): @@ -134,8 +133,7 @@ def handle(_name, cfg, cloud, log, args): pw_auth)) lines = [str(e) for e in new_lines] - ssh_rw_fn = cloud.paths.join(False, ssh_util.DEF_SSHD_CFG) - util.write_file(ssh_rw_fn, "\n".join(lines)) + util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines)) try: cmd = ['service'] diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 32e48c30..b623d476 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -59,7 +59,7 @@ def handle(_name, cfg, cloud, log, _args): # remove the static keys from the pristine image if cfg.get("ssh_deletekeys", True): - key_pth = cloud.paths.join(False, "/etc/ssh/", "ssh_host_*key*") + key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*") for f in glob.glob(key_pth): try: util.del_file(f) @@ -72,8 +72,7 @@ def handle(_name, cfg, cloud, log, _args): if key in KEY_2_FILE: tgt_fn = KEY_2_FILE[key][0] tgt_perms = KEY_2_FILE[key][1] - util.write_file(cloud.paths.join(False, tgt_fn), - val, tgt_perms) + util.write_file(tgt_fn, val, tgt_perms) for (priv, pub) in PRIV_2_PUB.iteritems(): if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']: @@ -94,7 +93,7 @@ def handle(_name, cfg, cloud, log, _args): 'ssh_genkeytypes', GENERATE_KEY_NAMES) for keytype in genkeys: - keyfile = cloud.paths.join(False, KEY_FILE_TPL % (keytype)) + keyfile = KEY_FILE_TPL % (keytype) util.ensure_dir(os.path.dirname(keyfile)) if not os.path.exists(keyfile): cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile] @@ -118,17 +117,16 @@ def handle(_name, cfg, cloud, log, _args): cfgkeys = cfg["ssh_authorized_keys"] keys.extend(cfgkeys) - apply_credentials(keys, user, cloud.paths, - disable_root, disable_root_opts) + apply_credentials(keys, user, disable_root, disable_root_opts) except: util.logexc(log, "Applying ssh credentials failed!") -def apply_credentials(keys, user, paths, disable_root, disable_root_opts): +def apply_credentials(keys, user, disable_root, disable_root_opts): keys = set(keys) if user: - ssh_util.setup_user_keys(keys, user, '', paths) + ssh_util.setup_user_keys(keys, user, '') if disable_root: if not user: @@ -137,4 +135,4 @@ def apply_credentials(keys, user, paths, disable_root, disable_root_opts): else: key_prefix = '' - ssh_util.setup_user_keys(keys, 'root', key_prefix, paths) + ssh_util.setup_user_keys(keys, 'root', key_prefix) diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 8c9a8806..c38bcea2 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -97,9 +97,8 @@ def handle(name, cfg, cloud, log, _args): "logging of ssh fingerprints 
disabled"), name) hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") - extract_func = ssh_util.extract_authorized_keys (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) for (user_name, _cfg) in users.items(): - (auth_key_fn, auth_key_entries) = extract_func(user_name, cloud.paths) - _pprint_key_entries(user_name, auth_key_fn, - auth_key_entries, hash_meth) + (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name) + _pprint_key_entries(user_name, key_fn, + key_entries, hash_meth) diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index 4d75000f..96103615 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -42,8 +42,7 @@ def handle(name, cfg, cloud, log, _args): raise RuntimeError(("No hosts template could be" " found for distro %s") % (cloud.distro.name)) - out_fn = cloud.paths.join(False, '/etc/hosts') - templater.render_to_file(tpl_fn_name, out_fn, + templater.render_to_file(tpl_fn_name, '/etc/hosts', {'hostname': hostname, 'fqdn': fqdn}) elif manage_hosts == "localhost": diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 2fbb0e9b..869540d2 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -122,8 +122,7 @@ class Distro(object): new_etchosts = StringIO() need_write = False need_change = True - hosts_ro_fn = self._paths.join(True, "/etc/hosts") - for line in util.load_file(hosts_ro_fn).splitlines(): + for line in util.load_file("/etc/hosts").splitlines(): if line.strip().startswith(header): continue if not line.strip() or line.strip().startswith("#"): @@ -147,8 +146,7 @@ class Distro(object): need_write = True if need_write: contents = new_etchosts.getvalue() - util.write_file(self._paths.join(False, "/etc/hosts"), - contents, mode=0644) + util.write_file("/etc/hosts", contents, mode=0644) def _bring_up_interface(self, device_name): cmd = ['ifup', device_name] @@ -262,7 +260,7 @@ class Distro(object): # Import SSH keys if 'ssh_authorized_keys' in kwargs: keys = set(kwargs['ssh_authorized_keys']) or [] - ssh_util.setup_user_keys(keys, name, None, self._paths) + ssh_util.setup_user_keys(keys, name, key_prefix=None) return True diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 88f4e978..cc7e53a0 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -43,7 +43,7 @@ class Distro(distros.Distro): def apply_locale(self, locale, out_fn=None): if not out_fn: - out_fn = self._paths.join(False, '/etc/default/locale') + out_fn = '/etc/default/locale' util.subp(['locale-gen', locale], capture=False) util.subp(['update-locale', locale], capture=False) lines = ["# Created by cloud-init", 'LANG="%s"' % (locale), ""] @@ -54,8 +54,7 @@ class Distro(distros.Distro): self.package_command('install', pkglist) def _write_network(self, settings): - net_fn = self._paths.join(False, "/etc/network/interfaces") - util.write_file(net_fn, settings) + util.write_file("/etc/network/interfaces", settings) return ['all'] def _bring_up_interfaces(self, device_names): @@ -69,12 +68,9 @@ class Distro(distros.Distro): return distros.Distro._bring_up_interfaces(self, device_names) def set_hostname(self, hostname): - out_fn = self._paths.join(False, "/etc/hostname") - self._write_hostname(hostname, out_fn) - if out_fn == '/etc/hostname': - # Only do this if we are running in non-adjusted root mode - LOG.debug("Setting hostname to %s", hostname) - util.subp(['hostname', hostname]) + 
self._write_hostname(hostname, "/etc/hostname") + LOG.debug("Setting hostname to %s", hostname) + util.subp(['hostname', hostname]) def _write_hostname(self, hostname, out_fn): # "" gives trailing newline. @@ -82,16 +78,14 @@ class Distro(distros.Distro): def update_hostname(self, hostname, prev_fn): hostname_prev = self._read_hostname(prev_fn) - read_fn = self._paths.join(True, "/etc/hostname") - hostname_in_etc = self._read_hostname(read_fn) + hostname_in_etc = self._read_hostname("/etc/hostname") update_files = [] if not hostname_prev or hostname_prev != hostname: update_files.append(prev_fn) if (not hostname_in_etc or (hostname_in_etc == hostname_prev and hostname_in_etc != hostname)): - write_fn = self._paths.join(False, "/etc/hostname") - update_files.append(write_fn) + update_files.append("/etc/hostname") for fn in update_files: try: self._write_hostname(hostname, fn) @@ -103,7 +97,6 @@ class Distro(distros.Distro): LOG.debug(("%s differs from /etc/hostname." " Assuming user maintained hostname."), prev_fn) if "/etc/hostname" in update_files: - # Only do this if we are running in non-adjusted root mode LOG.debug("Setting hostname to %s", hostname) util.subp(['hostname', hostname]) @@ -130,9 +123,8 @@ class Distro(distros.Distro): " no file found at %s") % (tz, tz_file)) # "" provides trailing newline during join tz_lines = ["# Created by cloud-init", str(tz), ""] - tz_fn = self._paths.join(False, "/etc/timezone") - util.write_file(tz_fn, "\n".join(tz_lines)) - util.copy(tz_file, self._paths.join(False, "/etc/localtime")) + util.write_file("/etc/timezone", "\n".join(tz_lines)) + util.copy(tz_file, "/etc/localtime") def package_command(self, command, args=None): e = os.environ.copy() diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index a4b20208..985ce3e5 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -302,14 +302,10 @@ class Paths(object): def __init__(self, path_cfgs, ds=None): self.cfgs = path_cfgs # Populate all the initial paths - self.cloud_dir = self.join(False, - path_cfgs.get('cloud_dir', - '/var/lib/cloud')) + self.cloud_dir = path_cfgs.get('cloud_dir', '/var/lib/cloud') self.instance_link = os.path.join(self.cloud_dir, 'instance') self.boot_finished = os.path.join(self.instance_link, "boot-finished") self.upstart_conf_d = path_cfgs.get('upstart_dir') - if self.upstart_conf_d: - self.upstart_conf_d = self.join(False, self.upstart_conf_d) self.seed_dir = os.path.join(self.cloud_dir, 'seed') # This one isn't joined, since it should just be read-only template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/') @@ -328,29 +324,6 @@ class Paths(object): # Set when a datasource becomes active self.datasource = ds - # joins the paths but also appends a read - # or write root if available - def join(self, read_only, *paths): - if read_only: - root = self.cfgs.get('read_root') - else: - root = self.cfgs.get('write_root') - if not paths: - return root - if len(paths) > 1: - joined = os.path.join(*paths) - else: - joined = paths[0] - if root: - pre_joined = joined - # Need to remove any starting '/' since this - # will confuse os.path.join - joined = joined.lstrip("/") - joined = os.path.join(root, joined) - LOG.debug("Translated %s to adjusted path %s (read-only=%s)", - pre_joined, joined, read_only) - return joined - # get_ipath_cur: get the current instance path for an item def get_ipath_cur(self, name=None): ipath = self.instance_link diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index b22369a8..745627d0 100644 --- 
a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -20,8 +20,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from email.mime.multipart import MIMEMultipart - import abc import os diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 88a11a1a..dd6b742f 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -212,17 +212,15 @@ def update_authorized_keys(old_entries, keys): return '\n'.join(lines) -def users_ssh_info(username, paths): +def users_ssh_info(username): pw_ent = pwd.getpwnam(username) - if not pw_ent: + if not pw_ent or not pw_ent.pw_dir: raise RuntimeError("Unable to get ssh info for user %r" % (username)) - ssh_dir = paths.join(False, os.path.join(pw_ent.pw_dir, '.ssh')) - return (ssh_dir, pw_ent) + return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent) -def extract_authorized_keys(username, paths): - (ssh_dir, pw_ent) = users_ssh_info(username, paths) - sshd_conf_fn = paths.join(True, DEF_SSHD_CFG) +def extract_authorized_keys(username): + (ssh_dir, pw_ent) = users_ssh_info(username) auth_key_fn = None with util.SeLinuxGuard(ssh_dir, recursive=True): try: @@ -231,7 +229,7 @@ def extract_authorized_keys(username, paths): # The following tokens are defined: %% is replaced by a literal # '%', %h is replaced by the home directory of the user being # authenticated and %u is replaced by the username of that user. - ssh_cfg = parse_ssh_config_map(sshd_conf_fn) + ssh_cfg = parse_ssh_config_map(DEF_SSHD_CFG) auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip() if not auth_key_fn: auth_key_fn = "%h/.ssh/authorized_keys" @@ -240,7 +238,6 @@ def extract_authorized_keys(username, paths): auth_key_fn = auth_key_fn.replace("%%", '%') if not auth_key_fn.startswith('/'): auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn) - auth_key_fn = paths.join(False, auth_key_fn) except (IOError, OSError): # Give up and use a default key filename auth_key_fn = os.path.join(ssh_dir, 'authorized_keys') @@ -248,14 +245,13 @@ def extract_authorized_keys(username, paths): " in ssh config" " from %r, using 'AuthorizedKeysFile' file" " %r instead"), - sshd_conf_fn, auth_key_fn) - auth_key_entries = parse_authorized_keys(auth_key_fn) - return (auth_key_fn, auth_key_entries) + DEF_SSHD_CFG, auth_key_fn) + return (auth_key_fn, parse_authorized_keys(auth_key_fn)) -def setup_user_keys(keys, username, key_prefix, paths): +def setup_user_keys(keys, username, key_prefix): # Make sure the users .ssh dir is setup accordingly - (ssh_dir, pwent) = users_ssh_info(username, paths) + (ssh_dir, pwent) = users_ssh_info(username) if not os.path.isdir(ssh_dir): util.ensure_dir(ssh_dir, mode=0700) util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid) @@ -267,7 +263,7 @@ def setup_user_keys(keys, username, key_prefix, paths): key_entries.append(parser.parse(str(k), def_opt=key_prefix)) # Extract the old and make the new - (auth_key_fn, auth_key_entries) = extract_authorized_keys(username, paths) + (auth_key_fn, auth_key_entries) = extract_authorized_keys(username) with util.SeLinuxGuard(ssh_dir, recursive=True): content = update_authorized_keys(auth_key_entries, key_entries) util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700) diff --git a/pylintrc b/pylintrc new file mode 100644 index 00000000..ee886510 --- /dev/null +++ b/pylintrc @@ -0,0 +1,19 @@ +[General] +init-hook='import sys; sys.path.append("tests/")' + +[MESSAGES CONTROL] +# See: http://pylint-messages.wikidot.com/all-codes +# W0142: *args and **kwargs are 
fine. +# W0511: TODOs in code comments are fine. +# W0702: No exception type(s) specified +# W0703: Catch "Exception" +# C0103: Invalid name +# C0111: Missing docstring +disable=W0142,W0511,W0702,W0703,C0103,C0111 + +[REPORTS] +reports=no +include-ids=yes + +[FORMAT] +max-line-length=79 diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/__init__.py b/tests/unittests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_datasource/__init__.py b/tests/unittests/test_datasource/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_distros/__init__.py b/tests/unittests/test_distros/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_filters/__init__.py b/tests/unittests/test_filters/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py index 1e9b9053..773bb312 100644 --- a/tests/unittests/test_filters/test_launch_index.py +++ b/tests/unittests/test_filters/test_launch_index.py @@ -1,14 +1,6 @@ import copy -import os -import sys -top_dir = os.path.join(os.path.dirname(__file__), os.pardir, "helpers.py") -top_dir = os.path.abspath(top_dir) -if os.path.exists(top_dir): - sys.path.insert(0, os.path.dirname(top_dir)) - - -import helpers +from tests.unittests import helpers import itertools diff --git a/tests/unittests/test_handler/__init__.py b/tests/unittests/test_handler/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py index d3df5c50..d73c9fa9 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/test_handler/test_handler_ca_certs.py @@ -77,7 +77,7 @@ class TestConfig(MockerTestCase): """Test that a single cert gets passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1"]}} - self.mock_add(self.paths, ["CERT1"]) + self.mock_add(["CERT1"]) self.mock_update() self.mocker.replay() @@ -87,7 +87,7 @@ class TestConfig(MockerTestCase): """Test that multiple certs get passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}} - self.mock_add(self.paths, ["CERT1", "CERT2"]) + self.mock_add(["CERT1", "CERT2"]) self.mock_update() self.mocker.replay() @@ -97,7 +97,7 @@ class TestConfig(MockerTestCase): """Test remove_defaults works as expected.""" config = {"ca-certs": {"remove-defaults": True}} - self.mock_remove(self.paths) + self.mock_remove() self.mock_update() self.mocker.replay() @@ -116,8 +116,8 @@ class TestConfig(MockerTestCase): """Test remove_defaults is not called when config value is False.""" config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}} - self.mock_remove(self.paths) - self.mock_add(self.paths, ["CERT1"]) + self.mock_remove() + self.mock_add(["CERT1"]) self.mock_update() self.mocker.replay() @@ -136,7 +136,7 @@ class TestAddCaCerts(MockerTestCase): """Test that no certificate are written if not provided.""" self.mocker.replace(util.write_file, passthrough=False) self.mocker.replay() - cc_ca_certs.add_ca_certs(self.paths, []) + cc_ca_certs.add_ca_certs([]) def test_single_cert(self): """Test adding a single certificate to the trusted CAs.""" @@ -149,7 +149,7 @@ class TestAddCaCerts(MockerTestCase): "\ncloud-init-ca-certs.crt", omode="ab") self.mocker.replay() - 
cc_ca_certs.add_ca_certs(self.paths, [cert]) + cc_ca_certs.add_ca_certs([cert]) def test_multiple_certs(self): """Test adding multiple certificates to the trusted CAs.""" @@ -163,7 +163,7 @@ class TestAddCaCerts(MockerTestCase): "\ncloud-init-ca-certs.crt", omode="ab") self.mocker.replay() - cc_ca_certs.add_ca_certs(self.paths, certs) + cc_ca_certs.add_ca_certs(certs) class TestUpdateCaCerts(MockerTestCase): @@ -198,4 +198,4 @@ class TestRemoveDefaultCaCerts(MockerTestCase): "ca-certificates ca-certificates/trust_new_crts select no") self.mocker.replay() - cc_ca_certs.remove_default_ca_certs(self.paths) + cc_ca_certs.remove_default_ca_certs() diff --git a/tests/unittests/test_runs/__init__.py b/tests/unittests/test_runs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index 1e852e1e..22d6cf2c 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -1,14 +1,6 @@ import os -import sys -# Allow running this test individually -top_dir = os.path.join(os.path.dirname(__file__), os.pardir, "helpers.py") -top_dir = os.path.abspath(top_dir) -if os.path.exists(top_dir): - sys.path.insert(0, os.path.dirname(top_dir)) - - -import helpers +from tests.unittests import helpers from cloudinit.settings import (PER_INSTANCE) from cloudinit import stages diff --git a/tools/run-pylint b/tools/run-pylint index 7ef44ac5..b74efda9 100755 --- a/tools/run-pylint +++ b/tools/run-pylint @@ -6,23 +6,16 @@ else files=( "$@" ); fi +RC_FILE="pylintrc" +if [ ! -f $RC_FILE ]; then + RC_FILE="../pylintrc" +fi + cmd=( pylint - --reports=n - --include-ids=y - --max-line-length=79 - + --rcfile=$RC_FILE --disable=R --disable=I - - --disable=W0142 # Used * or ** magic - --disable=W0511 # TODO/FIXME note - --disable=W0702 # No exception type(s) specified - --disable=W0703 # Catch "Exception" - - --disable=C0103 # Invalid name - --disable=C0111 # Missing docstring - "${files[@]}" ) -- cgit v1.2.3 From c508fa0c4159f93df077a2d46ed481fd5d8c8803 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 6 Nov 2012 15:28:29 -0500 Subject: tools/Z99-cloud-locale-test.sh: avoid warning when shell is zsh LP: #1073077 --- ChangeLog | 2 ++ tools/Z99-cloud-locale-test.sh | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index d03c0293..1243ce7f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -6,6 +6,8 @@ - cc_landscape: restart landscape after install or config (LP: #1070345) - multipart/archive. do not fail on unknown headers in multipart mime or cloud-archive config (LP: #1065116). + - tools/Z99-cloud-locale-test.sh: avoid warning when user's shell is + zsh (LP: #1073077) 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. 
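Stepping back for a moment: the hunks above that touch cc_puppet, cc_resizefs, cc_rsyslog, cc_runcmd, cc_salt_minion, cc_set_passwords, cc_ssh, the distro classes, ssh_util and helpers.Paths all make the same move — the Paths.join() read-root/write-root indirection is removed and callers hand plain absolute paths (usually hoisted into module-level constants) straight to the util helpers. A minimal before/after sketch of that pattern, using hypothetical names rather than code lifted from the tree:

# Before: every path was routed through cloud.paths.join() so that a
# configured read_root/write_root could be prepended at runtime, e.g.
#   conf_fn = cloud.paths.join(True, '/etc/puppet/puppet.conf')    # read side
#   out_fn = cloud.paths.join(False, '/etc/puppet/puppet.conf')    # write side
#
# After: the indirection is gone; the path is a module-level constant and the
# util helpers take it directly.

PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'


def rewrite_conf(load_file, write_file, transform):
    # load_file/write_file stand in for cloudinit.util.load_file/write_file so
    # this sketch stays self-contained; transform edits the file contents.
    contents = load_file(PUPPET_CONF_PATH)
    write_file(PUPPET_CONF_PATH, transform(contents))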
diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh index 8ad485e8..4012beeb 100755 --- a/tools/Z99-cloud-locale-test.sh +++ b/tools/Z99-cloud-locale-test.sh @@ -22,7 +22,7 @@ locale_warn() { case "$w1" in locale:) bad_names="${bad_names} ${w4}";; *) - key=${w1%%=*} + key=${w1%%\=*} val=${w1#*=} val=${val#\"} val=${val%\"} @@ -31,7 +31,7 @@ locale_warn() { done for bad in $bad_names; do for var in ${vars}; do - [ "${bad}" = "${var%=*}" ] || continue + [ "${bad}" = "${var%\=*}" ] || continue value=${var#*=} [ "${bad_lcs#* ${value}}" = "${bad_lcs}" ] && bad_lcs="${bad_lcs} ${value}" -- cgit v1.2.3 From 7d027a031a3649ac04965a09cd26563ac9d760fd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 6 Nov 2012 14:13:30 -0800 Subject: Fix the case where a unknown type is seen and it has contents which are in unicode which seems to cause python to blow-up when this happens since 'string-escape' doesn't work on unicode (at least in 2.6). LP: #1075756 --- cloudinit/handlers/__init__.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 99caed1f..e843eb75 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -171,7 +171,11 @@ def walker_callback(pdata, ctype, filename, payload): elif payload: # Extract the first line or 24 bytes for displaying in the log start = _extract_first_or_bytes(payload, 24) - details = "'%s...'" % (start.encode("string-escape")) + try: + details = "'%s...'" % (start.encode("string-escape")) + except TypeError: + # Unicode doesn't support string-escape... + details = "'%s...'" % (start) if ctype == NOT_MULTIPART_TYPE: LOG.warning("Unhandled non-multipart (%s) userdata: %s", ctype, details) -- cgit v1.2.3 From 7ec0ef04b975eb5b4c40f7ae746d706585c73a02 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 6 Nov 2012 14:24:19 -0800 Subject: Use a method instead + at least attempt the unicode-escape path. --- cloudinit/handlers/__init__.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index e843eb75..d847f331 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -160,6 +160,19 @@ def _extract_first_or_bytes(blob, size): return start +def _escape_string(text): + try: + text = "'%s...'" % (text.encode("string-escape")) + except TypeError: + try: + # Unicode doesn't support string-escape... + text = "'%s...'" % (text.encode('unicode-escape')) + except TypeError: + # Give up... + text = "'%s...'" % (text) + return text + + def walker_callback(pdata, ctype, filename, payload): if ctype in PART_CONTENT_TYPES: walker_handle_handler(pdata, ctype, filename, payload) @@ -171,11 +184,7 @@ def walker_callback(pdata, ctype, filename, payload): elif payload: # Extract the first line or 24 bytes for displaying in the log start = _extract_first_or_bytes(payload, 24) - try: - details = "'%s...'" % (start.encode("string-escape")) - except TypeError: - # Unicode doesn't support string-escape... - details = "'%s...'" % (start) + details = _escape_string(start) if ctype == NOT_MULTIPART_TYPE: LOG.warning("Unhandled non-multipart (%s) userdata: %s", ctype, details) -- cgit v1.2.3 From 9850895442afe55079cecf4fd96fe8430ed960ea Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 6 Nov 2012 14:27:56 -0800 Subject: Do the append after escape. 
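A note on the handler patches just above, since the reasoning is compressed into the commit messages: under Python 2 (at least 2.6, per the commit text) the 'string-escape' codec does not work on unicode payloads and can raise TypeError, which is what the first fix catches and what the _escape_string() helper then generalises with a unicode-escape fallback. A standalone Python 2-era sketch of the same cascade, mirroring the handler code above:

def escape_for_log(text):
    # Mirror of _escape_string() above: try the byte-string codec first,
    # fall back to unicode-escape for unicode payloads, and finally give up
    # and return the text untouched rather than blow up while logging.
    try:
        return text.encode("string-escape")
    except TypeError:
        try:
            return text.encode("unicode-escape")
        except TypeError:
            return text


# The quoting/truncation is then applied after escaping, which is what the
# "append after escape" patch below changes:
#   details = "'%s...'" % (escape_for_log(start))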
--- cloudinit/handlers/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index d847f331..8d6dcd4d 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -162,14 +162,14 @@ def _extract_first_or_bytes(blob, size): def _escape_string(text): try: - text = "'%s...'" % (text.encode("string-escape")) + return text.encode("string-escape") except TypeError: try: # Unicode doesn't support string-escape... - text = "'%s...'" % (text.encode('unicode-escape')) + return text.encode('unicode-escape') except TypeError: # Give up... - text = "'%s...'" % (text) + pass return text @@ -184,7 +184,7 @@ def walker_callback(pdata, ctype, filename, payload): elif payload: # Extract the first line or 24 bytes for displaying in the log start = _extract_first_or_bytes(payload, 24) - details = _escape_string(start) + details = "'%s...'" % (_escape_string(start)) if ctype == NOT_MULTIPART_TYPE: LOG.warning("Unhandled non-multipart (%s) userdata: %s", ctype, details) -- cgit v1.2.3 From 5ffd19a0fbc473e34ac78f80d264c08ac0125e2b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 7 Nov 2012 09:43:25 -0500 Subject: fix pep8 warnings --- cloudinit/distros/__init__.py | 2 +- tests/unittests/test_datasource/test_configdrive.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 21efe8d9..c848b909 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -439,7 +439,7 @@ def _normalize_groups(grp_cfg): # configuration. # # The output is a dictionary of user -# names => user config which is the standard +# names => user config which is the standard # form used in the rest of cloud-init. 
Note # the default user will have a special config # entry 'default' which will be marked as true diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 4fa13db8..00379e03 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -6,11 +6,10 @@ import os.path import mocker from mocker import MockerTestCase -from cloudinit.sources import DataSourceConfigDrive as ds +from cloudinit import helpers from cloudinit import settings +from cloudinit.sources import DataSourceConfigDrive as ds from cloudinit import util -from cloudinit import helpers - PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' -- cgit v1.2.3 From df3300e23324f2bb4d0d21a433b03708dc2feaeb Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 7 Nov 2012 10:07:03 -0500 Subject: update config to address name change --- config/cloud.cfg | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/config/cloud.cfg b/config/cloud.cfg index b3411d11..51a05821 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -43,7 +43,8 @@ cloud_config_modules: - set-passwords - grub-dpkg - apt-pipelining - - apt-update-upgrade + - apt-configure + - package-update-upgrade-install - landscape - timezone - puppet -- cgit v1.2.3 From 0da58fe113726c7654bca54b365d95044b44ef87 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 7 Nov 2012 10:17:50 -0500 Subject: add ChangeLog entry --- ChangeLog | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ChangeLog b/ChangeLog index 4287d58f..2896f71a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -3,6 +3,9 @@ - config-drive: map hostname to local-hostname (LP: #1061964) - landscape: install landscape-client package if not installed. only take action if cloud-config is present (LP: #1066115) + - split 'apt-update-upgrade' config module into 'apt-configure' and + 'package-update-upgrade-install'. The 'package-update-upgrade-install' + will be a cross distro module. 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. 
-- cgit v1.2.3 From 180620470ba9aae4aac804b8bd66d3af8bd71ee4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 7 Nov 2012 10:29:20 -0500 Subject: adjust documentation to account for apt/package aliases --- doc/examples/cloud-config.txt | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index 56a6c35a..04bb5df1 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -3,15 +3,20 @@ # (ie run apt-get update) # # Default: true -# -apt_update: false +# Aliases: apt_update +package_update: false # Upgrade the instance on first boot # (ie run apt-get upgrade) # # Default: false -# -apt_upgrade: true +# Aliases: apt_upgrade +package_upgrade: true + +# Reboot after package install/update if necessary +# Default: false +# Aliases: apt_reboot_if_required +package_reboot_if_required: true # Add apt repositories # -- cgit v1.2.3 From 8b677bc80a39a0eb1a3cc7989c6bffb959fc7a69 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 7 Nov 2012 10:35:39 -0500 Subject: trivial: -name first is faster due to no need for stat --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 8f5646b7..88c90b9b 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ CWD=$(shell pwd) -PY_FILES=$(shell find cloudinit bin tests tools -type f -name "*.py") +PY_FILES=$(shell find cloudinit bin tests tools -name "*.py" -type f ) PY_FILES+="bin/cloud-init" all: test -- cgit v1.2.3 From 1e6fc277a1c8d695c37741cc31f5ddab3d5b5600 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 7 Nov 2012 16:08:17 -0500 Subject: remove dead code from DataSourceEc2 --- cloudinit/sources/DataSourceEc2.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 3686fa10..cff50669 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -86,9 +86,6 @@ class DataSourceEc2(sources.DataSource): def get_instance_id(self): return self.metadata['instance-id'] - def get_availability_zone(self): - return self.metadata['placement']['availability-zone'] - def _get_url_settings(self): mcfg = self.ds_cfg if not mcfg: @@ -198,19 +195,6 @@ class DataSourceEc2(sources.DataSource): return None return ofound - def is_vpc(self): - # See: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/615545 - # Detect that the machine was launched in a VPC. - # But I did notice that when in a VPC, meta-data - # does not have public-ipv4 and public-hostname - # listed as a possibility. - ph = "public-hostname" - p4 = "public-ipv4" - if ((ph not in self.metadata or self.metadata[ph] == "") and - (p4 not in self.metadata or self.metadata[p4] == "")): - return True - return False - @property def availability_zone(self): try: -- cgit v1.2.3 From 7ea02ab1d8ee0f400a84ee2d688e67ffbc449bf0 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 7 Nov 2012 21:00:33 -0800 Subject: Add a makefile yaml checking target and fix the cases where the cc yaml is not correct. 
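Before the yaml-checking patch below, one clarification on the doc/examples/cloud-config.txt hunk above: the generic package_update / package_upgrade / package_reboot_if_required keys are now the primary spellings, with the old apt_* names kept as aliases. Purely as an illustration (a hypothetical helper, not cloud-init's actual config-reading code), a module honouring both spellings might do:

def get_bool_with_alias(cfg, name, alias, default=False):
    # Hypothetical helper: prefer the new generic key (e.g. 'package_update')
    # but fall back to the legacy alias (e.g. 'apt_update') when only that is
    # present in the parsed cloud-config dictionary.
    if name in cfg:
        return bool(cfg[name])
    if alias in cfg:
        return bool(cfg[alias])
    return default


cfg = {'apt_update': True}   # legacy spelling still accepted
do_update = get_bool_with_alias(cfg, 'package_update', 'apt_update')
do_upgrade = get_bool_with_alias(cfg, 'package_upgrade', 'apt_upgrade')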
--- Makefile | 9 ++++++++- doc/examples/cloud-config.txt | 3 +-- tests/configs/sample1.yaml | 1 - tools/validate-yaml.py | 20 ++++++++++++++++++++ 4 files changed, 29 insertions(+), 4 deletions(-) create mode 100755 tools/validate-yaml.py diff --git a/Makefile b/Makefile index 88c90b9b..2a6be961 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,10 @@ CWD=$(shell pwd) PY_FILES=$(shell find cloudinit bin tests tools -name "*.py" -type f ) PY_FILES+="bin/cloud-init" +YAML_FILES=$(shell find cloudinit bin tests tools -name "*.yaml" -type f ) +YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f ) + + all: test pep8: @@ -23,11 +27,14 @@ clean: rm -rf /var/log/cloud-init.log \ /var/lib/cloud/ +yaml: + @$(CWD)/tools/validate-yaml.py $(YAML_FILES) + rpm: ./packages/brpm deb: ./packages/bddeb -.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb +.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb yaml diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index 04bb5df1..12bf2c91 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -355,8 +355,7 @@ rsyslog: - ':syslogtag, isequal, "[CLOUDINIT]" /var/log/cloud-foo.log' - content: "*.* @@192.0.2.1:10514" - filename: 01-examplecom.conf - content: | - *.* @@syslogd.example.com + content: "*.* @@syslogd.example.com" # resize_rootfs should the / filesytem be resized on first boot # this allows you to launch an instance with a larger disk / partition diff --git a/tests/configs/sample1.yaml b/tests/configs/sample1.yaml index 24e874ee..6231f293 100644 --- a/tests/configs/sample1.yaml +++ b/tests/configs/sample1.yaml @@ -50,4 +50,3 @@ runcmd: byobu_by_default: user -output: {all: '| tee -a /var/log/cloud-init-output.log'} diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py new file mode 100755 index 00000000..d3218e40 --- /dev/null +++ b/tools/validate-yaml.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python + +"""Try to read a YAML file and report any errors. +""" + +import sys + +import yaml + + +if __name__ == "__main__": + for fn in sys.argv[1:]: + sys.stdout.write("%s" % (fn)) + try: + fh = open(fn, 'r') + yaml.safe_load(fh.read()) + fh.close() + sys.stdout.write(" - ok\n") + except Exception, e: + sys.stdout.write(" - bad (%s)\n" % (e)) -- cgit v1.2.3 From 791598f2929a5b8b6bb380f7f16ec568db96aba6 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 7 Nov 2012 21:34:41 -0800 Subject: Start adding a 'migrator' module that can be used to aid in the moving of older versions of cloud-inits data to newer versions of cloud-inits data. 1. Move the semaphores for the current instance to there canonicalized names and use the canonicalized in the file 'locking' code --- cloudinit/config/cc_migrator.py | 53 +++++++++++++++++++++++++++++++++++++++++ cloudinit/helpers.py | 9 ++++++- config/cloud.cfg | 1 + 3 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 cloudinit/config/cc_migrator.py diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py new file mode 100644 index 00000000..b71d1d17 --- /dev/null +++ b/cloudinit/config/cc_migrator.py @@ -0,0 +1,53 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +import shutil + +from cloudinit import helpers +from cloudinit import util + +from cloudinit.settings import PER_ALWAYS + +frequency = PER_ALWAYS + + +def _migrate_canon_sems(cloud): + sem_path = cloud.paths.get_ipath('sem') + if not sem_path or not os.path.exists(sem_path): + return 0 + am_adjusted = 0 + for p in os.listdir(sem_path): + full_path = os.path.join(sem_path, p) + if os.path.isfile(full_path): + canon_p = helpers.canon_sem_name(p) + if canon_p != p: + new_path = os.path.join(sem_path, p) + shutil.move(full_path, new_path) + am_adjusted += 1 + return am_adjusted + + +def handle(name, cfg, cloud, log, _args): + do_migrate = util.get_cfg_option_str(cfg, "migrate", True) + if not util.translate_bool(do_migrate): + log.debug("Skipping module named %s, migration disabled", name) + return + sems_moved = _migrate_canon_sems(cloud) + log.debug("Migrated %s semaphore files to there canonicalized names", + sems_moved) diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 985ce3e5..d26625a0 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -71,12 +71,17 @@ class FileLock(object): return "<%s using file %r>" % (util.obj_name(self), self.fn) +def canon_sem_name(name): + return name.replace("-", "_") + + class FileSemaphores(object): - def __init__(self, sem_path): + def __init__(self, sem_path): self.sem_path = sem_path @contextlib.contextmanager def lock(self, name, freq, clear_on_fail=False): + name = canon_sem_name(name) try: yield self._acquire(name, freq) except: @@ -85,6 +90,7 @@ class FileSemaphores(object): raise def clear(self, name, freq): + name = canon_sem_name(name) sem_file = self._get_path(name, freq) try: util.del_file(sem_file) @@ -119,6 +125,7 @@ class FileSemaphores(object): def has_run(self, name, freq): if not freq or freq == PER_ALWAYS: return False + name = canon_sem_name(name) sem_file = self._get_path(name, freq) # This isn't really a good atomic check # but it suffices for where and when cloudinit runs diff --git a/config/cloud.cfg b/config/cloud.cfg index 05bb4eef..ad100fff 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -23,6 +23,7 @@ preserve_hostname: false # The modules that run in the 'init' stage cloud_init_modules: + - migrator - bootcmd - write-files - resizefs -- cgit v1.2.3 From 3de3c535f37e40a79b36997a93fa218534117397 Mon Sep 17 00:00:00 2001 From: harlowja Date: Wed, 7 Nov 2012 23:16:21 -0800 Subject: 1. Check the name and not the full path when applying the canon routine. 2. Add in a function to migrate legacy semaphores to new semaphores. 
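One detail worth flagging before the follow-up diff below: the first version of _migrate_canon_sems() above computes canon_p but then builds new_path from the original name p, so the shutil.move() is effectively a rename onto itself and nothing changes on disk; it also canonicalizes the whole filename, extension included. The follow-up ("check the name and not the full path") fixes both by splitting the frequency extension off first. A self-contained sketch of the corrected rename logic, mirroring the code that follows:

import os
import shutil


def canon_sem_name(name):
    # Same canonicalization as helpers.canon_sem_name() above.
    return name.replace("-", "_")


def migrate_canon_sems(sem_path):
    # Canonicalize only the basename of each semaphore file (the extension is
    # the run frequency) and move the file to its new name.  Returns the
    # number of files that were renamed.
    adjusted = 0
    for p in os.listdir(sem_path):
        full_path = os.path.join(sem_path, p)
        if not os.path.isfile(full_path):
            continue
        (name, ext) = os.path.splitext(p)
        canon_name = canon_sem_name(name)
        if canon_name != name:
            shutil.move(full_path, os.path.join(sem_path, canon_name + ext))
            adjusted += 1
    return adjusted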
--- cloudinit/config/cc_migrator.py | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py index b71d1d17..56f33d42 100644 --- a/cloudinit/config/cc_migrator.py +++ b/cloudinit/config/cc_migrator.py @@ -35,14 +35,41 @@ def _migrate_canon_sems(cloud): for p in os.listdir(sem_path): full_path = os.path.join(sem_path, p) if os.path.isfile(full_path): - canon_p = helpers.canon_sem_name(p) - if canon_p != p: - new_path = os.path.join(sem_path, p) + (name, ext) = os.path.splitext(p) + canon_name = helpers.canon_sem_name(name) + if canon_name != name: + new_path = os.path.join(sem_path, canon_name + ext) shutil.move(full_path, new_path) am_adjusted += 1 return am_adjusted +def _migrate_legacy_sems(cloud, log): + sem_path = cloud.paths.get_ipath('sem') + touch_there = { + 'apt-update-upgrade': [ + 'apt-configure', + 'package-update-upgrade-install', + ], + } + sem_helper = helpers.FileSemaphores(sem_path) + for (mod_name, migrate_to) in touch_there.items(): + possibles = [mod_name, helpers.canon_sem_name(mod_name)] + old_exists = [] + for p in os.listdir(sem_path): + (name, _ext) = os.path.splitext(p) + if name in possibles and os.path.isfile(p): + old_exists.append(p) + for p in old_exists: + util.del_file(os.path.join(sem_path, p)) + (_name, freq) = os.path.splitext(p) + for m in migrate_to: + log.debug("Migrating %s => %s with the same frequency", + p, m) + with sem_helper.lock(m, freq): + pass + + def handle(name, cfg, cloud, log, _args): do_migrate = util.get_cfg_option_str(cfg, "migrate", True) if not util.translate_bool(do_migrate): @@ -51,3 +78,4 @@ def handle(name, cfg, cloud, log, _args): sems_moved = _migrate_canon_sems(cloud) log.debug("Migrated %s semaphore files to there canonicalized names", sems_moved) + _migrate_legacy_sems(cloud, log) -- cgit v1.2.3 From 196badd4cfa5f9f76be1138fb2d073649af3e031 Mon Sep 17 00:00:00 2001 From: harlowja Date: Wed, 7 Nov 2012 23:20:40 -0800 Subject: 1. Ensure that the sem_path exists and is actually a valid value returned. 2. 
Adjust variable naming --- cloudinit/config/cc_migrator.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py index 56f33d42..58232fc9 100644 --- a/cloudinit/config/cc_migrator.py +++ b/cloudinit/config/cc_migrator.py @@ -46,14 +46,16 @@ def _migrate_canon_sems(cloud): def _migrate_legacy_sems(cloud, log): sem_path = cloud.paths.get_ipath('sem') - touch_there = { + if not sem_path or not os.path.exists(sem_path): + return + legacy_adjust = { 'apt-update-upgrade': [ 'apt-configure', 'package-update-upgrade-install', ], } sem_helper = helpers.FileSemaphores(sem_path) - for (mod_name, migrate_to) in touch_there.items(): + for (mod_name, migrate_to) in legacy_adjust.items(): possibles = [mod_name, helpers.canon_sem_name(mod_name)] old_exists = [] for p in os.listdir(sem_path): -- cgit v1.2.3 From ce14139c0f94b99c8c47192620b0a9faf66a96a2 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 8 Nov 2012 09:30:51 -0500 Subject: revert old zsh fix (revno 697) --- tools/Z99-cloud-locale-test.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh index 4012beeb..8ad485e8 100755 --- a/tools/Z99-cloud-locale-test.sh +++ b/tools/Z99-cloud-locale-test.sh @@ -22,7 +22,7 @@ locale_warn() { case "$w1" in locale:) bad_names="${bad_names} ${w4}";; *) - key=${w1%%\=*} + key=${w1%%=*} val=${w1#*=} val=${val#\"} val=${val%\"} @@ -31,7 +31,7 @@ locale_warn() { done for bad in $bad_names; do for var in ${vars}; do - [ "${bad}" = "${var%\=*}" ] || continue + [ "${bad}" = "${var%=*}" ] || continue value=${var#*=} [ "${bad_lcs#* ${value}}" = "${bad_lcs}" ] && bad_lcs="${bad_lcs} ${value}" -- cgit v1.2.3 From 380bd3f3417c640191bdf90f6622f8bbbe12b5c7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 8 Nov 2012 09:31:11 -0500 Subject: remove unused variable 'cr'. fix usage of 'value' to local 'val' --- tools/Z99-cloud-locale-test.sh | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh index 8ad485e8..97c92166 100755 --- a/tools/Z99-cloud-locale-test.sh +++ b/tools/Z99-cloud-locale-test.sh @@ -10,9 +10,7 @@ # locale_warn() { - local cr=" -" - local bad_names="" bad_lcs="" key="" value="" var="" + local bad_names="" bad_lcs="" key="" val="" var="" vars="" local w1 w2 w3 w4 remain # locale is expected to output either: # VARIABLE= @@ -32,9 +30,9 @@ locale_warn() { for bad in $bad_names; do for var in ${vars}; do [ "${bad}" = "${var%=*}" ] || continue - value=${var#*=} - [ "${bad_lcs#* ${value}}" = "${bad_lcs}" ] && - bad_lcs="${bad_lcs} ${value}" + val=${var#*=} + [ "${bad_lcs#* ${val}}" = "${bad_lcs}" ] && + bad_lcs="${bad_lcs} ${val}" break done done -- cgit v1.2.3 From 956e94d16ab9c94471d3829424e8bd5183f3a735 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 8 Nov 2012 09:31:26 -0500 Subject: work with zsh by using 'emulate -L sh'. This makes zsh act like 'sh', but only for the function local function. This way, we do not affect the user's shell, but get the behavior we want. 
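A brief note on the legacy-semaphore migration a few hunks back, since the idiom is easy to miss: after deleting the old apt-update-upgrade markers, the replacement modules are marked as already run simply by taking their semaphore lock and immediately releasing it, which leaves the marker file written on acquire in place. The stand-in below illustrates the idea; the real class is cloudinit.helpers.FileSemaphores and the real frequency strings live in cloudinit.settings, so the names here are illustrative only.

import contextlib
import os


class FakeSemaphores(object):
    # Stand-in with roughly the surface used above: a semaphore is just a
    # file named "canonical_name.frequency" under sem_path.
    def __init__(self, sem_path):
        self.sem_path = sem_path

    def _get_path(self, name, freq):
        canon = name.replace("-", "_")
        return os.path.join(self.sem_path, "%s.%s" % (canon, freq))

    @contextlib.contextmanager
    def lock(self, name, freq):
        path = self._get_path(name, freq)
        with open(path, "w") as fh:     # acquiring writes the marker file
            fh.write("created\n")
        yield path                      # ...and releasing leaves it in place

    def has_run(self, name, freq):
        return os.path.exists(self._get_path(name, freq))


def mark_ran(sem_path, module_name, freq="once-per-instance"):
    # The migrator's trick: lock + pass leaves the semaphore behind, so the
    # module is treated as already run for this instance.
    sems = FakeSemaphores(sem_path)
    with sems.lock(module_name, freq):
        pass
    return sems.has_run(module_name, freq)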
--- tools/Z99-cloud-locale-test.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh index 97c92166..3c51f22d 100755 --- a/tools/Z99-cloud-locale-test.sh +++ b/tools/Z99-cloud-locale-test.sh @@ -12,6 +12,11 @@ locale_warn() { local bad_names="" bad_lcs="" key="" val="" var="" vars="" local w1 w2 w3 w4 remain + + # if shell is zsh, act like sh only for this function (-L). + # The behavior change will not permenently affect user's shell. + [ "${ZSH_NAME+zsh}" = "zsh" ] && emulate -L sh + # locale is expected to output either: # VARIABLE= # VARIABLE="value" -- cgit v1.2.3 From d8e82fb7bc9da6259b158804ae3d8343050c67aa Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 8 Nov 2012 08:55:04 -0800 Subject: Add non-zero exit code when bad yamls are found instead of returning zero in that case. --- tools/validate-yaml.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py index d3218e40..eda59cb8 100755 --- a/tools/validate-yaml.py +++ b/tools/validate-yaml.py @@ -9,6 +9,7 @@ import yaml if __name__ == "__main__": + bads = 0 for fn in sys.argv[1:]: sys.stdout.write("%s" % (fn)) try: @@ -18,3 +19,8 @@ if __name__ == "__main__": sys.stdout.write(" - ok\n") except Exception, e: sys.stdout.write(" - bad (%s)\n" % (e)) + bads += 1 + if bads > 0: + sys.exit(1) + else: + sys.exit(0) -- cgit v1.2.3 From 154ad87b29344ea4d29d92f8559f61bb6efe6530 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 8 Nov 2012 16:30:57 -0800 Subject: Ensure that at needed stages the local variables of the init class are reset so that when they are regenerated that they will use the updated data instead of using previous data (since they weren't reset). LP: #1076811 --- cloudinit/stages.py | 31 +++++++++++++-- tests/data/user_data.1.txt | 15 +++++++ tests/unittests/test_merging.py | 62 +++++++++++++++++++++++++++++ tests/unittests/test_runs/test_merge_run.py | 52 ++++++++++++++++++++++++ tests/unittests/test_util.py | 59 --------------------------- tools/run-pep8 | 13 +++++- 6 files changed, 169 insertions(+), 63 deletions(-) create mode 100644 tests/data/user_data.1.txt create mode 100644 tests/unittests/test_merging.py create mode 100644 tests/unittests/test_runs/test_merge_run.py diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 4ed1a750..e0cf1cbe 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -36,6 +36,8 @@ from cloudinit.handlers import cloud_config as cc_part from cloudinit.handlers import shell_script as ss_part from cloudinit.handlers import upstart_job as up_part +from cloudinit.sources import DataSourceNone + from cloudinit import cloud from cloudinit import config from cloudinit import distros @@ -58,8 +60,16 @@ class Init(object): self._cfg = None self._paths = None self._distro = None - # Created only when a fetch occurs - self.datasource = None + # Changed only when a fetch occurs + self.datasource = DataSourceNone.DataSourceNone({}, None, None) + + def _reset(self, ds=False): + # Recreated on access + self._cfg = None + self._paths = None + self._distro = None + if ds: + self.datasource = DataSourceNone.DataSourceNone({}, None, None) @property def distro(self): @@ -236,7 +246,7 @@ class Init(object): self.datasource = ds # Ensure we adjust our path members datasource # now that we have one (thus allowing ipath to be used) - self.paths.datasource = ds + self._reset() return ds def _get_instance_subdirs(self): @@ -296,6 +306,10 @@ class Init(object): 
util.write_file(iid_fn, "%s\n" % iid) util.write_file(os.path.join(dp, 'previous-instance-id'), "%s\n" % (previous_iid)) + # Ensure needed components are regenerated + # after change of instance which may cause + # change of configuration + self._reset() return iid def fetch(self): @@ -409,6 +423,17 @@ class Init(object): handlers.call_end(mod, data, frequency) called.append(mod) + # Perform post-consumption adjustments so that + # modules that run during the init stage reflect + # this consumed set. + # + # They will be recreated on future access... + self._reset() + # Note(harlowja): the 'active' datasource will have + # references to the previous config, distro, paths + # objects before the load of the userdata happened, + # this is expected. + class Modules(object): def __init__(self, init, cfg_files=None): diff --git a/tests/data/user_data.1.txt b/tests/data/user_data.1.txt new file mode 100644 index 00000000..4c4543de --- /dev/null +++ b/tests/data/user_data.1.txt @@ -0,0 +1,15 @@ +#cloud-config +write_files: +- content: blah + path: /etc/blah.ini + permissions: 493 + +system_info: + package_mirrors: + - arches: [i386, amd64, blah] + failsafe: + primary: http://my.archive.mydomain.com/ubuntu + security: http://my.security.mydomain.com/ubuntu + search: + primary: [] + security: [] diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py new file mode 100644 index 00000000..0037b966 --- /dev/null +++ b/tests/unittests/test_merging.py @@ -0,0 +1,62 @@ +from mocker import MockerTestCase + +from cloudinit import util + + +class TestMergeDict(MockerTestCase): + def test_simple_merge(self): + """Test simple non-conflict merge.""" + source = {"key1": "value1"} + candidate = {"key2": "value2"} + result = util.mergedict(source, candidate) + self.assertEqual({"key1": "value1", "key2": "value2"}, result) + + def test_nested_merge(self): + """Test nested merge.""" + source = {"key1": {"key1.1": "value1.1"}} + candidate = {"key1": {"key1.2": "value1.2"}} + result = util.mergedict(source, candidate) + self.assertEqual( + {"key1": {"key1.1": "value1.1", "key1.2": "value1.2"}}, result) + + def test_merge_does_not_override(self): + """Test that candidate doesn't override source.""" + source = {"key1": "value1", "key2": "value2"} + candidate = {"key1": "value2", "key2": "NEW VALUE"} + result = util.mergedict(source, candidate) + self.assertEqual(source, result) + + def test_empty_candidate(self): + """Test empty candidate doesn't change source.""" + source = {"key": "value"} + candidate = {} + result = util.mergedict(source, candidate) + self.assertEqual(source, result) + + def test_empty_source(self): + """Test empty source is replaced by candidate.""" + source = {} + candidate = {"key": "value"} + result = util.mergedict(source, candidate) + self.assertEqual(candidate, result) + + def test_non_dict_candidate(self): + """Test non-dict candidate is discarded.""" + source = {"key": "value"} + candidate = "not a dict" + result = util.mergedict(source, candidate) + self.assertEqual(source, result) + + def test_non_dict_source(self): + """Test non-dict source is not modified with a dict candidate.""" + source = "not a dict" + candidate = {"key": "value"} + result = util.mergedict(source, candidate) + self.assertEqual(source, result) + + def test_neither_dict(self): + """Test if neither candidate or source is dict source wins.""" + source = "source" + candidate = "candidate" + result = util.mergedict(source, candidate) + self.assertEqual(source, result) diff --git 
a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py new file mode 100644 index 00000000..04c03730 --- /dev/null +++ b/tests/unittests/test_runs/test_merge_run.py @@ -0,0 +1,52 @@ +import os + +from tests.unittests import helpers + +from cloudinit.settings import (PER_INSTANCE) +from cloudinit import stages +from cloudinit import util + + +class TestSimpleRun(helpers.FilesystemMockingTestCase): + def _patchIn(self, root): + self.restore() + self.patchOS(root) + self.patchUtils(root) + + def test_none_ds(self): + new_root = self.makeDir() + self.replicateTestRoot('simple_ubuntu', new_root) + cfg = { + 'datasource_list': ['None'], + 'cloud_init_modules': ['write-files'], + } + ud = self.readResource('user_data.1.txt') + cloud_cfg = util.yaml_dumps(cfg) + util.ensure_dir(os.path.join(new_root, 'etc', 'cloud')) + util.write_file(os.path.join(new_root, 'etc', + 'cloud', 'cloud.cfg'), cloud_cfg) + self._patchIn(new_root) + + # Now start verifying whats created + initer = stages.Init() + initer.read_cfg() + initer.initialize() + initer.fetch() + initer.datasource.userdata_raw = ud + iid = initer.instancify() + initer.update() + initer.cloudify().run('consume_userdata', + initer.consume_userdata, + args=[PER_INSTANCE], + freq=PER_INSTANCE) + mirrors = initer.distro.get_option('package_mirrors') + self.assertEquals(1, len(mirrors)) + mirror = mirrors[0] + self.assertEquals(mirror['arches'], ['i386', 'amd64', 'blah']) + mods = stages.Modules(initer) + (which_ran, failures) = mods.run_section('cloud_init_modules') + self.assertTrue(len(failures) == 0) + self.assertTrue(os.path.exists('/etc/blah.ini')) + self.assertIn('write-files', which_ran) + contents = util.load_file('/etc/blah.ini') + self.assertEquals(contents, 'blah') diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 96962b91..02611581 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -28,65 +28,6 @@ class FakeSelinux(object): self.restored.append(path) -class TestMergeDict(MockerTestCase): - def test_simple_merge(self): - """Test simple non-conflict merge.""" - source = {"key1": "value1"} - candidate = {"key2": "value2"} - result = util.mergedict(source, candidate) - self.assertEqual({"key1": "value1", "key2": "value2"}, result) - - def test_nested_merge(self): - """Test nested merge.""" - source = {"key1": {"key1.1": "value1.1"}} - candidate = {"key1": {"key1.2": "value1.2"}} - result = util.mergedict(source, candidate) - self.assertEqual( - {"key1": {"key1.1": "value1.1", "key1.2": "value1.2"}}, result) - - def test_merge_does_not_override(self): - """Test that candidate doesn't override source.""" - source = {"key1": "value1", "key2": "value2"} - candidate = {"key1": "value2", "key2": "NEW VALUE"} - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - def test_empty_candidate(self): - """Test empty candidate doesn't change source.""" - source = {"key": "value"} - candidate = {} - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - def test_empty_source(self): - """Test empty source is replaced by candidate.""" - source = {} - candidate = {"key": "value"} - result = util.mergedict(source, candidate) - self.assertEqual(candidate, result) - - def test_non_dict_candidate(self): - """Test non-dict candidate is discarded.""" - source = {"key": "value"} - candidate = "not a dict" - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - def test_non_dict_source(self): 
- """Test non-dict source is not modified with a dict candidate.""" - source = "not a dict" - candidate = {"key": "value"} - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - def test_neither_dict(self): - """Test if neither candidate or source is dict source wins.""" - source = "source" - candidate = "candidate" - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - class TestGetCfgOptionListOrStr(TestCase): def test_not_found_no_default(self): """None is returned if key is not found and no default given.""" diff --git a/tools/run-pep8 b/tools/run-pep8 index ad55d420..1dfa92c3 100755 --- a/tools/run-pep8 +++ b/tools/run-pep8 @@ -21,10 +21,21 @@ else base=`pwd`/tools/ fi +IGNORE="E501" # Line too long (these are caught by pylint) + +# King Arthur: Be quiet! ... Be Quiet! I Order You to Be Quiet. +IGNORE="$IGNORE,E121" # Continuation line indentation is not a multiple of four +IGNORE="$IGNORE,E123" # Closing bracket does not match indentation of opening bracket's line +IGNORE="$IGNORE,E124" # Closing bracket missing visual indentation +IGNORE="$IGNORE,E125" # Continuation line does not distinguish itself from next logical line +IGNORE="$IGNORE,E126" # Continuation line over-indented for hanging indent +IGNORE="$IGNORE,E127" # Continuation line over-indented for visual indent +IGNORE="$IGNORE,E128" # Continuation line under-indented for visual indent + cmd=( ${base}/hacking.py - --ignore=E501 # Line too long (these are caught by pylint) + --ignore="$IGNORE" "${files[@]}" ) -- cgit v1.2.3 From b80c2401123e16b9038ff3fb6f6d660717ee68e1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 8 Nov 2012 17:37:16 -0800 Subject: Fix the case where on a redhat based system the fully qualified domain name should end up in /etc/sysconfig/network by passing the fqdn to the update and set hostname methods and using it accordingly. 
LP: #1076759 --- cloudinit/config/cc_set_hostname.py | 10 ++++++---- cloudinit/config/cc_update_hostname.py | 8 +++++--- cloudinit/distros/__init__.py | 4 ++-- cloudinit/distros/debian.py | 4 ++-- cloudinit/distros/rhel.py | 25 +++++++++++++++++-------- 5 files changed, 32 insertions(+), 19 deletions(-) diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index b0f27ebf..2b32fc94 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -27,9 +27,11 @@ def handle(name, cfg, cloud, log, _args): " not setting the hostname in module %s"), name) return - (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud) + (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) try: - log.debug("Setting hostname to %s", hostname) - cloud.distro.set_hostname(hostname) + log.debug("Setting the hostname to %s (%s)", fqdn, hostname) + cloud.distro.set_hostname(hostname, fqdn) except Exception: - util.logexc(log, "Failed to set hostname to %s", hostname) + util.logexc(log, "Failed to set the hostname to %s (%s)", + fqdn, hostname) + raise diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index 1d6679ea..52225cd8 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -32,10 +32,12 @@ def handle(name, cfg, cloud, log, _args): " not updating the hostname in module %s"), name) return - (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud) + (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) try: prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname") - cloud.distro.update_hostname(hostname, prev_fn) + log.debug("Updating hostname to %s (%s)", fqdn, hostname) + cloud.distro.update_hostname(hostname, fqdn, prev_fn) except Exception: - util.logexc(log, "Failed to set the hostname to %s", hostname) + util.logexc(log, "Failed to update the hostname to %s (%s)", + fqdn, hostname) raise diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 869540d2..bd04ba79 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -58,11 +58,11 @@ class Distro(object): return self._cfg.get(opt_name, default) @abc.abstractmethod - def set_hostname(self, hostname): + def set_hostname(self, hostname, fqdn=None): raise NotImplementedError() @abc.abstractmethod - def update_hostname(self, hostname, prev_hostname_fn): + def update_hostname(self, hostname, fqdn, prev_hostname_fn): raise NotImplementedError() @abc.abstractmethod diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index cc7e53a0..ed4070b4 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -67,7 +67,7 @@ class Distro(distros.Distro): else: return distros.Distro._bring_up_interfaces(self, device_names) - def set_hostname(self, hostname): + def set_hostname(self, hostname, fqdn=None): self._write_hostname(hostname, "/etc/hostname") LOG.debug("Setting hostname to %s", hostname) util.subp(['hostname', hostname]) @@ -76,7 +76,7 @@ class Distro(distros.Distro): # "" gives trailing newline. 
util.write_file(out_fn, "%s\n" % str(hostname), 0644) - def update_hostname(self, hostname, prev_fn): + def update_hostname(self, hostname, fqdn, prev_fn): hostname_prev = self._read_hostname(prev_fn) hostname_in_etc = self._read_hostname("/etc/hostname") update_files = [] diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index bf3c18d2..e4c27216 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -146,8 +146,13 @@ class Distro(distros.Distro): lines.insert(0, _make_header()) util.write_file(fn, "\n".join(lines), 0644) - def set_hostname(self, hostname): - self._write_hostname(hostname, '/etc/sysconfig/network') + def set_hostname(self, hostname, fqdn=None): + # See: http://bit.ly/TwitgL + # Should be fqdn if we can use it + sysconfig_hostname = fqdn + if not sysconfig_hostname: + sysconfig_hostname = hostname + self._write_hostname(sysconfig_hostname, '/etc/sysconfig/network') LOG.debug("Setting hostname to %s", hostname) util.subp(['hostname', hostname]) @@ -165,28 +170,32 @@ class Distro(distros.Distro): } self._update_sysconfig_file(out_fn, host_cfg) - def update_hostname(self, hostname, prev_file): + def update_hostname(self, hostname, fqdn, prev_file): + # See: http://bit.ly/TwitgL + # Should be fqdn if we can use it + sysconfig_hostname = fqdn + if not sysconfig_hostname: + sysconfig_hostname = hostname hostname_prev = self._read_hostname(prev_file) hostname_in_sys = self._read_hostname("/etc/sysconfig/network") update_files = [] - if not hostname_prev or hostname_prev != hostname: + if not hostname_prev or hostname_prev != sysconfig_hostname: update_files.append(prev_file) if (not hostname_in_sys or (hostname_in_sys == hostname_prev - and hostname_in_sys != hostname)): + and hostname_in_sys != sysconfig_hostname)): update_files.append("/etc/sysconfig/network") for fn in update_files: try: - self._write_hostname(hostname, fn) + self._write_hostname(sysconfig_hostname, fn) except: util.logexc(LOG, "Failed to write hostname %s to %s", - hostname, fn) + sysconfig_hostname, fn) if (hostname_in_sys and hostname_prev and hostname_in_sys != hostname_prev): LOG.debug(("%s differs from /etc/sysconfig/network." " Assuming user maintained hostname."), prev_file) if "/etc/sysconfig/network" in update_files: - # Only do this if we are running in non-adjusted root mode LOG.debug("Setting hostname to %s", hostname) util.subp(['hostname', hostname]) -- cgit v1.2.3 From bd01f3466e10ca515a8e8aec42d00201f40cbd53 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 8 Nov 2012 17:41:36 -0800 Subject: Forgot the test! 
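The test added below patches the filesystem, runs the cc_set_hostname handler against the rhel distro, and checks that /etc/sysconfig/network picked up the fully qualified name. Roughly, the assertion amounts to the following (a sketch using configobj the same way the test does; the file content shown here is assumed, not taken from a real system):

    # Hypothetical, self-contained version of the check made in the test below.
    from StringIO import StringIO
    from configobj import ConfigObj

    contents = "HOSTNAME=blah.blah.blah.yahoo.com\n"
    assert dict(ConfigObj(StringIO(contents))) == {'HOSTNAME': 'blah.blah.blah.yahoo.com'}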
--- .../test_handler/test_handler_set_hostname.py | 58 ++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 tests/unittests/test_handler/test_handler_set_hostname.py diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py new file mode 100644 index 00000000..a1aba62f --- /dev/null +++ b/tests/unittests/test_handler/test_handler_set_hostname.py @@ -0,0 +1,58 @@ +from cloudinit.config import cc_set_hostname + +from cloudinit import cloud +from cloudinit import distros +from cloudinit import helpers +from cloudinit import util + +from tests.unittests import helpers as t_help + +import logging + +from StringIO import StringIO + +from configobj import ConfigObj + +LOG = logging.getLogger(__name__) + + +class TestHostname(t_help.FilesystemMockingTestCase): + def setUp(self): + super(TestHostname, self).setUp() + self.tmp = self.makeDir(prefix="unittest_") + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def test_write_hostname_rhel(self): + cfg = { + 'hostname': 'blah.blah.blah.yahoo.com', + } + distro = self._fetch_distro('rhel') + paths = helpers.Paths({}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + self.patchOS(self.tmp) + cc_set_hostname.handle('cc_set_hostname', + cfg, cc, LOG, []) + contents = util.load_file("/etc/sysconfig/network") + n_cfg = ConfigObj(StringIO(contents)) + self.assertEquals({'HOSTNAME': 'blah.blah.blah.yahoo.com'}, + dict(n_cfg)) + + def test_write_hostname_debian(self): + cfg = { + 'hostname': 'blah.blah.blah.yahoo.com', + } + distro = self._fetch_distro('debian') + paths = helpers.Paths({}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle('cc_set_hostname', + cfg, cc, LOG, []) + contents = util.load_file("/etc/hostname") + self.assertEquals('blah', contents.strip()) -- cgit v1.2.3 From 8c006684034c13719171672836edfc65bf02ebe9 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 9 Nov 2012 14:40:41 -0800 Subject: Fix pep8 warnings. --- cloudinit/distros/__init__.py | 2 +- tools/run-pep8 | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index bd04ba79..3392a065 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -81,7 +81,7 @@ class Distro(object): def _get_arch_package_mirror_info(self, arch=None): mirror_info = self.get_option("package_mirrors", []) - if arch == None: + if not arch: arch = self.get_primary_arch() return _get_arch_package_mirror_info(mirror_info, arch) diff --git a/tools/run-pep8 b/tools/run-pep8 index 1dfa92c3..20e594bc 100755 --- a/tools/run-pep8 +++ b/tools/run-pep8 @@ -31,6 +31,7 @@ IGNORE="$IGNORE,E125" # Continuation line does not distinguish itself from next IGNORE="$IGNORE,E126" # Continuation line over-indented for hanging indent IGNORE="$IGNORE,E127" # Continuation line over-indented for visual indent IGNORE="$IGNORE,E128" # Continuation line under-indented for visual indent +IGNORE="$IGNORE,E502" # The backslash is redundant between brackets cmd=( ${base}/hacking.py -- cgit v1.2.3 From 1169dcc5f18fd9a5adbf353bec87e48d563550a5 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 9 Nov 2012 15:28:35 -0800 Subject: Fix the merging of group configuration when that group configuration is a dict => members. 
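In cloud-config terms, the case being fixed is a 'groups' entry whose list items may themselves be dicts mapping a group name to its members. A sketch of such input and the shape it normalizes to, taken from the unit test added further below (the exact merge behaviour is what _normalize_groups() in the diff implements):

    # Input as it might appear after the cloud-config YAML is loaded.
    cfg = {'groups': [
        {'ubuntu': ['foo', 'bar'], 'bob': 'users'},
        'cloud-users',
        {'bob': 'users2'},
    ]}
    # Roughly what normalization produces: members merged per group, plain
    # string entries becoming groups with no members, e.g.
    # {'ubuntu': ['foo', 'bar'], 'bob': ['users', 'users2'], 'cloud-users': []}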
LP: #1077245 --- cloudinit/distros/__init__.py | 32 +++++++++++++++++++--- .../test_distros/test_user_data_normalize.py | 22 +++++++++++++++ 2 files changed, 50 insertions(+), 4 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 3392a065..2d01efc3 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -425,12 +425,36 @@ def _get_arch_package_mirror_info(package_mirrors, arch): # is the standard form used in the rest # of cloud-init def _normalize_groups(grp_cfg): - if isinstance(grp_cfg, (str, basestring, list)): + if isinstance(grp_cfg, (str, basestring)): + grp_cfg = grp_cfg.strip().split(",") + if isinstance(grp_cfg, (list)): c_grp_cfg = {} - for i in util.uniq_merge(grp_cfg): - c_grp_cfg[i] = [] + for i in grp_cfg: + if isinstance(i, (dict)): + for k, v in i.items(): + if k not in c_grp_cfg: + if isinstance(v, (list)): + c_grp_cfg[k] = list(v) + elif isinstance(v, (basestring, str)): + c_grp_cfg[k] = [v] + else: + raise TypeError("Bad group member type %s" % + util.obj_name(v)) + else: + if isinstance(v, (list)): + c_grp_cfg[k].extend(v) + elif isinstance(v, (basestring, str)): + c_grp_cfg[k].append(v) + else: + raise TypeError("Bad group member type %s" % + util.obj_name(v)) + elif isinstance(i, (str, basestring)): + if i not in c_grp_cfg: + c_grp_cfg[i] = [] + else: + raise TypeError("Unknown group name type %s" % + util.obj_name(i)) grp_cfg = c_grp_cfg - groups = {} if isinstance(grp_cfg, (dict)): for (grp_name, grp_members) in grp_cfg.items(): diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index 8f0d8896..50400c8a 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -30,6 +30,28 @@ class TestUGNormalize(MockerTestCase): def _norm(self, cfg, distro): return distros.normalize_users_groups(cfg, distro) + def test_group_dict(self): + distro = self._make_distro('ubuntu') + g = {'groups': [ + { + 'ubuntu': ['foo', 'bar'], + 'bob': 'users', + }, + 'cloud-users', + { + 'bob': 'users2', + }, + ] + } + (users, groups) = self._norm(g, distro) + self.assertIn('ubuntu', groups) + ub_members = groups['ubuntu'] + self.assertEquals(sorted(['foo', 'bar']), sorted(ub_members)) + self.assertIn('bob', groups) + b_members = groups['bob'] + self.assertEquals(sorted(['users', 'users2']), + sorted(b_members)) + def test_basic_groups(self): distro = self._make_distro('ubuntu') ug_cfg = { -- cgit v1.2.3 From a17a69c35c1de0a6bd6f054f76d3da9e4a9c5364 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 10 Nov 2012 10:15:16 -0800 Subject: Sudoers.d creation cleanups + tests. 
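When /etc/sudoers does not exist yet, the cleaned-up ensure_sudo_dir() writes a short commented header followed by the include directive instead of appending a bare line. Roughly the content it assembles, with /etc/sudoers.d standing in for the configured include path (this just restates the lines built in the diff below):

    # The string written to a freshly created /etc/sudoers (sketch).
    sudoers_contents = "\n".join([
        '# See sudoers(5) for more information on "#include" directives:',
        '',
        '# Added by cloud-init',
        '#includedir /etc/sudoers.d',
        '',
    ])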
--- cloudinit/distros/__init__.py | 20 +++++++++++------ tests/unittests/helpers.py | 1 + tests/unittests/test_distros/test_generic.py | 32 +++++++++++++++++++++++++--- tests/unittests/test_runs/test_merge_run.py | 2 +- 4 files changed, 45 insertions(+), 10 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 2d01efc3..d2cb0a8b 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -283,8 +283,10 @@ class Distro(object): # Ensure the dir is included and that # it actually exists as a directory sudoers_contents = '' + base_exists = False if os.path.exists(sudo_base): sudoers_contents = util.load_file(sudo_base) + base_exists = True found_include = False for line in sudoers_contents.splitlines(): line = line.strip() @@ -299,18 +301,24 @@ class Distro(object): found_include = True break if not found_include: - sudoers_contents += "\n#includedir %s\n" % (path) try: - if not os.path.exists(sudo_base): + if not base_exists: + lines = [('# See sudoers(5) for more information' + ' on "#include" directives:'), '', + '# Added by cloud-init', + "#includedir %s" % (path), ''] + sudoers_contents = "\n".join(lines) util.write_file(sudo_base, sudoers_contents, 0440) else: - with open(sudo_base, 'a') as f: - f.write(sudoers_contents) - LOG.debug("added '#includedir %s' to %s" % (path, sudo_base)) + lines = ['', '# Added by cloud-init', + "#includedir %s" % (path), ''] + sudoers_contents = "\n".join(lines) + util.append_file(sudo_base, sudoers_contents) + LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base)) except IOError as e: util.logexc(LOG, "Failed to write %s" % sudo_base, e) raise e - util.ensure_dir(path, 0755) + util.ensure_dir(path, 0750) def write_sudo_rules(self, user, diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 2c5dcad2..e8080668 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -103,6 +103,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def patchUtils(self, new_root): patch_funcs = { util: [('write_file', 1), + ('append_file', 1), ('load_file', 1), ('ensure_dir', 1), ('chmod', 1), diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py index 2df4c2f0..704699b5 100644 --- a/tests/unittests/test_distros/test_generic.py +++ b/tests/unittests/test_distros/test_generic.py @@ -1,6 +1,9 @@ -from mocker import MockerTestCase - from cloudinit import distros +from cloudinit import util + +from tests.unittests import helpers + +import os unknown_arch_info = { 'arches': ['default'], @@ -27,7 +30,7 @@ gpmi = distros._get_package_mirror_info # pylint: disable=W0212 gapmi = distros._get_arch_package_mirror_info # pylint: disable=W0212 -class TestGenericDistro(MockerTestCase): +class TestGenericDistro(helpers.FilesystemMockingTestCase): def return_first(self, mlist): if not mlist: @@ -52,6 +55,29 @@ class TestGenericDistro(MockerTestCase): # Make a temp directoy for tests to use. 
self.tmp = self.makeDir() + def test_sudoers_ensure_new(self): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + d.ensure_sudo_dir("/b") + contents = util.load_file("/etc/sudoers") + self.assertIn("includedir /b", contents) + self.assertTrue(os.path.isdir("/b")) + + def test_sudoers_ensure_append(self): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + util.write_file("/etc/sudoers", "josh, josh\n") + d.ensure_sudo_dir("/b") + contents = util.load_file("/etc/sudoers") + self.assertIn("includedir /b", contents) + self.assertTrue(os.path.isdir("/b")) + self.assertIn("josh", contents) + self.assertEquals(2, contents.count("josh")) + def test_arch_package_mirror_info_unknown(self): """for an unknown arch, we should get back that with arch 'default'.""" arch_mirrors = gapmi(package_mirrors, arch="unknown") diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py index 04c03730..36de97ae 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/test_runs/test_merge_run.py @@ -7,7 +7,7 @@ from cloudinit import stages from cloudinit import util -class TestSimpleRun(helpers.FilesystemMockingTestCase): +class TestMergeRun(helpers.FilesystemMockingTestCase): def _patchIn(self, root): self.restore() self.patchOS(root) -- cgit v1.2.3 From efa2dfa699bc9222105850641a2820ceda9bfe67 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 10 Nov 2012 21:57:06 -0500 Subject: update ChangeLog LP: #1076811 --- ChangeLog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ChangeLog b/ChangeLog index 8f304db3..52f47e3c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -16,6 +16,12 @@ - Remove usage of paths.join, as all code should run through util helpers - Fix pylint complaining about tests folder 'helpers.py' not being found - Add a pylintrc file that is used instead options hidden in 'run_pylint' + - fix bug where cloud-config from user-data could not affect system_info + settings [revno 703] (LP: #1076811) + - for write fqdn to system config for rh/fedora [revno 704] + - add yaml/cloud config examples checking tool [revno 706] + - Fix the merging of group configuration when that group configuration is a + dict => members. [revno 707] 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. -- cgit v1.2.3 From 9cb3130b0b87eabe458df7dc113f00431ff3648e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 10 Nov 2012 22:24:01 -0500 Subject: revert unrelated whitespace changes that creeped in. --- cloudinit/distros/__init__.py | 38 +++++++++++++++------------- cloudinit/sources/DataSourceAltCloud.py | 2 +- cloudinit/util.py | 3 ++- tests/unittests/test_runs/test_simple_run.py | 6 ++--- tools/hacking.py | 4 +-- 5 files changed, 27 insertions(+), 26 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 8a98e334..d2cb0a8b 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -187,23 +187,23 @@ class Distro(object): # inputs. If something goes wrong, we can end up with a system # that nobody can login to. 
adduser_opts = { - "gecos": '--comment', - "homedir": '--home', - "primary_group": '--gid', - "groups": '--groups', - "passwd": '--password', - "shell": '--shell', - "expiredate": '--expiredate', - "inactive": '--inactive', - "selinux_user": '--selinux-user', - } + "gecos": '--comment', + "homedir": '--home', + "primary_group": '--gid', + "groups": '--groups', + "passwd": '--password', + "shell": '--shell', + "expiredate": '--expiredate', + "inactive": '--inactive', + "selinux_user": '--selinux-user', + } adduser_opts_flags = { - "no_user_group": '--no-user-group', - "system": '--system', - "no_log_init": '--no-log-init', - "no_create_home": "-M", - } + "no_user_group": '--no-user-group', + "system": '--system', + "no_log_init": '--no-log-init', + "no_create_home": "-M", + } # Now check the value and create the command for option in kwargs: @@ -320,9 +320,11 @@ class Distro(object): raise e util.ensure_dir(path, 0750) - def write_sudo_rules(self, user, rules, sudo_file=None): - if not sudo_file: - sudo_file = "/etc/sudoers.d/90-cloud-init-users" + def write_sudo_rules(self, + user, + rules, + sudo_file="/etc/sudoers.d/90-cloud-init-users", + ): content_header = "# user rules for %s" % user content = "%s\n%s %s\n\n" % (content_header, user, rules) diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 9812bdcb..d7e1204f 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -47,7 +47,7 @@ META_DATA_NOT_SUPPORTED = { 'instance-id': 455, 'local-hostname': 'localhost', 'placement': {}, -} + } def read_user_data_callback(mount_dir): diff --git a/cloudinit/util.py b/cloudinit/util.py index 4f5b15ee..7890a3d6 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1193,7 +1193,8 @@ def yaml_dumps(obj): indent=4, explicit_start=True, explicit_end=True, - default_flow_style=False) + default_flow_style=False, + ) return formatted diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index 60ef812a..22d6cf2c 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -37,13 +37,11 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): self.replicateTestRoot('simple_ubuntu', new_root) cfg = { 'datasource_list': ['None'], - 'write_files': [ - { + 'write_files': [{ 'path': '/etc/blah.ini', 'content': 'blah', 'permissions': 0755, - }, - ], + }], 'cloud_init_modules': ['write-files'], } cloud_cfg = util.yaml_dumps(cfg) diff --git a/tools/hacking.py b/tools/hacking.py index 26a07c53..11163df3 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -66,8 +66,8 @@ def cloud_import_alphabetical(physical_line, line_number, lines): # handle import x # use .lower since capitalization shouldn't dictate order split_line = import_normalize(physical_line.strip()).lower().split() - split_previous = import_normalize(lines[line_number - 2]) - split_previous = split_previous.strip().lower().split() + split_previous = import_normalize(lines[line_number - 2] + ).strip().lower().split() # with or without "as y" length = [2, 4] if (len(split_line) in length and len(split_previous) in length and -- cgit v1.2.3 From d7a42777c83668111d9bb496f581605b33a0b4f2 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 10 Nov 2012 22:27:49 -0500 Subject: 1 pep8 and 1 pylint fix --- cloudinit/helpers.py | 2 +- tests/unittests/test_runs/test_merge_run.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/cloudinit/helpers.py b/cloudinit/helpers.py index d26625a0..794f00ec 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -76,7 +76,7 @@ def canon_sem_name(name): class FileSemaphores(object): - def __init__(self, sem_path): + def __init__(self, sem_path): self.sem_path = sem_path @contextlib.contextmanager diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py index 36de97ae..d9c3a455 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/test_runs/test_merge_run.py @@ -33,7 +33,7 @@ class TestMergeRun(helpers.FilesystemMockingTestCase): initer.initialize() initer.fetch() initer.datasource.userdata_raw = ud - iid = initer.instancify() + _iid = initer.instancify() initer.update() initer.cloudify().run('consume_userdata', initer.consume_userdata, -- cgit v1.2.3 From 3248ac9bbb2008e88a3bd9c030ba0fcbc14b7fce Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 10 Nov 2012 22:32:49 -0500 Subject: whitespace / indentation cleanups These changes were pulled out of the previous merge (cc_yum_add_repo) as they were unrelated there. Re-applying them here. --- cloudinit/distros/__init__.py | 38 +++++++++++++--------------- cloudinit/sources/DataSourceAltCloud.py | 2 +- cloudinit/util.py | 3 +-- tests/unittests/test_runs/test_simple_run.py | 6 +++-- tools/hacking.py | 4 +-- 5 files changed, 26 insertions(+), 27 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index d2cb0a8b..8a98e334 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -187,23 +187,23 @@ class Distro(object): # inputs. If something goes wrong, we can end up with a system # that nobody can login to. adduser_opts = { - "gecos": '--comment', - "homedir": '--home', - "primary_group": '--gid', - "groups": '--groups', - "passwd": '--password', - "shell": '--shell', - "expiredate": '--expiredate', - "inactive": '--inactive', - "selinux_user": '--selinux-user', - } + "gecos": '--comment', + "homedir": '--home', + "primary_group": '--gid', + "groups": '--groups', + "passwd": '--password', + "shell": '--shell', + "expiredate": '--expiredate', + "inactive": '--inactive', + "selinux_user": '--selinux-user', + } adduser_opts_flags = { - "no_user_group": '--no-user-group', - "system": '--system', - "no_log_init": '--no-log-init', - "no_create_home": "-M", - } + "no_user_group": '--no-user-group', + "system": '--system', + "no_log_init": '--no-log-init', + "no_create_home": "-M", + } # Now check the value and create the command for option in kwargs: @@ -320,11 +320,9 @@ class Distro(object): raise e util.ensure_dir(path, 0750) - def write_sudo_rules(self, - user, - rules, - sudo_file="/etc/sudoers.d/90-cloud-init-users", - ): + def write_sudo_rules(self, user, rules, sudo_file=None): + if not sudo_file: + sudo_file = "/etc/sudoers.d/90-cloud-init-users" content_header = "# user rules for %s" % user content = "%s\n%s %s\n\n" % (content_header, user, rules) diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index d7e1204f..9812bdcb 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -47,7 +47,7 @@ META_DATA_NOT_SUPPORTED = { 'instance-id': 455, 'local-hostname': 'localhost', 'placement': {}, - } +} def read_user_data_callback(mount_dir): diff --git a/cloudinit/util.py b/cloudinit/util.py index 7890a3d6..4f5b15ee 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1193,8 +1193,7 @@ def 
yaml_dumps(obj): indent=4, explicit_start=True, explicit_end=True, - default_flow_style=False, - ) + default_flow_style=False) return formatted diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index 22d6cf2c..60ef812a 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -37,11 +37,13 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): self.replicateTestRoot('simple_ubuntu', new_root) cfg = { 'datasource_list': ['None'], - 'write_files': [{ + 'write_files': [ + { 'path': '/etc/blah.ini', 'content': 'blah', 'permissions': 0755, - }], + }, + ], 'cloud_init_modules': ['write-files'], } cloud_cfg = util.yaml_dumps(cfg) diff --git a/tools/hacking.py b/tools/hacking.py index 11163df3..26a07c53 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -66,8 +66,8 @@ def cloud_import_alphabetical(physical_line, line_number, lines): # handle import x # use .lower since capitalization shouldn't dictate order split_line = import_normalize(physical_line.strip()).lower().split() - split_previous = import_normalize(lines[line_number - 2] - ).strip().lower().split() + split_previous = import_normalize(lines[line_number - 2]) + split_previous = split_previous.strip().lower().split() # with or without "as y" length = [2, 4] if (len(split_line) in length and len(split_previous) in length and -- cgit v1.2.3 From 71ba36704132ff8597dfc0e45b34e0c4424e239f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sun, 11 Nov 2012 21:49:10 -0500 Subject: config-drive-v2: populate metadata['public-keys'] from 'public_keys' other datasources populate 'public-keys' rather than 'public_keys' and there is a more complete handler in the base DataSource. So, to take advantage of that, have DataSourceConfigDrive copy public_keys to public-keys, and remove the 'get_public_ssh_keys' from the DataSourcEConfigDrive. LP: #1077700 --- cloudinit/sources/DataSourceConfigDrive.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 9729cfb9..dbbedce1 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -218,11 +218,6 @@ class DataSourceConfigDrive(sources.DataSource): return True - def get_public_ssh_keys(self): - if not 'public-keys' in self.metadata: - return [] - return self.metadata['public-keys'] - class DataSourceConfigDriveNet(DataSourceConfigDrive): def __init__(self, sys_cfg, distro, paths): @@ -331,6 +326,13 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"): except KeyError: raise BrokenConfigDriveDir("No uuid entry in metadata") + # other datasources (and config-drive-v1) populate metadata['public-keys'] + # where as with config-drive-v2, that would be 'public_keys'. So, just + # copy the field if it is present + if ('public_keys' in results['metadata'] and not + 'public-keys' in results['metadata']): + results['public-keys'] = results['public_keys'] + def read_content_path(item): # do not use os.path.join here, as content_path starts with / cpath = os.path.sep.join((source_dir, "openstack", -- cgit v1.2.3
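The net effect of the config-drive change above is a one-way aliasing of the key name so that the generic public-key handling in the base DataSource can pick the keys up. A simplified, standalone sketch of the idea (the patch itself works on the results dict built while reading the drive, and the key value here is made up):

    # Simplified illustration of the public_keys -> public-keys aliasing.
    metadata = {'public_keys': ['ssh-rsa AAAAB3Nza... user@host']}
    if 'public_keys' in metadata and 'public-keys' not in metadata:
        metadata['public-keys'] = metadata['public_keys']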