From ea21cecc94b613b063d17588d4cd6612bc56753b Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Tue, 16 Oct 2012 15:15:48 -0700
Subject: Add in a configuration module that can write out the yum.repo format
 for those that want to hook into different repos for installing.

---
 cloudinit/config/cc_yum_add_repo.py | 93 +++++++++++++++++++++++++++++++++++++
 1 file changed, 93 insertions(+)
 create mode 100644 cloudinit/config/cc_yum_add_repo.py
(limited to 'cloudinit/config')

diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
new file mode 100644
index 00000000..41dc72a0
--- /dev/null
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -0,0 +1,93 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser
+# Author: Juerg Haefliger
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this
+
+import os
+
+from cloudinit import templater
+from cloudinit import util
+
+import configobj
+
+
+def _canonicalize_id(repo_id):
+    repo_id = repo_id.lower().replace("-", "_")
+    repo_id = repo_id.replace(" ", "_")
+    return repo_id
+
+
+## TODO(harlowja): move to distro?
+# See man yum.conf
+def _format_repository_config(repo_id, repo_config):
+    to_be = configobj.ConfigObj()
+    to_be[repo_id] = {}
+    # Do basic translation of
+    for (k, v) in repo_config.items():
+        if isinstance(v, bool):
+            if v:
+                v = '1'
+            else:
+                v = '0'
+        elif isinstance(v, (tuple, list)):
+            v = "\n ".join(v)
+        # For now assume that peopel using this know
+        # the format of yum and don't verify further
+        to_be[repo_id][k] = v
+    lines = to_be.write()
+    return "\n".join(lines)
+
+
+def handle(name, cfg, cloud, log, _args):
+    repos = cfg.get('yum_repos')
+    if not repos:
+        log.debug(("Skipping module named %s,"
+                   " no 'yum_repos' configuration found"), name)
+        return
+    repo_base_path = util.get_cfg_option_str(cfg, 'yum_repo_dir',
+                                             '/etc/yum.repos.d/')
+    repo_locations = {}
+    repo_configs = {}
+    for (repo_id, repo_config) in repos.items():
+        canon_repo_id = _canonicalize_id(repo_id)
+        repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
+        if os.path.exists(repo_fn_pth):
+            log.info("Skipping repo %s, file %s already exists!",
+                     repo_id, repo_fn_pth)
+            continue
+        elif canon_repo_id in repo_locations:
+            log.info("Skipping repo %s, file %s already pending!",
+                     repo_id, repo_fn_pth)
+            continue
+        if not repo_config:
+            repo_config = {}
+        # Do some basic sanity checks/cleaning
+        n_repo_config = {}
+        for (k, v) in repo_config.items():
+            k = k.lower().strip().replace("-", "_")
+            if k:
+                n_repo_config[k] = v
+        repo_config = n_repo_config
+        if not 'baseurl' in repo_config:
+            log.warn("Repository %s does not contain a baseurl address", repo_id)
+        else:
+            repo_configs[canon_repo_id] = repo_config
+            repo_locations[canon_repo_id] = repo_fn_pth
+    for (c_repo_id, path) in repo_locations.items():
+        repo_blob = _format_repository_config(c_repo_id, repo_configs.get(c_repo_id))
+        util.write_file(path, repo_blob)
-- cgit v1.2.3

From
19c640ebfd3832ec1582c6c134ea68efac95588c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 15:17:20 -0700 Subject: Remove some pylint buggies. --- cloudinit/config/cc_yum_add_repo.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'cloudinit/config') diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 41dc72a0..10c423b5 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -20,7 +20,6 @@ import os -from cloudinit import templater from cloudinit import util import configobj @@ -53,7 +52,7 @@ def _format_repository_config(repo_id, repo_config): return "\n".join(lines) -def handle(name, cfg, cloud, log, _args): +def handle(name, cfg, _cloud, log, _args): repos = cfg.get('yum_repos') if not repos: log.debug(("Skipping module named %s," @@ -84,10 +83,12 @@ def handle(name, cfg, cloud, log, _args): n_repo_config[k] = v repo_config = n_repo_config if not 'baseurl' in repo_config: - log.warn("Repository %s does not contain a baseurl address", repo_id) + log.warn("Repository %s does not contain a baseurl address", + repo_id) else: repo_configs[canon_repo_id] = repo_config repo_locations[canon_repo_id] = repo_fn_pth for (c_repo_id, path) in repo_locations.items(): - repo_blob = _format_repository_config(c_repo_id, repo_configs.get(c_repo_id)) + repo_blob = _format_repository_config(c_repo_id, + repo_configs.get(c_repo_id)) util.write_file(path, repo_blob) -- cgit v1.2.3 From e8371b113c078f3017c44804662960da9a04abf2 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 15:19:14 -0700 Subject: Fix copyright. --- cloudinit/config/cc_yum_add_repo.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'cloudinit/config') diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 10c423b5..f0487773 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -1,10 +1,8 @@ # vi: ts=4 expandtab # -# Copyright (C) 2011 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012 Yahoo! Inc. # -# Author: Scott Moser -# Author: Juerg Haefliger +# Author: Joshua Harlow # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -16,7 +14,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this +# along with this program. If not, see . import os -- cgit v1.2.3 From cf80962e51a925d5b895d0ef90c2a15ad99778b3 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 15:24:15 -0700 Subject: Update comments as to why format is the way it is. 
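For reference, a sketch (editorial, not part of the patch series) of what the new cc_yum_add_repo module consumes and produces, expressed as the Python dict that handle() receives as cfg; the repository id, URLs and values below are made up:

    # Hypothetical cloud-config input for cc_yum_add_repo, shown as the 'cfg'
    # dict passed to handle(name, cfg, cloud, log, _args).
    cfg = {
        'yum_repo_dir': '/etc/yum.repos.d/',   # optional; this is the default
        'yum_repos': {
            'epel-testing': {                  # id is canonicalized to epel_testing
                'baseurl': 'http://download.example.invalid/pub/epel/testing/5/$basearch',
                'enabled': False,              # booleans are written as 0/1
                'gpgcheck': True,
                'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
                'name': 'Extra Packages for Enterprise Linux 5 - Testing',
            },
        },
    }

    # Roughly the /etc/yum.repos.d/epel_testing.repo that would be written
    # ('baseurl' is required, other keys are passed through unverified):
    #
    #   [epel_testing]
    #   baseurl = http://download.example.invalid/pub/epel/testing/5/$basearch
    #   enabled = 0
    #   gpgcheck = 1
    #   gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL
    #   name = Extra Packages for Enterprise Linux 5 - Testing
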
--- cloudinit/config/cc_yum_add_repo.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'cloudinit/config') diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index f0487773..fda3236f 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -34,14 +34,17 @@ def _canonicalize_id(repo_id): def _format_repository_config(repo_id, repo_config): to_be = configobj.ConfigObj() to_be[repo_id] = {} - # Do basic translation of + # Do basic translation of the items -> values for (k, v) in repo_config.items(): if isinstance(v, bool): + # Seems like yum prefers 1/0 if v: v = '1' else: v = '0' elif isinstance(v, (tuple, list)): + # Can handle 'lists' in certain cases + # See: http://bit.ly/Qqrf1t v = "\n ".join(v) # For now assume that peopel using this know # the format of yum and don't verify further -- cgit v1.2.3 From 7029732d496181233f2115dbfd65b13d20aceca7 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 19:18:15 -0700 Subject: Add a more generic package install mechansim that removes some of the code in apt_update_upgrade to do upgrades and installs and places it in a generic package module and adjusts some of the reboot backoffs and log flushing/sleeping that was happening there. --- cloudinit/config/cc_apt_configure.py | 258 +++++++++++++++++ cloudinit/config/cc_apt_update_upgrade.py | 305 --------------------- .../config/cc_package_update_upgrade_install.py | 110 ++++++++ 3 files changed, 368 insertions(+), 305 deletions(-) create mode 100644 cloudinit/config/cc_apt_configure.py delete mode 100644 cloudinit/config/cc_apt_update_upgrade.py create mode 100644 cloudinit/config/cc_package_update_upgrade_install.py (limited to 'cloudinit/config') diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py new file mode 100644 index 00000000..2cf65ecc --- /dev/null +++ b/cloudinit/config/cc_apt_configure.py @@ -0,0 +1,258 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2009-2010 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# +# Author: Scott Moser +# Author: Juerg Haefliger +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import glob +import os + +from cloudinit import templater +from cloudinit import util + +distros = ['ubuntu', 'debian'] + +PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n" +PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy" + +# A temporary shell program to get a given gpg key +# from a given keyserver +EXPORT_GPG_KEYID = """ + k=${1} ks=${2}; + exec 2>/dev/null + [ -n "$k" ] || exit 1; + armour=$(gpg --list-keys --armour "${k}") + if [ -z "${armour}" ]; then + gpg --keyserver ${ks} --recv $k >/dev/null && + armour=$(gpg --export --armour "${k}") && + gpg --batch --yes --delete-keys "${k}" + fi + [ -n "${armour}" ] && echo "${armour}" +""" + + +def handle(name, cfg, cloud, log, _args): + release = get_release() + mirrors = find_apt_mirror_info(cloud, cfg) + if not mirrors or "primary" not in mirrors: + log.debug(("Skipping module named %s," + " no package 'mirror' located"), name) + return + + # backwards compatibility + mirror = mirrors["primary"] + mirrors["mirror"] = mirror + + log.debug("Mirror info: %s" % mirrors) + + if not util.get_cfg_option_bool(cfg, + 'apt_preserve_sources_list', False): + generate_sources_list(release, mirrors, cloud, log) + old_mirrors = cfg.get('apt_old_mirrors', + {"primary": "archive.ubuntu.com/ubuntu", + "security": "security.ubuntu.com/ubuntu"}) + rename_apt_lists(old_mirrors, mirrors) + + # Set up any apt proxy + proxy = cfg.get("apt_proxy", None) + proxy_filename = PROXY_FN + if proxy: + try: + # See man 'apt.conf' + contents = PROXY_TPL % (proxy) + util.write_file(cloud.paths.join(False, proxy_filename), + contents) + except Exception as e: + util.logexc(log, "Failed to write proxy to %s", proxy_filename) + elif os.path.isfile(proxy_filename): + util.del_file(proxy_filename) + + # Process 'apt_sources' + if 'apt_sources' in cfg: + params = mirrors + params['RELEASE'] = release + params['MIRROR'] = mirror + errors = add_sources(cloud, cfg['apt_sources'], params) + for e in errors: + log.warn("Add source error: %s", ':'.join(e)) + + dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False) + if dconf_sel: + log.debug("Setting debconf selections per cloud config") + try: + util.subp(('debconf-set-selections', '-'), dconf_sel) + except Exception: + util.logexc(log, "Failed to run debconf-set-selections") + + +# get gpg keyid from keyserver +def getkeybyid(keyid, keyserver): + with util.ExtendedTemporaryFile(suffix='.sh') as fh: + fh.write(EXPORT_GPG_KEYID) + fh.flush() + cmd = ['/bin/sh', fh.name, keyid, keyserver] + (stdout, _stderr) = util.subp(cmd) + return stdout.strip() + + +def mirror2lists_fileprefix(mirror): + string = mirror + # take off http:// or ftp:// + if string.endswith("/"): + string = string[0:-1] + pos = string.find("://") + if pos >= 0: + string = string[pos + 3:] + string = string.replace("/", "_") + return string + + +def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"): + for (name, omirror) in old_mirrors.iteritems(): + nmirror = new_mirrors.get(name) + if not nmirror: + continue + oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) + nprefix = os.path.join(lists_d, mirror2lists_fileprefix(nmirror)) + if oprefix == nprefix: + continue + olen = len(oprefix) + for filename in glob.glob("%s_*" % oprefix): + util.rename(filename, "%s%s" % (nprefix, filename[olen:])) + + +def get_release(): + (stdout, _stderr) = util.subp(['lsb_release', '-cs']) + return stdout.strip() + + +def generate_sources_list(codename, mirrors, cloud, log): + template_fn = cloud.get_template_filename('sources.list') 
+ if not template_fn: + log.warn("No template found, not rendering /etc/apt/sources.list") + return + + params = {'codename': codename} + for k in mirrors: + params[k] = mirrors[k] + out_fn = cloud.paths.join(False, '/etc/apt/sources.list') + templater.render_to_file(template_fn, out_fn, params) + + +def add_sources(cloud, srclist, template_params=None): + """ + add entries in /etc/apt/sources.list.d for each abbreviated + sources.list entry in 'srclist'. When rendering template, also + include the values in dictionary searchList + """ + if template_params is None: + template_params = {} + + errorlist = [] + for ent in srclist: + if 'source' not in ent: + errorlist.append(["", "missing source"]) + continue + + source = ent['source'] + if source.startswith("ppa:"): + try: + util.subp(["add-apt-repository", source]) + except: + errorlist.append([source, "add-apt-repository failed"]) + continue + + source = templater.render_string(source, template_params) + + if 'filename' not in ent: + ent['filename'] = 'cloud_config_sources.list' + + if not ent['filename'].startswith("/"): + ent['filename'] = os.path.join("/etc/apt/sources.list.d/", + ent['filename']) + + if ('keyid' in ent and 'key' not in ent): + ks = "keyserver.ubuntu.com" + if 'keyserver' in ent: + ks = ent['keyserver'] + try: + ent['key'] = getkeybyid(ent['keyid'], ks) + except: + errorlist.append([source, "failed to get key from %s" % ks]) + continue + + if 'key' in ent: + try: + util.subp(('apt-key', 'add', '-'), ent['key']) + except: + errorlist.append([source, "failed add key"]) + + try: + contents = "%s\n" % (source) + util.write_file(cloud.paths.join(False, ent['filename']), + contents, omode="ab") + except: + errorlist.append([source, + "failed write to file %s" % ent['filename']]) + + return errorlist + + +def find_apt_mirror_info(cloud, cfg): + """find an apt_mirror given the cloud and cfg provided.""" + + mirror = None + + # this is less preferred way of specifying mirror preferred would be to + # use the distro's search or package_mirror. + mirror = cfg.get("apt_mirror", None) + + search = cfg.get("apt_mirror_search", None) + if not mirror and search: + mirror = util.search_for_mirror(search) + + if (not mirror and + util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)): + mydom = "" + doms = [] + + # if we have a fqdn, then search its domain portion first + (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) + mydom = ".".join(fqdn.split(".")[1:]) + if mydom: + doms.append(".%s" % mydom) + + doms.extend((".localdomain", "",)) + + mirror_list = [] + distro = cloud.distro.name + mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) + for post in doms: + mirror_list.append(mirrorfmt % (post)) + + mirror = util.search_for_mirror(mirror_list) + + mirror_info = cloud.datasource.get_package_mirror_info() + + # this is a bit strange. + # if mirror is set, then one of the legacy options above set it + # but they do not cover security. so we need to get that from + # get_package_mirror_info + if mirror: + mirror_info.update({'primary': mirror}) + + return mirror_info diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py deleted file mode 100644 index 356bb98d..00000000 --- a/cloudinit/config/cc_apt_update_upgrade.py +++ /dev/null @@ -1,305 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. 
-# -# Author: Scott Moser -# Author: Juerg Haefliger -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import glob -import os -import time - -from cloudinit import templater -from cloudinit import util - -distros = ['ubuntu', 'debian'] - -PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n" -PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy" - -# A temporary shell program to get a given gpg key -# from a given keyserver -EXPORT_GPG_KEYID = """ - k=${1} ks=${2}; - exec 2>/dev/null - [ -n "$k" ] || exit 1; - armour=$(gpg --list-keys --armour "${k}") - if [ -z "${armour}" ]; then - gpg --keyserver ${ks} --recv $k >/dev/null && - armour=$(gpg --export --armour "${k}") && - gpg --batch --yes --delete-keys "${k}" - fi - [ -n "${armour}" ] && echo "${armour}" -""" - - -def handle(name, cfg, cloud, log, _args): - update = util.get_cfg_option_bool(cfg, 'apt_update', False) - upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False) - - release = get_release() - mirrors = find_apt_mirror_info(cloud, cfg) - if not mirrors or "primary" not in mirrors: - log.debug(("Skipping module named %s," - " no package 'mirror' located"), name) - return - - # backwards compatibility - mirror = mirrors["primary"] - mirrors["mirror"] = mirror - - log.debug("mirror info: %s" % mirrors) - - if not util.get_cfg_option_bool(cfg, - 'apt_preserve_sources_list', False): - generate_sources_list(release, mirrors, cloud, log) - old_mirrors = cfg.get('apt_old_mirrors', - {"primary": "archive.ubuntu.com/ubuntu", - "security": "security.ubuntu.com/ubuntu"}) - rename_apt_lists(old_mirrors, mirrors) - - # Set up any apt proxy - proxy = cfg.get("apt_proxy", None) - proxy_filename = PROXY_FN - if proxy: - try: - # See man 'apt.conf' - contents = PROXY_TPL % (proxy) - util.write_file(cloud.paths.join(False, proxy_filename), - contents) - except Exception as e: - util.logexc(log, "Failed to write proxy to %s", proxy_filename) - elif os.path.isfile(proxy_filename): - util.del_file(proxy_filename) - - # Process 'apt_sources' - if 'apt_sources' in cfg: - params = mirrors - params['RELEASE'] = release - params['MIRROR'] = mirror - errors = add_sources(cloud, cfg['apt_sources'], params) - for e in errors: - log.warn("Source Error: %s", ':'.join(e)) - - dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False) - if dconf_sel: - log.debug("setting debconf selections per cloud config") - try: - util.subp(('debconf-set-selections', '-'), dconf_sel) - except: - util.logexc(log, "Failed to run debconf-set-selections") - - pkglist = util.get_cfg_option_list(cfg, 'packages', []) - - errors = [] - if update or len(pkglist) or upgrade: - try: - cloud.distro.update_package_sources() - except Exception as e: - util.logexc(log, "Package update failed") - errors.append(e) - - if upgrade: - try: - cloud.distro.package_command("upgrade") - except Exception as e: - util.logexc(log, "Package upgrade failed") - errors.append(e) - - if len(pkglist): - try: - cloud.distro.install_packages(pkglist) - except Exception as e: - util.logexc(log, "Failed to 
install packages: %s ", pkglist) - errors.append(e) - - # kernel and openssl (possibly some other packages) - # write a file /var/run/reboot-required after upgrading. - # if that file exists and configured, then just stop right now and reboot - # TODO(smoser): handle this less voilently - reboot_file = "/var/run/reboot-required" - if ((upgrade or pkglist) and cfg.get("apt_reboot_if_required", False) and - os.path.isfile(reboot_file)): - log.warn("rebooting after upgrade or install per %s" % reboot_file) - time.sleep(1) # give the warning time to get out - util.subp(["/sbin/reboot"]) - time.sleep(60) - log.warn("requested reboot did not happen!") - errors.append(Exception("requested reboot did not happen!")) - - if len(errors): - log.warn("%s failed with exceptions, re-raising the last one", - len(errors)) - raise errors[-1] - - -# get gpg keyid from keyserver -def getkeybyid(keyid, keyserver): - with util.ExtendedTemporaryFile(suffix='.sh') as fh: - fh.write(EXPORT_GPG_KEYID) - fh.flush() - cmd = ['/bin/sh', fh.name, keyid, keyserver] - (stdout, _stderr) = util.subp(cmd) - return stdout.strip() - - -def mirror2lists_fileprefix(mirror): - string = mirror - # take off http:// or ftp:// - if string.endswith("/"): - string = string[0:-1] - pos = string.find("://") - if pos >= 0: - string = string[pos + 3:] - string = string.replace("/", "_") - return string - - -def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"): - for (name, omirror) in old_mirrors.iteritems(): - nmirror = new_mirrors.get(name) - if not nmirror: - continue - oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) - nprefix = os.path.join(lists_d, mirror2lists_fileprefix(nmirror)) - if oprefix == nprefix: - continue - olen = len(oprefix) - for filename in glob.glob("%s_*" % oprefix): - util.rename(filename, "%s%s" % (nprefix, filename[olen:])) - - -def get_release(): - (stdout, _stderr) = util.subp(['lsb_release', '-cs']) - return stdout.strip() - - -def generate_sources_list(codename, mirrors, cloud, log): - template_fn = cloud.get_template_filename('sources.list') - if not template_fn: - log.warn("No template found, not rendering /etc/apt/sources.list") - return - - params = {'codename': codename} - for k in mirrors: - params[k] = mirrors[k] - out_fn = cloud.paths.join(False, '/etc/apt/sources.list') - templater.render_to_file(template_fn, out_fn, params) - - -def add_sources(cloud, srclist, template_params=None): - """ - add entries in /etc/apt/sources.list.d for each abbreviated - sources.list entry in 'srclist'. 
When rendering template, also - include the values in dictionary searchList - """ - if template_params is None: - template_params = {} - - errorlist = [] - for ent in srclist: - if 'source' not in ent: - errorlist.append(["", "missing source"]) - continue - - source = ent['source'] - if source.startswith("ppa:"): - try: - util.subp(["add-apt-repository", source]) - except: - errorlist.append([source, "add-apt-repository failed"]) - continue - - source = templater.render_string(source, template_params) - - if 'filename' not in ent: - ent['filename'] = 'cloud_config_sources.list' - - if not ent['filename'].startswith("/"): - ent['filename'] = os.path.join("/etc/apt/sources.list.d/", - ent['filename']) - - if ('keyid' in ent and 'key' not in ent): - ks = "keyserver.ubuntu.com" - if 'keyserver' in ent: - ks = ent['keyserver'] - try: - ent['key'] = getkeybyid(ent['keyid'], ks) - except: - errorlist.append([source, "failed to get key from %s" % ks]) - continue - - if 'key' in ent: - try: - util.subp(('apt-key', 'add', '-'), ent['key']) - except: - errorlist.append([source, "failed add key"]) - - try: - contents = "%s\n" % (source) - util.write_file(cloud.paths.join(False, ent['filename']), - contents, omode="ab") - except: - errorlist.append([source, - "failed write to file %s" % ent['filename']]) - - return errorlist - - -def find_apt_mirror_info(cloud, cfg): - """find an apt_mirror given the cloud and cfg provided.""" - - mirror = None - - # this is less preferred way of specifying mirror preferred would be to - # use the distro's search or package_mirror. - mirror = cfg.get("apt_mirror", None) - - search = cfg.get("apt_mirror_search", None) - if not mirror and search: - mirror = util.search_for_mirror(search) - - if (not mirror and - util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)): - mydom = "" - doms = [] - - # if we have a fqdn, then search its domain portion first - (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) - mydom = ".".join(fqdn.split(".")[1:]) - if mydom: - doms.append(".%s" % mydom) - - doms.extend((".localdomain", "",)) - - mirror_list = [] - distro = cloud.distro.name - mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) - for post in doms: - mirror_list.append(mirrorfmt % (post)) - - mirror = util.search_for_mirror(mirror_list) - - mirror_info = cloud.datasource.get_package_mirror_info() - - # this is a bit strange. - # if mirror is set, then one of the legacy options above set it - # but they do not cover security. so we need to get that from - # get_package_mirror_info - if mirror: - mirror_info.update({'primary': mirror}) - - return mirror_info diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py new file mode 100644 index 00000000..e319e147 --- /dev/null +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -0,0 +1,110 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from logging import StreamHandler +import os +import time + +from cloudinit import util + +REBOOT_FILE = "/var/run/reboot-required" +REBOOT_CMD = ["/sbin/reboot"] + + +def _multi_cfg_bool_get(cfg, *keys): + for k in keys: + if util.get_cfg_option_bool(cfg, k, False): + return True + return False + + +def _flush_loggers(root): + for h in root.handlers: + if isinstance(h, (StreamHandler)): + try: + h.flush() + except IOError: + pass + if root.parent: + _flush_loggers(root.parent) + + +def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2): + util.subp(REBOOT_CMD) + start = time.time() + wait_time = initial_sleep + for _i in range(0, wait_attempts): + time.sleep(wait_time) + wait_time *= backoff + elapsed = time.time() - start + log.debug("Rebooted, but still running after %s seconds", int(elapsed)) + # If we got here, not good + elapsed = time.time() - start + raise RuntimeError(("Reboot did not happen" + " after %s seconds!") % (int(elapsed))) + + +def handle(_name, cfg, cloud, log, _args): + # Handle the old style + new config names + update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update') + upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade') + reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required', + 'package_reboot_if_required') + pkglist = util.get_cfg_option_list(cfg, 'packages', []) + + errors = [] + if update or len(pkglist) or upgrade: + try: + cloud.distro.update_package_sources() + except Exception as e: + util.logexc(log, "Package update failed") + errors.append(e) + + if upgrade: + try: + cloud.distro.package_command("upgrade") + except Exception as e: + util.logexc(log, "Package upgrade failed") + errors.append(e) + + if len(pkglist): + try: + cloud.distro.install_packages(pkglist) + except Exception as e: + util.logexc(log, "Failed to install packages: %s", pkglist) + errors.append(e) + + # TODO(smoser): handle this less violently + # kernel and openssl (possibly some other packages) + # write a file /var/run/reboot-required after upgrading. + # if that file exists and configured, then just stop right now and reboot + reboot_fn_exists = os.path.isfile(REBOOT_FILE) + if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists: + try: + log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE) + # Flush the above warning + anything else out... + _flush_loggers(log) + _fire_reboot(log) + except Exception as e: + util.logexc(log, "Requested reboot did not happen!") + errors.append(e) + + if len(errors): + log.warn("%s failed with exceptions, re-raising the last one", + len(errors)) + raise errors[-1] -- cgit v1.2.3 From 85a412c172044ae89d381f69ddb309ce8b3cea6e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 21:14:51 -0700 Subject: Move the recursive flushing to the log module. --- cloudinit/config/cc_package_update_upgrade_install.py | 15 ++------------- cloudinit/log.py | 12 ++++++++++++ 2 files changed, 14 insertions(+), 13 deletions(-) (limited to 'cloudinit/config') diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py index e319e147..73b0e30d 100644 --- a/cloudinit/config/cc_package_update_upgrade_install.py +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -16,10 +16,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-from logging import StreamHandler import os import time +from cloudinit import log as logging from cloudinit import util REBOOT_FILE = "/var/run/reboot-required" @@ -33,17 +33,6 @@ def _multi_cfg_bool_get(cfg, *keys): return False -def _flush_loggers(root): - for h in root.handlers: - if isinstance(h, (StreamHandler)): - try: - h.flush() - except IOError: - pass - if root.parent: - _flush_loggers(root.parent) - - def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2): util.subp(REBOOT_CMD) start = time.time() @@ -98,7 +87,7 @@ def handle(_name, cfg, cloud, log, _args): try: log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE) # Flush the above warning + anything else out... - _flush_loggers(log) + logging.flushLoggers(log) _fire_reboot(log) except Exception as e: util.logexc(log, "Requested reboot did not happen!") diff --git a/cloudinit/log.py b/cloudinit/log.py index 2333e5ee..da6c2851 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -53,6 +53,18 @@ def setupBasicLogging(): root.setLevel(DEBUG) +def flushLoggers(root): + if not root: + return + for h in root.handlers: + if isinstance(h, (logging.StreamHandler)): + try: + h.flush() + except IOError: + pass + flushLoggers(root.parent) + + def setupLogging(cfg=None): # See if the config provides any logging conf... if not cfg: -- cgit v1.2.3 From 8e38c908da002b350d26845ffb5931b4efce1ca8 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 21:20:19 -0700 Subject: Add in a created by cloud-init header. --- cloudinit/config/cc_yum_add_repo.py | 1 + 1 file changed, 1 insertion(+) (limited to 'cloudinit/config') diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index fda3236f..ee117bcd 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -50,6 +50,7 @@ def _format_repository_config(repo_id, repo_config): # the format of yum and don't verify further to_be[repo_id][k] = v lines = to_be.write() + lines.insert(0, util.make_header()) return "\n".join(lines) -- cgit v1.2.3 From 0b73122b6a7c63d4ab1b117c6c0c136876fefd16 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 17 Oct 2012 09:32:35 -0700 Subject: Make the yum value 'string' translation a function. --- cloudinit/config/cc_yum_add_repo.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) (limited to 'cloudinit/config') diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index ee117bcd..123f7827 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -29,6 +29,19 @@ def _canonicalize_id(repo_id): return repo_id +def _format_repo_value(val): + if isinstance(val, (bool)): + # Seems like yum prefers 1/0 + return str(int(val)) + if isinstance(val, (list, tuple)): + # Can handle 'lists' in certain cases + # See: http://bit.ly/Qqrf1t + return "\n ".join([_format_repo_value(v) for v in val]) + if not isinstance(val, (basestring, str)): + return str(val) + return val + + ## TODO(harlowja): move to distro? 
# See man yum.conf def _format_repository_config(repo_id, repo_config): @@ -36,19 +49,9 @@ def _format_repository_config(repo_id, repo_config): to_be[repo_id] = {} # Do basic translation of the items -> values for (k, v) in repo_config.items(): - if isinstance(v, bool): - # Seems like yum prefers 1/0 - if v: - v = '1' - else: - v = '0' - elif isinstance(v, (tuple, list)): - # Can handle 'lists' in certain cases - # See: http://bit.ly/Qqrf1t - v = "\n ".join(v) - # For now assume that peopel using this know - # the format of yum and don't verify further - to_be[repo_id][k] = v + # For now assume that people using this know + # the format of yum and don't verify keys/values further + to_be[repo_id][k] = _format_repo_value(v) lines = to_be.write() lines.insert(0, util.make_header()) return "\n".join(lines) -- cgit v1.2.3 From 04f52eb593e4f5114626c74fd8f3c5a9a8d440bd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 17 Oct 2012 09:42:22 -0700 Subject: More cleanups, fix header function. --- cloudinit/config/cc_yum_add_repo.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'cloudinit/config') diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 123f7827..5c273825 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -53,7 +53,7 @@ def _format_repository_config(repo_id, repo_config): # the format of yum and don't verify keys/values further to_be[repo_id][k] = _format_repo_value(v) lines = to_be.write() - lines.insert(0, util.make_header()) + lines.insert(0, "# Created by cloud-init on %s" % (util.time_rfc2822())) return "\n".join(lines) @@ -87,12 +87,19 @@ def handle(name, cfg, _cloud, log, _args): if k: n_repo_config[k] = v repo_config = n_repo_config - if not 'baseurl' in repo_config: - log.warn("Repository %s does not contain a baseurl address", - repo_id) - else: + missing_required = 0 + for req_field in ['baseurl']: + if not req_field in repo_config: + log.warn(("Repository %s does not contain a %s" + " configuration 'required' entry"), + repo_id, req_field) + missing_required += 1 + if not missing_required: repo_configs[canon_repo_id] = repo_config repo_locations[canon_repo_id] = repo_fn_pth + else: + log.warn("Repository %s is missing %s required fields, skipping!", + repo_id, missing_required) for (c_repo_id, path) in repo_locations.items(): repo_blob = _format_repository_config(c_repo_id, repo_configs.get(c_repo_id)) -- cgit v1.2.3 From aa8b51a48a30e3a3c863ca0ddb8bc4667026d57a Mon Sep 17 00:00:00 2001 From: harlowja Date: Sat, 27 Oct 2012 19:25:48 -0700 Subject: Helpful cleanups. 1. Remove the usage of the path.join function now that all code should be going through the util file methods (and they can be mocked out as needed). 2. Adjust all occurences of the above join function to either not use it or replace it with the standard os.path.join (which can also be mocked out as needed) 3. Fix pylint from complaining about the tests folder 'helpers.py' not being found 4. Add a pylintrc file that is used instead of the options hidden in the 'run_pylint' tool. 
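Point 1 in the commit message above is mostly about testability. As a rough illustration (using the standard library's unittest.mock and a made-up target path, not the helpers this tree's own test suite uses), routing writes through cloudinit.util means a unit test can intercept them instead of touching /etc:

    import logging
    from unittest import mock

    from cloudinit.config import cc_apt_pipelining


    def test_write_apt_snippet_is_mockable():
        log = logging.getLogger(__name__)
        target = "/etc/apt/apt.conf.d/90cloud-init-pipelining"  # hypothetical path
        with mock.patch.object(cc_apt_pipelining.util, "write_file") as write_file:
            cc_apt_pipelining.write_apt_snippet("0", log, target)
        # The module builds the contents from APT_PIPE_TPL and hands them to
        # util.write_file(), so the mock sees exactly one call.
        write_file.assert_called_once_with(
            target, cc_apt_pipelining.APT_PIPE_TPL % ("0"))
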
--- Makefile | 8 +-- cloudinit/config/cc_apt_pipelining.py | 12 ++-- cloudinit/config/cc_apt_update_upgrade.py | 13 ++-- cloudinit/config/cc_ca_certs.py | 24 ++++---- cloudinit/config/cc_chef.py | 30 +++++----- cloudinit/config/cc_landscape.py | 14 ++--- cloudinit/config/cc_mcollective.py | 22 +++---- cloudinit/config/cc_mounts.py | 9 ++- cloudinit/config/cc_phone_home.py | 4 +- cloudinit/config/cc_puppet.py | 70 ++++++++++------------ cloudinit/config/cc_resizefs.py | 5 +- cloudinit/config/cc_rsyslog.py | 3 +- cloudinit/config/cc_runcmd.py | 2 +- cloudinit/config/cc_salt_minion.py | 6 +- cloudinit/config/cc_set_passwords.py | 6 +- cloudinit/config/cc_ssh.py | 16 +++-- cloudinit/config/cc_ssh_authkey_fingerprints.py | 7 +-- cloudinit/config/cc_update_etc_hosts.py | 3 +- cloudinit/distros/__init__.py | 8 +-- cloudinit/distros/debian.py | 26 +++----- cloudinit/helpers.py | 29 +-------- cloudinit/sources/__init__.py | 2 - cloudinit/ssh_util.py | 26 ++++---- pylintrc | 19 ++++++ tests/__init__.py | 0 tests/unittests/__init__.py | 0 tests/unittests/test_datasource/__init__.py | 0 tests/unittests/test_distros/__init__.py | 0 tests/unittests/test_filters/__init__.py | 0 tests/unittests/test_filters/test_launch_index.py | 10 +--- tests/unittests/test_handler/__init__.py | 0 .../test_handler/test_handler_ca_certs.py | 18 +++--- tests/unittests/test_runs/__init__.py | 0 tests/unittests/test_runs/test_simple_run.py | 10 +--- tools/run-pylint | 19 ++---- 35 files changed, 170 insertions(+), 251 deletions(-) create mode 100644 pylintrc create mode 100644 tests/__init__.py create mode 100644 tests/unittests/__init__.py create mode 100644 tests/unittests/test_datasource/__init__.py create mode 100644 tests/unittests/test_distros/__init__.py create mode 100644 tests/unittests/test_filters/__init__.py create mode 100644 tests/unittests/test_handler/__init__.py create mode 100644 tests/unittests/test_runs/__init__.py (limited to 'cloudinit/config') diff --git a/Makefile b/Makefile index 49324ca0..8f5646b7 100644 --- a/Makefile +++ b/Makefile @@ -1,20 +1,20 @@ CWD=$(shell pwd) -PY_FILES=$(shell find cloudinit bin tests tools -name "*.py") +PY_FILES=$(shell find cloudinit bin tests tools -type f -name "*.py") PY_FILES+="bin/cloud-init" all: test pep8: - $(CWD)/tools/run-pep8 $(PY_FILES) + @$(CWD)/tools/run-pep8 $(PY_FILES) pylint: - $(CWD)/tools/run-pylint $(PY_FILES) + @$(CWD)/tools/run-pylint $(PY_FILES) pyflakes: pyflakes $(PY_FILES) test: - nosetests $(noseopts) tests/unittests/ + @nosetests $(noseopts) tests/ 2to3: 2to3 $(PY_FILES) diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index 02056ee0..e5629175 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -34,26 +34,24 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" # on TCP connections - otherwise data corruption will occur. 
-def handle(_name, cfg, cloud, log, _args): +def handle(_name, cfg, _cloud, log, _args): apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False) apt_pipe_value_s = str(apt_pipe_value).lower().strip() if apt_pipe_value_s == "false": - write_apt_snippet(cloud, "0", log, DEFAULT_FILE) + write_apt_snippet("0", log, DEFAULT_FILE) elif apt_pipe_value_s in ("none", "unchanged", "os"): return elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]: - write_apt_snippet(cloud, apt_pipe_value_s, log, DEFAULT_FILE) + write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE) else: log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value) -def write_apt_snippet(cloud, setting, log, f_name): +def write_apt_snippet(setting, log, f_name): """Writes f_name with apt pipeline depth 'setting'.""" file_contents = APT_PIPE_TPL % (setting) - - util.write_file(cloud.paths.join(False, f_name), file_contents) - + util.write_file(f_name, file_contents) log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting) diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py index 356bb98d..59c34b59 100644 --- a/cloudinit/config/cc_apt_update_upgrade.py +++ b/cloudinit/config/cc_apt_update_upgrade.py @@ -78,8 +78,7 @@ def handle(name, cfg, cloud, log, _args): try: # See man 'apt.conf' contents = PROXY_TPL % (proxy) - util.write_file(cloud.paths.join(False, proxy_filename), - contents) + util.write_file(proxy_filename, contents) except Exception as e: util.logexc(log, "Failed to write proxy to %s", proxy_filename) elif os.path.isfile(proxy_filename): @@ -90,7 +89,7 @@ def handle(name, cfg, cloud, log, _args): params = mirrors params['RELEASE'] = release params['MIRROR'] = mirror - errors = add_sources(cloud, cfg['apt_sources'], params) + errors = add_sources(cfg['apt_sources'], params) for e in errors: log.warn("Source Error: %s", ':'.join(e)) @@ -196,11 +195,10 @@ def generate_sources_list(codename, mirrors, cloud, log): params = {'codename': codename} for k in mirrors: params[k] = mirrors[k] - out_fn = cloud.paths.join(False, '/etc/apt/sources.list') - templater.render_to_file(template_fn, out_fn, params) + templater.render_to_file(template_fn, '/etc/apt/sources.list', params) -def add_sources(cloud, srclist, template_params=None): +def add_sources(srclist, template_params=None): """ add entries in /etc/apt/sources.list.d for each abbreviated sources.list entry in 'srclist'. When rendering template, also @@ -250,8 +248,7 @@ def add_sources(cloud, srclist, template_params=None): try: contents = "%s\n" % (source) - util.write_file(cloud.paths.join(False, ent['filename']), - contents, omode="ab") + util.write_file(ent['filename'], contents, omode="ab") except: errorlist.append([source, "failed write to file %s" % ent['filename']]) diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index dc046bda..20f24357 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -22,6 +22,7 @@ CA_CERT_PATH = "/usr/share/ca-certificates/" CA_CERT_FILENAME = "cloud-init-ca-certs.crt" CA_CERT_CONFIG = "/etc/ca-certificates.conf" CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/" +CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) distros = ['ubuntu', 'debian'] @@ -33,7 +34,7 @@ def update_ca_certs(): util.subp(["update-ca-certificates"], capture=False) -def add_ca_certs(paths, certs): +def add_ca_certs(certs): """ Adds certificates to the system. 
To actually apply the new certificates you must also call L{update_ca_certs}. @@ -43,27 +44,24 @@ def add_ca_certs(paths, certs): if certs: # First ensure they are strings... cert_file_contents = "\n".join([str(c) for c in certs]) - cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) - cert_file_fullpath = paths.join(False, cert_file_fullpath) - util.write_file(cert_file_fullpath, cert_file_contents, mode=0644) + util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0644) # Append cert filename to CA_CERT_CONFIG file. - util.write_file(paths.join(False, CA_CERT_CONFIG), - "\n%s" % CA_CERT_FILENAME, omode="ab") + util.write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="ab") -def remove_default_ca_certs(paths): +def remove_default_ca_certs(): """ Removes all default trusted CA certificates from the system. To actually apply the change you must also call L{update_ca_certs}. """ - util.delete_dir_contents(paths.join(False, CA_CERT_PATH)) - util.delete_dir_contents(paths.join(False, CA_CERT_SYSTEM_PATH)) - util.write_file(paths.join(False, CA_CERT_CONFIG), "", mode=0644) + util.delete_dir_contents(CA_CERT_PATH) + util.delete_dir_contents(CA_CERT_SYSTEM_PATH) + util.write_file(CA_CERT_CONFIG, "", mode=0644) debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no" util.subp(('debconf-set-selections', '-'), debconf_sel) -def handle(name, cfg, cloud, log, _args): +def handle(name, cfg, _cloud, log, _args): """ Call to handle ca-cert sections in cloud-config file. @@ -85,14 +83,14 @@ def handle(name, cfg, cloud, log, _args): # default trusted CA certs first. if ca_cert_cfg.get("remove-defaults", False): log.debug("Removing default certificates") - remove_default_ca_certs(cloud.paths) + remove_default_ca_certs() # If we are given any new trusted CA certs to add, add them. if "trusted" in ca_cert_cfg: trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted") if trusted_certs: log.debug("Adding %d certificates" % len(trusted_certs)) - add_ca_certs(cloud.paths, trusted_certs) + add_ca_certs(trusted_certs) # Update the system with the new cert configuration. log.debug("Updating certificates") diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 6f568261..7a3d6a31 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -26,6 +26,15 @@ from cloudinit import util RUBY_VERSION_DEFAULT = "1.8" +CHEF_DIRS = [ + '/etc/chef', + '/var/log/chef', + '/var/lib/chef', + '/var/cache/chef', + '/var/backups/chef', + '/var/run/chef', +] + def handle(name, cfg, cloud, log, _args): @@ -37,24 +46,15 @@ def handle(name, cfg, cloud, log, _args): chef_cfg = cfg['chef'] # Ensure the chef directories we use exist - c_dirs = [ - '/etc/chef', - '/var/log/chef', - '/var/lib/chef', - '/var/cache/chef', - '/var/backups/chef', - '/var/run/chef', - ] - for d in c_dirs: - util.ensure_dir(cloud.paths.join(False, d)) + for d in CHEF_DIRS: + util.ensure_dir(d) # Set the validation key based on the presence of either 'validation_key' # or 'validation_cert'. 
In the case where both exist, 'validation_key' # takes precedence for key in ('validation_key', 'validation_cert'): if key in chef_cfg and chef_cfg[key]: - v_fn = cloud.paths.join(False, '/etc/chef/validation.pem') - util.write_file(v_fn, chef_cfg[key]) + util.write_file('/etc/chef/validation.pem', chef_cfg[key]) break # Create the chef config from template @@ -68,8 +68,7 @@ def handle(name, cfg, cloud, log, _args): '_default'), 'validation_name': chef_cfg['validation_name'] } - out_fn = cloud.paths.join(False, '/etc/chef/client.rb') - templater.render_to_file(template_fn, out_fn, params) + templater.render_to_file(template_fn, '/etc/chef/client.rb', params) else: log.warn("No template found, not rendering to /etc/chef/client.rb") @@ -81,8 +80,7 @@ def handle(name, cfg, cloud, log, _args): initial_attributes = chef_cfg['initial_attributes'] for k in list(initial_attributes.keys()): initial_json[k] = initial_attributes[k] - firstboot_fn = cloud.paths.join(False, '/etc/chef/firstboot.json') - util.write_file(firstboot_fn, json.dumps(initial_json)) + util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json)) # If chef is not installed, we install chef based on 'install_type' if not os.path.isfile('/usr/bin/chef-client'): diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 56ab0ce3..02610dd0 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -66,22 +66,16 @@ def handle(_name, cfg, cloud, log, _args): merge_data = [ LSC_BUILTIN_CFG, - cloud.paths.join(True, LSC_CLIENT_CFG_FILE), + LSC_CLIENT_CFG_FILE, ls_cloudcfg, ] merged = merge_together(merge_data) - - lsc_client_fn = cloud.paths.join(False, LSC_CLIENT_CFG_FILE) - lsc_dir = cloud.paths.join(False, os.path.dirname(lsc_client_fn)) - if not os.path.isdir(lsc_dir): - util.ensure_dir(lsc_dir) - contents = StringIO() merged.write(contents) - contents.flush() - util.write_file(lsc_client_fn, contents.getvalue()) - log.debug("Wrote landscape config file to %s", lsc_client_fn) + util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE)) + util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue()) + log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE) util.write_file(LS_DEFAULT_FILE, "RUN=1\n") util.subp(["service", "landscape-client", "restart"]) diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py index 2acdbc6f..b670390d 100644 --- a/cloudinit/config/cc_mcollective.py +++ b/cloudinit/config/cc_mcollective.py @@ -29,6 +29,7 @@ from cloudinit import util PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem" PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem" +SERVER_CFG = '/etc/mcollective/server.cfg' def handle(name, cfg, cloud, log, _args): @@ -48,26 +49,23 @@ def handle(name, cfg, cloud, log, _args): if 'conf' in mcollective_cfg: # Read server.cfg values from the # original file in order to be able to mix the rest up - server_cfg_fn = cloud.paths.join(True, '/etc/mcollective/server.cfg') - mcollective_config = ConfigObj(server_cfg_fn) + mcollective_config = ConfigObj(SERVER_CFG) # See: http://tiny.cc/jh9agw for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems(): if cfg_name == 'public-cert': - pubcert_fn = cloud.paths.join(True, PUBCERT_FILE) - util.write_file(pubcert_fn, cfg, mode=0644) - mcollective_config['plugin.ssl_server_public'] = pubcert_fn + util.write_file(PUBCERT_FILE, cfg, mode=0644) + mcollective_config['plugin.ssl_server_public'] = PUBCERT_FILE mcollective_config['securityprovider'] = 'ssl' elif 
cfg_name == 'private-cert': - pricert_fn = cloud.paths.join(True, PRICERT_FILE) - util.write_file(pricert_fn, cfg, mode=0600) - mcollective_config['plugin.ssl_server_private'] = pricert_fn + util.write_file(PRICERT_FILE, cfg, mode=0600) + mcollective_config['plugin.ssl_server_private'] = PRICERT_FILE mcollective_config['securityprovider'] = 'ssl' else: if isinstance(cfg, (basestring, str)): # Just set it in the 'main' section mcollective_config[cfg_name] = cfg elif isinstance(cfg, (dict)): - # Iterate throug the config items, create a section + # Iterate through the config items, create a section # if it is needed and then add/or create items as needed if cfg_name not in mcollective_config.sections: mcollective_config[cfg_name] = {} @@ -78,14 +76,12 @@ def handle(name, cfg, cloud, log, _args): mcollective_config[cfg_name] = str(cfg) # We got all our config as wanted we'll rename # the previous server.cfg and create our new one - old_fn = cloud.paths.join(False, '/etc/mcollective/server.cfg.old') - util.rename(server_cfg_fn, old_fn) + util.rename(SERVER_CFG, "%s.old" % (SERVER_CFG)) # Now we got the whole file, write to disk... contents = StringIO() mcollective_config.write(contents) contents = contents.getvalue() - server_cfg_rw = cloud.paths.join(False, '/etc/mcollective/server.cfg') - util.write_file(server_cfg_rw, contents, mode=0644) + util.write_file(SERVER_CFG, contents, mode=0644) # Start mcollective util.subp(['service', 'mcollective', 'start'], capture=False) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 14c965bb..cb772c86 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -28,6 +28,7 @@ from cloudinit import util SHORTNAME_FILTER = r"^[x]{0,1}[shv]d[a-z][0-9]*$" SHORTNAME = re.compile(SHORTNAME_FILTER) WS = re.compile("[%s]+" % (whitespace)) +FSTAB_PATH = "/etc/fstab" def is_mdname(name): @@ -167,8 +168,7 @@ def handle(_name, cfg, cloud, log, _args): cc_lines.append('\t'.join(line)) fstab_lines = [] - fstab = util.load_file(cloud.paths.join(True, "/etc/fstab")) - for line in fstab.splitlines(): + for line in util.load_file(FSTAB_PATH).splitlines(): try: toks = WS.split(line) if toks[3].find(comment) != -1: @@ -179,7 +179,7 @@ def handle(_name, cfg, cloud, log, _args): fstab_lines.extend(cc_lines) contents = "%s\n" % ('\n'.join(fstab_lines)) - util.write_file(cloud.paths.join(False, "/etc/fstab"), contents) + util.write_file(FSTAB_PATH, contents) if needswap: try: @@ -188,9 +188,8 @@ def handle(_name, cfg, cloud, log, _args): util.logexc(log, "Activating swap via 'swapon -a' failed") for d in dirs: - real_dir = cloud.paths.join(False, d) try: - util.ensure_dir(real_dir) + util.ensure_dir(d) except: util.logexc(log, "Failed to make '%s' config-mount", d) diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index ae1349eb..886487f8 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -84,10 +84,10 @@ def handle(name, cfg, cloud, log, args): for (n, path) in pubkeys.iteritems(): try: - all_keys[n] = util.load_file(cloud.paths.join(True, path)) + all_keys[n] = util.load_file(path) except: util.logexc(log, ("%s: failed to open, can not" - " phone home that data"), path) + " phone home that data!"), path) submit_keys = {} for k in post_list: diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 74ee18e1..8fe3af57 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -21,12 +21,32 @@ from StringIO 
import StringIO import os -import pwd import socket from cloudinit import helpers from cloudinit import util +PUPPET_CONF_PATH = '/etc/puppet/puppet.conf' +PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/' +PUPPET_SSL_DIR = '/var/lib/puppet/ssl' +PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem' + + +def _autostart_puppet(log): + # Set puppet to automatically start + if os.path.exists('/etc/default/puppet'): + util.subp(['sed', '-i', + '-e', 's/^START=.*/START=yes/', + '/etc/default/puppet'], capture=False) + elif os.path.exists('/bin/systemctl'): + util.subp(['/bin/systemctl', 'enable', 'puppet.service'], + capture=False) + elif os.path.exists('/sbin/chkconfig'): + util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) + else: + log.warn(("Sorry we do not know how to enable" + " puppet services on this system")) + def handle(name, cfg, cloud, log, _args): # If there isn't a puppet key in the configuration don't do anything @@ -43,8 +63,7 @@ def handle(name, cfg, cloud, log, _args): # ... and then update the puppet configuration if 'conf' in puppet_cfg: # Add all sections from the conf object to puppet.conf - puppet_conf_fn = cloud.paths.join(True, '/etc/puppet/puppet.conf') - contents = util.load_file(puppet_conf_fn) + contents = util.load_file(PUPPET_CONF_PATH) # Create object for reading puppet.conf values puppet_config = helpers.DefaultingConfigParser() # Read puppet.conf values from original file in order to be able to @@ -53,28 +72,19 @@ def handle(name, cfg, cloud, log, _args): cleaned_lines = [i.lstrip() for i in contents.splitlines()] cleaned_contents = '\n'.join(cleaned_lines) puppet_config.readfp(StringIO(cleaned_contents), - filename=puppet_conf_fn) + filename=PUPPET_CONF_PATH) for (cfg_name, cfg) in puppet_cfg['conf'].iteritems(): # Cert configuration is a special case # Dump the puppet master ca certificate in the correct place if cfg_name == 'ca_cert': # Puppet ssl sub-directory isn't created yet # Create it with the proper permissions and ownership - pp_ssl_dir = cloud.paths.join(False, '/var/lib/puppet/ssl') - util.ensure_dir(pp_ssl_dir, 0771) - util.chownbyid(pp_ssl_dir, - pwd.getpwnam('puppet').pw_uid, 0) - pp_ssl_certs = cloud.paths.join(False, - '/var/lib/puppet/ssl/certs/') - util.ensure_dir(pp_ssl_certs) - util.chownbyid(pp_ssl_certs, - pwd.getpwnam('puppet').pw_uid, 0) - pp_ssl_ca_certs = cloud.paths.join(False, - ('/var/lib/puppet/' - 'ssl/certs/ca.pem')) - util.write_file(pp_ssl_ca_certs, cfg) - util.chownbyid(pp_ssl_ca_certs, - pwd.getpwnam('puppet').pw_uid, 0) + util.ensure_dir(PUPPET_SSL_DIR, 0771) + util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root') + util.ensure_dir(PUPPET_SSL_CERT_DIR) + util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root') + util.write_file(PUPPET_SSL_CERT_PATH, str(cfg)) + util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root') else: # Iterate throug the config items, we'll use ConfigParser.set # to overwrite or create new items as needed @@ -90,25 +100,11 @@ def handle(name, cfg, cloud, log, _args): puppet_config.set(cfg_name, o, v) # We got all our config as wanted we'll rename # the previous puppet.conf and create our new one - conf_old_fn = cloud.paths.join(False, - '/etc/puppet/puppet.conf.old') - util.rename(puppet_conf_fn, conf_old_fn) - puppet_conf_rw = cloud.paths.join(False, '/etc/puppet/puppet.conf') - util.write_file(puppet_conf_rw, puppet_config.stringify()) + util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH)) + util.write_file(PUPPET_CONF_PATH, puppet_config.stringify()) - # Set puppet to 
automatically start - if os.path.exists('/etc/default/puppet'): - util.subp(['sed', '-i', - '-e', 's/^START=.*/START=yes/', - '/etc/default/puppet'], capture=False) - elif os.path.exists('/bin/systemctl'): - util.subp(['/bin/systemctl', 'enable', 'puppet.service'], - capture=False) - elif os.path.exists('/sbin/chkconfig'): - util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) - else: - log.warn(("Sorry we do not know how to enable" - " puppet services on this system")) + # Set it up so it autostarts + _autostart_puppet(log) # Start puppetd util.subp(['service', 'puppet', 'start'], capture=False) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index e7f27944..b958f332 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -62,7 +62,7 @@ def get_fs_type(st_dev, path, log): raise -def handle(name, cfg, cloud, log, args): +def handle(name, cfg, _cloud, log, args): if len(args) != 0: resize_root = args[0] else: @@ -74,11 +74,10 @@ def handle(name, cfg, cloud, log, args): # TODO(harlowja) is the directory ok to be used?? resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run") - resize_root_d = cloud.paths.join(False, resize_root_d) util.ensure_dir(resize_root_d) # TODO(harlowja): allow what is to be resized to be configurable?? - resize_what = cloud.paths.join(False, "/") + resize_what = "/" with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.", dir=resize_root_d, delete=True) as tfh: devpth = tfh.name diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index 78327526..0c2c6880 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -71,8 +71,7 @@ def handle(name, cfg, cloud, log, _args): try: contents = "%s\n" % (content) - util.write_file(cloud.paths.join(False, filename), - contents, omode=omode) + util.write_file(filename, contents, omode=omode) except Exception: util.logexc(log, "Failed to write to %s", filename) diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py index 65064cfb..598c3a3e 100644 --- a/cloudinit/config/cc_runcmd.py +++ b/cloudinit/config/cc_runcmd.py @@ -33,6 +33,6 @@ def handle(name, cfg, cloud, log, _args): cmd = cfg["runcmd"] try: content = util.shellify(cmd) - util.write_file(cloud.paths.join(False, out_fn), content, 0700) + util.write_file(out_fn, content, 0700) except: util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn) diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py index 8a1440d9..f3eede18 100644 --- a/cloudinit/config/cc_salt_minion.py +++ b/cloudinit/config/cc_salt_minion.py @@ -34,8 +34,7 @@ def handle(name, cfg, cloud, log, _args): cloud.distro.install_packages(["salt-minion"]) # Ensure we can configure files at the right dir - config_dir = cloud.paths.join(False, salt_cfg.get("config_dir", - '/etc/salt')) + config_dir = salt_cfg.get("config_dir", '/etc/salt') util.ensure_dir(config_dir) # ... and then update the salt configuration @@ -47,8 +46,7 @@ def handle(name, cfg, cloud, log, _args): # ... 
copy the key pair if specified if 'public_key' in salt_cfg and 'private_key' in salt_cfg: - pki_dir = cloud.paths.join(False, salt_cfg.get('pki_dir', - '/etc/salt/pki')) + pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki') with util.umask(077): util.ensure_dir(pki_dir) pub_name = os.path.join(pki_dir, 'minion.pub') diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 26c558ad..c6bf62fd 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -114,8 +114,7 @@ def handle(_name, cfg, cloud, log, args): replaced_auth = False # See: man sshd_config - conf_fn = cloud.paths.join(True, ssh_util.DEF_SSHD_CFG) - old_lines = ssh_util.parse_ssh_config(conf_fn) + old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG) new_lines = [] i = 0 for (i, line) in enumerate(old_lines): @@ -134,8 +133,7 @@ def handle(_name, cfg, cloud, log, args): pw_auth)) lines = [str(e) for e in new_lines] - ssh_rw_fn = cloud.paths.join(False, ssh_util.DEF_SSHD_CFG) - util.write_file(ssh_rw_fn, "\n".join(lines)) + util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines)) try: cmd = ['service'] diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 32e48c30..b623d476 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -59,7 +59,7 @@ def handle(_name, cfg, cloud, log, _args): # remove the static keys from the pristine image if cfg.get("ssh_deletekeys", True): - key_pth = cloud.paths.join(False, "/etc/ssh/", "ssh_host_*key*") + key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*") for f in glob.glob(key_pth): try: util.del_file(f) @@ -72,8 +72,7 @@ def handle(_name, cfg, cloud, log, _args): if key in KEY_2_FILE: tgt_fn = KEY_2_FILE[key][0] tgt_perms = KEY_2_FILE[key][1] - util.write_file(cloud.paths.join(False, tgt_fn), - val, tgt_perms) + util.write_file(tgt_fn, val, tgt_perms) for (priv, pub) in PRIV_2_PUB.iteritems(): if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']: @@ -94,7 +93,7 @@ def handle(_name, cfg, cloud, log, _args): 'ssh_genkeytypes', GENERATE_KEY_NAMES) for keytype in genkeys: - keyfile = cloud.paths.join(False, KEY_FILE_TPL % (keytype)) + keyfile = KEY_FILE_TPL % (keytype) util.ensure_dir(os.path.dirname(keyfile)) if not os.path.exists(keyfile): cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile] @@ -118,17 +117,16 @@ def handle(_name, cfg, cloud, log, _args): cfgkeys = cfg["ssh_authorized_keys"] keys.extend(cfgkeys) - apply_credentials(keys, user, cloud.paths, - disable_root, disable_root_opts) + apply_credentials(keys, user, disable_root, disable_root_opts) except: util.logexc(log, "Applying ssh credentials failed!") -def apply_credentials(keys, user, paths, disable_root, disable_root_opts): +def apply_credentials(keys, user, disable_root, disable_root_opts): keys = set(keys) if user: - ssh_util.setup_user_keys(keys, user, '', paths) + ssh_util.setup_user_keys(keys, user, '') if disable_root: if not user: @@ -137,4 +135,4 @@ def apply_credentials(keys, user, paths, disable_root, disable_root_opts): else: key_prefix = '' - ssh_util.setup_user_keys(keys, 'root', key_prefix, paths) + ssh_util.setup_user_keys(keys, 'root', key_prefix) diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 8c9a8806..c38bcea2 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -97,9 +97,8 @@ def handle(name, cfg, cloud, log, _args): "logging of ssh fingerprints 
disabled"), name) hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") - extract_func = ssh_util.extract_authorized_keys (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) for (user_name, _cfg) in users.items(): - (auth_key_fn, auth_key_entries) = extract_func(user_name, cloud.paths) - _pprint_key_entries(user_name, auth_key_fn, - auth_key_entries, hash_meth) + (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name) + _pprint_key_entries(user_name, key_fn, + key_entries, hash_meth) diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index 4d75000f..96103615 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -42,8 +42,7 @@ def handle(name, cfg, cloud, log, _args): raise RuntimeError(("No hosts template could be" " found for distro %s") % (cloud.distro.name)) - out_fn = cloud.paths.join(False, '/etc/hosts') - templater.render_to_file(tpl_fn_name, out_fn, + templater.render_to_file(tpl_fn_name, '/etc/hosts', {'hostname': hostname, 'fqdn': fqdn}) elif manage_hosts == "localhost": diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 2fbb0e9b..869540d2 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -122,8 +122,7 @@ class Distro(object): new_etchosts = StringIO() need_write = False need_change = True - hosts_ro_fn = self._paths.join(True, "/etc/hosts") - for line in util.load_file(hosts_ro_fn).splitlines(): + for line in util.load_file("/etc/hosts").splitlines(): if line.strip().startswith(header): continue if not line.strip() or line.strip().startswith("#"): @@ -147,8 +146,7 @@ class Distro(object): need_write = True if need_write: contents = new_etchosts.getvalue() - util.write_file(self._paths.join(False, "/etc/hosts"), - contents, mode=0644) + util.write_file("/etc/hosts", contents, mode=0644) def _bring_up_interface(self, device_name): cmd = ['ifup', device_name] @@ -262,7 +260,7 @@ class Distro(object): # Import SSH keys if 'ssh_authorized_keys' in kwargs: keys = set(kwargs['ssh_authorized_keys']) or [] - ssh_util.setup_user_keys(keys, name, None, self._paths) + ssh_util.setup_user_keys(keys, name, key_prefix=None) return True diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 88f4e978..cc7e53a0 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -43,7 +43,7 @@ class Distro(distros.Distro): def apply_locale(self, locale, out_fn=None): if not out_fn: - out_fn = self._paths.join(False, '/etc/default/locale') + out_fn = '/etc/default/locale' util.subp(['locale-gen', locale], capture=False) util.subp(['update-locale', locale], capture=False) lines = ["# Created by cloud-init", 'LANG="%s"' % (locale), ""] @@ -54,8 +54,7 @@ class Distro(distros.Distro): self.package_command('install', pkglist) def _write_network(self, settings): - net_fn = self._paths.join(False, "/etc/network/interfaces") - util.write_file(net_fn, settings) + util.write_file("/etc/network/interfaces", settings) return ['all'] def _bring_up_interfaces(self, device_names): @@ -69,12 +68,9 @@ class Distro(distros.Distro): return distros.Distro._bring_up_interfaces(self, device_names) def set_hostname(self, hostname): - out_fn = self._paths.join(False, "/etc/hostname") - self._write_hostname(hostname, out_fn) - if out_fn == '/etc/hostname': - # Only do this if we are running in non-adjusted root mode - LOG.debug("Setting hostname to %s", hostname) - util.subp(['hostname', hostname]) + 
self._write_hostname(hostname, "/etc/hostname") + LOG.debug("Setting hostname to %s", hostname) + util.subp(['hostname', hostname]) def _write_hostname(self, hostname, out_fn): # "" gives trailing newline. @@ -82,16 +78,14 @@ class Distro(distros.Distro): def update_hostname(self, hostname, prev_fn): hostname_prev = self._read_hostname(prev_fn) - read_fn = self._paths.join(True, "/etc/hostname") - hostname_in_etc = self._read_hostname(read_fn) + hostname_in_etc = self._read_hostname("/etc/hostname") update_files = [] if not hostname_prev or hostname_prev != hostname: update_files.append(prev_fn) if (not hostname_in_etc or (hostname_in_etc == hostname_prev and hostname_in_etc != hostname)): - write_fn = self._paths.join(False, "/etc/hostname") - update_files.append(write_fn) + update_files.append("/etc/hostname") for fn in update_files: try: self._write_hostname(hostname, fn) @@ -103,7 +97,6 @@ class Distro(distros.Distro): LOG.debug(("%s differs from /etc/hostname." " Assuming user maintained hostname."), prev_fn) if "/etc/hostname" in update_files: - # Only do this if we are running in non-adjusted root mode LOG.debug("Setting hostname to %s", hostname) util.subp(['hostname', hostname]) @@ -130,9 +123,8 @@ class Distro(distros.Distro): " no file found at %s") % (tz, tz_file)) # "" provides trailing newline during join tz_lines = ["# Created by cloud-init", str(tz), ""] - tz_fn = self._paths.join(False, "/etc/timezone") - util.write_file(tz_fn, "\n".join(tz_lines)) - util.copy(tz_file, self._paths.join(False, "/etc/localtime")) + util.write_file("/etc/timezone", "\n".join(tz_lines)) + util.copy(tz_file, "/etc/localtime") def package_command(self, command, args=None): e = os.environ.copy() diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index a4b20208..985ce3e5 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -302,14 +302,10 @@ class Paths(object): def __init__(self, path_cfgs, ds=None): self.cfgs = path_cfgs # Populate all the initial paths - self.cloud_dir = self.join(False, - path_cfgs.get('cloud_dir', - '/var/lib/cloud')) + self.cloud_dir = path_cfgs.get('cloud_dir', '/var/lib/cloud') self.instance_link = os.path.join(self.cloud_dir, 'instance') self.boot_finished = os.path.join(self.instance_link, "boot-finished") self.upstart_conf_d = path_cfgs.get('upstart_dir') - if self.upstart_conf_d: - self.upstart_conf_d = self.join(False, self.upstart_conf_d) self.seed_dir = os.path.join(self.cloud_dir, 'seed') # This one isn't joined, since it should just be read-only template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/') @@ -328,29 +324,6 @@ class Paths(object): # Set when a datasource becomes active self.datasource = ds - # joins the paths but also appends a read - # or write root if available - def join(self, read_only, *paths): - if read_only: - root = self.cfgs.get('read_root') - else: - root = self.cfgs.get('write_root') - if not paths: - return root - if len(paths) > 1: - joined = os.path.join(*paths) - else: - joined = paths[0] - if root: - pre_joined = joined - # Need to remove any starting '/' since this - # will confuse os.path.join - joined = joined.lstrip("/") - joined = os.path.join(root, joined) - LOG.debug("Translated %s to adjusted path %s (read-only=%s)", - pre_joined, joined, read_only) - return joined - # get_ipath_cur: get the current instance path for an item def get_ipath_cur(self, name=None): ipath = self.instance_link diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index b22369a8..745627d0 100644 --- 
a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -20,8 +20,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from email.mime.multipart import MIMEMultipart - import abc import os diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 88a11a1a..dd6b742f 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -212,17 +212,15 @@ def update_authorized_keys(old_entries, keys): return '\n'.join(lines) -def users_ssh_info(username, paths): +def users_ssh_info(username): pw_ent = pwd.getpwnam(username) - if not pw_ent: + if not pw_ent or not pw_ent.pw_dir: raise RuntimeError("Unable to get ssh info for user %r" % (username)) - ssh_dir = paths.join(False, os.path.join(pw_ent.pw_dir, '.ssh')) - return (ssh_dir, pw_ent) + return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent) -def extract_authorized_keys(username, paths): - (ssh_dir, pw_ent) = users_ssh_info(username, paths) - sshd_conf_fn = paths.join(True, DEF_SSHD_CFG) +def extract_authorized_keys(username): + (ssh_dir, pw_ent) = users_ssh_info(username) auth_key_fn = None with util.SeLinuxGuard(ssh_dir, recursive=True): try: @@ -231,7 +229,7 @@ def extract_authorized_keys(username, paths): # The following tokens are defined: %% is replaced by a literal # '%', %h is replaced by the home directory of the user being # authenticated and %u is replaced by the username of that user. - ssh_cfg = parse_ssh_config_map(sshd_conf_fn) + ssh_cfg = parse_ssh_config_map(DEF_SSHD_CFG) auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip() if not auth_key_fn: auth_key_fn = "%h/.ssh/authorized_keys" @@ -240,7 +238,6 @@ def extract_authorized_keys(username, paths): auth_key_fn = auth_key_fn.replace("%%", '%') if not auth_key_fn.startswith('/'): auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn) - auth_key_fn = paths.join(False, auth_key_fn) except (IOError, OSError): # Give up and use a default key filename auth_key_fn = os.path.join(ssh_dir, 'authorized_keys') @@ -248,14 +245,13 @@ def extract_authorized_keys(username, paths): " in ssh config" " from %r, using 'AuthorizedKeysFile' file" " %r instead"), - sshd_conf_fn, auth_key_fn) - auth_key_entries = parse_authorized_keys(auth_key_fn) - return (auth_key_fn, auth_key_entries) + DEF_SSHD_CFG, auth_key_fn) + return (auth_key_fn, parse_authorized_keys(auth_key_fn)) -def setup_user_keys(keys, username, key_prefix, paths): +def setup_user_keys(keys, username, key_prefix): # Make sure the users .ssh dir is setup accordingly - (ssh_dir, pwent) = users_ssh_info(username, paths) + (ssh_dir, pwent) = users_ssh_info(username) if not os.path.isdir(ssh_dir): util.ensure_dir(ssh_dir, mode=0700) util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid) @@ -267,7 +263,7 @@ def setup_user_keys(keys, username, key_prefix, paths): key_entries.append(parser.parse(str(k), def_opt=key_prefix)) # Extract the old and make the new - (auth_key_fn, auth_key_entries) = extract_authorized_keys(username, paths) + (auth_key_fn, auth_key_entries) = extract_authorized_keys(username) with util.SeLinuxGuard(ssh_dir, recursive=True): content = update_authorized_keys(auth_key_entries, key_entries) util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700) diff --git a/pylintrc b/pylintrc new file mode 100644 index 00000000..ee886510 --- /dev/null +++ b/pylintrc @@ -0,0 +1,19 @@ +[General] +init-hook='import sys; sys.path.append("tests/")' + +[MESSAGES CONTROL] +# See: http://pylint-messages.wikidot.com/all-codes +# W0142: *args and **kwargs are 
fine. +# W0511: TODOs in code comments are fine. +# W0702: No exception type(s) specified +# W0703: Catch "Exception" +# C0103: Invalid name +# C0111: Missing docstring +disable=W0142,W0511,W0702,W0703,C0103,C0111 + +[REPORTS] +reports=no +include-ids=yes + +[FORMAT] +max-line-length=79 diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/__init__.py b/tests/unittests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_datasource/__init__.py b/tests/unittests/test_datasource/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_distros/__init__.py b/tests/unittests/test_distros/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_filters/__init__.py b/tests/unittests/test_filters/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py index 1e9b9053..773bb312 100644 --- a/tests/unittests/test_filters/test_launch_index.py +++ b/tests/unittests/test_filters/test_launch_index.py @@ -1,14 +1,6 @@ import copy -import os -import sys -top_dir = os.path.join(os.path.dirname(__file__), os.pardir, "helpers.py") -top_dir = os.path.abspath(top_dir) -if os.path.exists(top_dir): - sys.path.insert(0, os.path.dirname(top_dir)) - - -import helpers +from tests.unittests import helpers import itertools diff --git a/tests/unittests/test_handler/__init__.py b/tests/unittests/test_handler/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py index d3df5c50..d73c9fa9 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/test_handler/test_handler_ca_certs.py @@ -77,7 +77,7 @@ class TestConfig(MockerTestCase): """Test that a single cert gets passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1"]}} - self.mock_add(self.paths, ["CERT1"]) + self.mock_add(["CERT1"]) self.mock_update() self.mocker.replay() @@ -87,7 +87,7 @@ class TestConfig(MockerTestCase): """Test that multiple certs get passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}} - self.mock_add(self.paths, ["CERT1", "CERT2"]) + self.mock_add(["CERT1", "CERT2"]) self.mock_update() self.mocker.replay() @@ -97,7 +97,7 @@ class TestConfig(MockerTestCase): """Test remove_defaults works as expected.""" config = {"ca-certs": {"remove-defaults": True}} - self.mock_remove(self.paths) + self.mock_remove() self.mock_update() self.mocker.replay() @@ -116,8 +116,8 @@ class TestConfig(MockerTestCase): """Test remove_defaults is not called when config value is False.""" config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}} - self.mock_remove(self.paths) - self.mock_add(self.paths, ["CERT1"]) + self.mock_remove() + self.mock_add(["CERT1"]) self.mock_update() self.mocker.replay() @@ -136,7 +136,7 @@ class TestAddCaCerts(MockerTestCase): """Test that no certificate are written if not provided.""" self.mocker.replace(util.write_file, passthrough=False) self.mocker.replay() - cc_ca_certs.add_ca_certs(self.paths, []) + cc_ca_certs.add_ca_certs([]) def test_single_cert(self): """Test adding a single certificate to the trusted CAs.""" @@ -149,7 +149,7 @@ class TestAddCaCerts(MockerTestCase): "\ncloud-init-ca-certs.crt", omode="ab") self.mocker.replay() - 
cc_ca_certs.add_ca_certs(self.paths, [cert]) + cc_ca_certs.add_ca_certs([cert]) def test_multiple_certs(self): """Test adding multiple certificates to the trusted CAs.""" @@ -163,7 +163,7 @@ class TestAddCaCerts(MockerTestCase): "\ncloud-init-ca-certs.crt", omode="ab") self.mocker.replay() - cc_ca_certs.add_ca_certs(self.paths, certs) + cc_ca_certs.add_ca_certs(certs) class TestUpdateCaCerts(MockerTestCase): @@ -198,4 +198,4 @@ class TestRemoveDefaultCaCerts(MockerTestCase): "ca-certificates ca-certificates/trust_new_crts select no") self.mocker.replay() - cc_ca_certs.remove_default_ca_certs(self.paths) + cc_ca_certs.remove_default_ca_certs() diff --git a/tests/unittests/test_runs/__init__.py b/tests/unittests/test_runs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index 1e852e1e..22d6cf2c 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -1,14 +1,6 @@ import os -import sys -# Allow running this test individually -top_dir = os.path.join(os.path.dirname(__file__), os.pardir, "helpers.py") -top_dir = os.path.abspath(top_dir) -if os.path.exists(top_dir): - sys.path.insert(0, os.path.dirname(top_dir)) - - -import helpers +from tests.unittests import helpers from cloudinit.settings import (PER_INSTANCE) from cloudinit import stages diff --git a/tools/run-pylint b/tools/run-pylint index 7ef44ac5..b74efda9 100755 --- a/tools/run-pylint +++ b/tools/run-pylint @@ -6,23 +6,16 @@ else files=( "$@" ); fi +RC_FILE="pylintrc" +if [ ! -f $RC_FILE ]; then + RC_FILE="../pylintrc" +fi + cmd=( pylint - --reports=n - --include-ids=y - --max-line-length=79 - + --rcfile=$RC_FILE --disable=R --disable=I - - --disable=W0142 # Used * or ** magic - --disable=W0511 # TODO/FIXME note - --disable=W0702 # No exception type(s) specified - --disable=W0703 # Catch "Exception" - - --disable=C0103 # Invalid name - --disable=C0111 # Missing docstring - "${files[@]}" ) -- cgit v1.2.3 From 791598f2929a5b8b6bb380f7f16ec568db96aba6 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 7 Nov 2012 21:34:41 -0800 Subject: Start adding a 'migrator' module that can be used to aid in the moving of older versions of cloud-inits data to newer versions of cloud-inits data. 1. Move the semaphores for the current instance to there canonicalized names and use the canonicalized in the file 'locking' code --- cloudinit/config/cc_migrator.py | 53 +++++++++++++++++++++++++++++++++++++++++ cloudinit/helpers.py | 9 ++++++- config/cloud.cfg | 1 + 3 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 cloudinit/config/cc_migrator.py (limited to 'cloudinit/config') diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py new file mode 100644 index 00000000..b71d1d17 --- /dev/null +++ b/cloudinit/config/cc_migrator.py @@ -0,0 +1,53 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +import shutil + +from cloudinit import helpers +from cloudinit import util + +from cloudinit.settings import PER_ALWAYS + +frequency = PER_ALWAYS + + +def _migrate_canon_sems(cloud): + sem_path = cloud.paths.get_ipath('sem') + if not sem_path or not os.path.exists(sem_path): + return 0 + am_adjusted = 0 + for p in os.listdir(sem_path): + full_path = os.path.join(sem_path, p) + if os.path.isfile(full_path): + canon_p = helpers.canon_sem_name(p) + if canon_p != p: + new_path = os.path.join(sem_path, p) + shutil.move(full_path, new_path) + am_adjusted += 1 + return am_adjusted + + +def handle(name, cfg, cloud, log, _args): + do_migrate = util.get_cfg_option_str(cfg, "migrate", True) + if not util.translate_bool(do_migrate): + log.debug("Skipping module named %s, migration disabled", name) + return + sems_moved = _migrate_canon_sems(cloud) + log.debug("Migrated %s semaphore files to there canonicalized names", + sems_moved) diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 985ce3e5..d26625a0 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -71,12 +71,17 @@ class FileLock(object): return "<%s using file %r>" % (util.obj_name(self), self.fn) +def canon_sem_name(name): + return name.replace("-", "_") + + class FileSemaphores(object): - def __init__(self, sem_path): + def __init__(self, sem_path): self.sem_path = sem_path @contextlib.contextmanager def lock(self, name, freq, clear_on_fail=False): + name = canon_sem_name(name) try: yield self._acquire(name, freq) except: @@ -85,6 +90,7 @@ class FileSemaphores(object): raise def clear(self, name, freq): + name = canon_sem_name(name) sem_file = self._get_path(name, freq) try: util.del_file(sem_file) @@ -119,6 +125,7 @@ class FileSemaphores(object): def has_run(self, name, freq): if not freq or freq == PER_ALWAYS: return False + name = canon_sem_name(name) sem_file = self._get_path(name, freq) # This isn't really a good atomic check # but it suffices for where and when cloudinit runs diff --git a/config/cloud.cfg b/config/cloud.cfg index 05bb4eef..ad100fff 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -23,6 +23,7 @@ preserve_hostname: false # The modules that run in the 'init' stage cloud_init_modules: + - migrator - bootcmd - write-files - resizefs -- cgit v1.2.3 From 3de3c535f37e40a79b36997a93fa218534117397 Mon Sep 17 00:00:00 2001 From: harlowja Date: Wed, 7 Nov 2012 23:16:21 -0800 Subject: 1. Check the name and not the full path when applying the canon routine. 2. Add in a function to migrate legacy semaphores to new semaphores. 
--- cloudinit/config/cc_migrator.py | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) (limited to 'cloudinit/config') diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py index b71d1d17..56f33d42 100644 --- a/cloudinit/config/cc_migrator.py +++ b/cloudinit/config/cc_migrator.py @@ -35,14 +35,41 @@ def _migrate_canon_sems(cloud): for p in os.listdir(sem_path): full_path = os.path.join(sem_path, p) if os.path.isfile(full_path): - canon_p = helpers.canon_sem_name(p) - if canon_p != p: - new_path = os.path.join(sem_path, p) + (name, ext) = os.path.splitext(p) + canon_name = helpers.canon_sem_name(name) + if canon_name != name: + new_path = os.path.join(sem_path, canon_name + ext) shutil.move(full_path, new_path) am_adjusted += 1 return am_adjusted +def _migrate_legacy_sems(cloud, log): + sem_path = cloud.paths.get_ipath('sem') + touch_there = { + 'apt-update-upgrade': [ + 'apt-configure', + 'package-update-upgrade-install', + ], + } + sem_helper = helpers.FileSemaphores(sem_path) + for (mod_name, migrate_to) in touch_there.items(): + possibles = [mod_name, helpers.canon_sem_name(mod_name)] + old_exists = [] + for p in os.listdir(sem_path): + (name, _ext) = os.path.splitext(p) + if name in possibles and os.path.isfile(p): + old_exists.append(p) + for p in old_exists: + util.del_file(os.path.join(sem_path, p)) + (_name, freq) = os.path.splitext(p) + for m in migrate_to: + log.debug("Migrating %s => %s with the same frequency", + p, m) + with sem_helper.lock(m, freq): + pass + + def handle(name, cfg, cloud, log, _args): do_migrate = util.get_cfg_option_str(cfg, "migrate", True) if not util.translate_bool(do_migrate): @@ -51,3 +78,4 @@ def handle(name, cfg, cloud, log, _args): sems_moved = _migrate_canon_sems(cloud) log.debug("Migrated %s semaphore files to there canonicalized names", sems_moved) + _migrate_legacy_sems(cloud, log) -- cgit v1.2.3 From 196badd4cfa5f9f76be1138fb2d073649af3e031 Mon Sep 17 00:00:00 2001 From: harlowja Date: Wed, 7 Nov 2012 23:20:40 -0800 Subject: 1. Ensure that the sem_path exists and is actually a valid value returned. 2. Adjust variable naming --- cloudinit/config/cc_migrator.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'cloudinit/config') diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py index 56f33d42..58232fc9 100644 --- a/cloudinit/config/cc_migrator.py +++ b/cloudinit/config/cc_migrator.py @@ -46,14 +46,16 @@ def _migrate_canon_sems(cloud): def _migrate_legacy_sems(cloud, log): sem_path = cloud.paths.get_ipath('sem') - touch_there = { + if not sem_path or not os.path.exists(sem_path): + return + legacy_adjust = { 'apt-update-upgrade': [ 'apt-configure', 'package-update-upgrade-install', ], } sem_helper = helpers.FileSemaphores(sem_path) - for (mod_name, migrate_to) in touch_there.items(): + for (mod_name, migrate_to) in legacy_adjust.items(): possibles = [mod_name, helpers.canon_sem_name(mod_name)] old_exists = [] for p in os.listdir(sem_path): -- cgit v1.2.3 From b80c2401123e16b9038ff3fb6f6d660717ee68e1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 8 Nov 2012 17:37:16 -0800 Subject: Fix the case where on a redhat based system the fully qualified domain name should end up in /etc/sysconfig/network by passing the fqdn to the update and set hostname methods and using it accordingly. 
LP: #1076759 --- cloudinit/config/cc_set_hostname.py | 10 ++++++---- cloudinit/config/cc_update_hostname.py | 8 +++++--- cloudinit/distros/__init__.py | 4 ++-- cloudinit/distros/debian.py | 4 ++-- cloudinit/distros/rhel.py | 25 +++++++++++++++++-------- 5 files changed, 32 insertions(+), 19 deletions(-) (limited to 'cloudinit/config') diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index b0f27ebf..2b32fc94 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -27,9 +27,11 @@ def handle(name, cfg, cloud, log, _args): " not setting the hostname in module %s"), name) return - (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud) + (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) try: - log.debug("Setting hostname to %s", hostname) - cloud.distro.set_hostname(hostname) + log.debug("Setting the hostname to %s (%s)", fqdn, hostname) + cloud.distro.set_hostname(hostname, fqdn) except Exception: - util.logexc(log, "Failed to set hostname to %s", hostname) + util.logexc(log, "Failed to set the hostname to %s (%s)", + fqdn, hostname) + raise diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index 1d6679ea..52225cd8 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -32,10 +32,12 @@ def handle(name, cfg, cloud, log, _args): " not updating the hostname in module %s"), name) return - (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud) + (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) try: prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname") - cloud.distro.update_hostname(hostname, prev_fn) + log.debug("Updating hostname to %s (%s)", fqdn, hostname) + cloud.distro.update_hostname(hostname, fqdn, prev_fn) except Exception: - util.logexc(log, "Failed to set the hostname to %s", hostname) + util.logexc(log, "Failed to update the hostname to %s (%s)", + fqdn, hostname) raise diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 869540d2..bd04ba79 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -58,11 +58,11 @@ class Distro(object): return self._cfg.get(opt_name, default) @abc.abstractmethod - def set_hostname(self, hostname): + def set_hostname(self, hostname, fqdn=None): raise NotImplementedError() @abc.abstractmethod - def update_hostname(self, hostname, prev_hostname_fn): + def update_hostname(self, hostname, fqdn, prev_hostname_fn): raise NotImplementedError() @abc.abstractmethod diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index cc7e53a0..ed4070b4 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -67,7 +67,7 @@ class Distro(distros.Distro): else: return distros.Distro._bring_up_interfaces(self, device_names) - def set_hostname(self, hostname): + def set_hostname(self, hostname, fqdn=None): self._write_hostname(hostname, "/etc/hostname") LOG.debug("Setting hostname to %s", hostname) util.subp(['hostname', hostname]) @@ -76,7 +76,7 @@ class Distro(distros.Distro): # "" gives trailing newline. 
util.write_file(out_fn, "%s\n" % str(hostname), 0644) - def update_hostname(self, hostname, prev_fn): + def update_hostname(self, hostname, fqdn, prev_fn): hostname_prev = self._read_hostname(prev_fn) hostname_in_etc = self._read_hostname("/etc/hostname") update_files = [] diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index bf3c18d2..e4c27216 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -146,8 +146,13 @@ class Distro(distros.Distro): lines.insert(0, _make_header()) util.write_file(fn, "\n".join(lines), 0644) - def set_hostname(self, hostname): - self._write_hostname(hostname, '/etc/sysconfig/network') + def set_hostname(self, hostname, fqdn=None): + # See: http://bit.ly/TwitgL + # Should be fqdn if we can use it + sysconfig_hostname = fqdn + if not sysconfig_hostname: + sysconfig_hostname = hostname + self._write_hostname(sysconfig_hostname, '/etc/sysconfig/network') LOG.debug("Setting hostname to %s", hostname) util.subp(['hostname', hostname]) @@ -165,28 +170,32 @@ class Distro(distros.Distro): } self._update_sysconfig_file(out_fn, host_cfg) - def update_hostname(self, hostname, prev_file): + def update_hostname(self, hostname, fqdn, prev_file): + # See: http://bit.ly/TwitgL + # Should be fqdn if we can use it + sysconfig_hostname = fqdn + if not sysconfig_hostname: + sysconfig_hostname = hostname hostname_prev = self._read_hostname(prev_file) hostname_in_sys = self._read_hostname("/etc/sysconfig/network") update_files = [] - if not hostname_prev or hostname_prev != hostname: + if not hostname_prev or hostname_prev != sysconfig_hostname: update_files.append(prev_file) if (not hostname_in_sys or (hostname_in_sys == hostname_prev - and hostname_in_sys != hostname)): + and hostname_in_sys != sysconfig_hostname)): update_files.append("/etc/sysconfig/network") for fn in update_files: try: - self._write_hostname(hostname, fn) + self._write_hostname(sysconfig_hostname, fn) except: util.logexc(LOG, "Failed to write hostname %s to %s", - hostname, fn) + sysconfig_hostname, fn) if (hostname_in_sys and hostname_prev and hostname_in_sys != hostname_prev): LOG.debug(("%s differs from /etc/sysconfig/network." " Assuming user maintained hostname."), prev_file) if "/etc/sysconfig/network" in update_files: - # Only do this if we are running in non-adjusted root mode LOG.debug("Setting hostname to %s", hostname) util.subp(['hostname', hostname]) -- cgit v1.2.3
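
The hostname handling introduced in the last patch prefers the fully qualified domain name for /etc/sysconfig/network and only falls back to the short hostname when no fqdn is available, while the `hostname` command itself still receives the short name. The following is a minimal, self-contained sketch of that selection behaviour; it is not code from the patches above, and the function names (`select_sysconfig_hostname`, `apply_hostname`) and the sample values are illustrative only.

import subprocess


def select_sysconfig_hostname(hostname, fqdn=None):
    # /etc/sysconfig/network should carry the fqdn when one is known;
    # otherwise fall back to the plain hostname.
    if fqdn:
        return fqdn
    return hostname


def apply_hostname(hostname, fqdn=None, dry_run=True):
    # Sketch of the rhel.py behaviour: the fqdn-preferring value goes to the
    # sysconfig file, but the 'hostname' command is still run with the short
    # name. dry_run avoids actually changing the running system.
    sysconfig_value = select_sysconfig_hostname(hostname, fqdn)
    if dry_run:
        print("would write HOSTNAME=%s to /etc/sysconfig/network" % sysconfig_value)
        print("would run: hostname %s" % hostname)
    else:
        subprocess.check_call(['hostname', hostname])
    return sysconfig_value


if __name__ == '__main__':
    # With an fqdn, the sysconfig value is the fqdn while 'hostname' still
    # gets the short name.
    assert apply_hostname('node1', 'node1.example.com') == 'node1.example.com'
    # Without an fqdn, both fall back to the short hostname.
    assert apply_hostname('node1') == 'node1'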