author    | harlowja <harlowja@virtualbox.rhel> | 2013-02-21 22:51:02 -0800
committer | harlowja <harlowja@virtualbox.rhel> | 2013-02-21 22:51:02 -0800
commit    | 575a084808db7d5ac607a848b018abe676e73a91 (patch)
tree      | 34e179b0623074e6cd6fc03e4b3db001f8e493bf /cloudinit/config
parent    | 9dfb60d3144860334ab1ad1d72920d962139461f (diff)
parent    | d4886b65549c886499141872a9928412a74bbea2 (diff)
download  | vyos-cloud-init-575a084808db7d5ac607a848b018abe676e73a91.tar.gz, vyos-cloud-init-575a084808db7d5ac607a848b018abe676e73a91.zip
Update to code on trunk.
Diffstat (limited to 'cloudinit/config')
25 files changed, 726 insertions, 214 deletions
diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py
index 69a8cc68..d57453be 100644
--- a/cloudinit/config/__init__.py
+++ b/cloudinit/config/__init__.py
@@ -52,5 +52,7 @@ def fixup_module(mod, def_freq=PER_INSTANCE):
     if freq and freq not in FREQUENCIES:
         LOG.warn("Module %s has an unknown frequency %s", mod, freq)
     if not hasattr(mod, 'distros'):
-        setattr(mod, 'distros', None)
+        setattr(mod, 'distros', [])
+    if not hasattr(mod, 'osfamilies'):
+        setattr(mod, 'osfamilies', [])
     return mod
diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_configure.py
index 356bb98d..3ce3b351 100644
--- a/cloudinit/config/cc_apt_update_upgrade.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -20,7 +20,6 @@
 import glob
 import os
-import time
 
 from cloudinit import templater
 from cloudinit import util
@@ -47,9 +46,6 @@ EXPORT_GPG_KEYID = """
 
 
 def handle(name, cfg, cloud, log, _args):
-    update = util.get_cfg_option_bool(cfg, 'apt_update', False)
-    upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False)
-
     release = get_release()
     mirrors = find_apt_mirror_info(cloud, cfg)
     if not mirrors or "primary" not in mirrors:
@@ -61,7 +57,7 @@ def handle(name, cfg, cloud, log, _args):
     mirror = mirrors["primary"]
     mirrors["mirror"] = mirror
 
-    log.debug("mirror info: %s" % mirrors)
+    log.debug("Mirror info: %s" % mirrors)
 
     if not util.get_cfg_option_bool(cfg, 'apt_preserve_sources_list', False):
@@ -78,8 +74,7 @@ def handle(name, cfg, cloud, log, _args):
         try:
             # See man 'apt.conf'
             contents = PROXY_TPL % (proxy)
-            util.write_file(cloud.paths.join(False, proxy_filename),
-                            contents)
+            util.write_file(proxy_filename, contents)
         except Exception as e:
             util.logexc(log, "Failed to write proxy to %s", proxy_filename)
     elif os.path.isfile(proxy_filename):
@@ -90,61 +85,18 @@ def handle(name, cfg, cloud, log, _args):
         params = mirrors
         params['RELEASE'] = release
         params['MIRROR'] = mirror
-        errors = add_sources(cloud, cfg['apt_sources'], params)
+        errors = add_sources(cfg['apt_sources'], params)
         for e in errors:
-            log.warn("Source Error: %s", ':'.join(e))
+            log.warn("Add source error: %s", ':'.join(e))
 
     dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
     if dconf_sel:
-        log.debug("setting debconf selections per cloud config")
+        log.debug("Setting debconf selections per cloud config")
         try:
             util.subp(('debconf-set-selections', '-'), dconf_sel)
-        except:
+        except Exception:
             util.logexc(log, "Failed to run debconf-set-selections")
 
-    pkglist = util.get_cfg_option_list(cfg, 'packages', [])
-
-    errors = []
-    if update or len(pkglist) or upgrade:
-        try:
-            cloud.distro.update_package_sources()
-        except Exception as e:
-            util.logexc(log, "Package update failed")
-            errors.append(e)
-
-    if upgrade:
-        try:
-            cloud.distro.package_command("upgrade")
-        except Exception as e:
-            util.logexc(log, "Package upgrade failed")
-            errors.append(e)
-
-    if len(pkglist):
-        try:
-            cloud.distro.install_packages(pkglist)
-        except Exception as e:
-            util.logexc(log, "Failed to install packages: %s ", pkglist)
-            errors.append(e)
-
-    # kernel and openssl (possibly some other packages)
-    # write a file /var/run/reboot-required after upgrading.
-    # if that file exists and configured, then just stop right now and reboot
-    # TODO(smoser): handle this less voilently
-    reboot_file = "/var/run/reboot-required"
-    if ((upgrade or pkglist) and cfg.get("apt_reboot_if_required", False) and
-        os.path.isfile(reboot_file)):
-        log.warn("rebooting after upgrade or install per %s" % reboot_file)
-        time.sleep(1)  # give the warning time to get out
-        util.subp(["/sbin/reboot"])
-        time.sleep(60)
-        log.warn("requested reboot did not happen!")
-        errors.append(Exception("requested reboot did not happen!"))
-
-    if len(errors):
-        log.warn("%s failed with exceptions, re-raising the last one",
-                 len(errors))
-        raise errors[-1]
-
 
 # get gpg keyid from keyserver
 def getkeybyid(keyid, keyserver):
@@ -188,19 +140,21 @@ def get_release():
 
 
 def generate_sources_list(codename, mirrors, cloud, log):
-    template_fn = cloud.get_template_filename('sources.list')
+    template_fn = cloud.get_template_filename('sources.list.%s' %
+                                              (cloud.distro.name))
     if not template_fn:
-        log.warn("No template found, not rendering /etc/apt/sources.list")
-        return
+        template_fn = cloud.get_template_filename('sources.list')
+        if not template_fn:
+            log.warn("No template found, not rendering /etc/apt/sources.list")
+            return
 
     params = {'codename': codename}
     for k in mirrors:
         params[k] = mirrors[k]
-    out_fn = cloud.paths.join(False, '/etc/apt/sources.list')
-    templater.render_to_file(template_fn, out_fn, params)
+    templater.render_to_file(template_fn, '/etc/apt/sources.list', params)
 
 
-def add_sources(cloud, srclist, template_params=None):
+def add_sources(srclist, template_params=None):
     """
     add entries in /etc/apt/sources.list.d for each abbreviated
    sources.list entry in 'srclist'.  When rendering template, also
@@ -250,8 +204,7 @@ def add_sources(cloud, srclist, template_params=None):
 
         try:
             contents = "%s\n" % (source)
-            util.write_file(cloud.paths.join(False, ent['filename']),
-                            contents, omode="ab")
+            util.write_file(ent['filename'], contents, omode="ab")
         except:
             errorlist.append([source,
                               "failed write to file %s" % ent['filename']])
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 02056ee0..e5629175 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -34,26 +34,24 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
 # on TCP connections - otherwise data corruption will occur.
-def handle(_name, cfg, cloud, log, _args):
+def handle(_name, cfg, _cloud, log, _args):
     apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
     apt_pipe_value_s = str(apt_pipe_value).lower().strip()
 
     if apt_pipe_value_s == "false":
-        write_apt_snippet(cloud, "0", log, DEFAULT_FILE)
+        write_apt_snippet("0", log, DEFAULT_FILE)
     elif apt_pipe_value_s in ("none", "unchanged", "os"):
         return
     elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]:
-        write_apt_snippet(cloud, apt_pipe_value_s, log, DEFAULT_FILE)
+        write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
     else:
         log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
 
 
-def write_apt_snippet(cloud, setting, log, f_name):
+def write_apt_snippet(setting, log, f_name):
     """Writes f_name with apt pipeline depth 'setting'."""
 
     file_contents = APT_PIPE_TPL % (setting)
-
-    util.write_file(cloud.paths.join(False, f_name), file_contents)
-
+    util.write_file(f_name, file_contents)
     log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting)
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index dc046bda..4f2a46a1 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -22,6 +22,7 @@ CA_CERT_PATH = "/usr/share/ca-certificates/"
 CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
 CA_CERT_CONFIG = "/etc/ca-certificates.conf"
 CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
+CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
 
 distros = ['ubuntu', 'debian']
@@ -33,7 +34,7 @@ def update_ca_certs():
     util.subp(["update-ca-certificates"], capture=False)
 
 
-def add_ca_certs(paths, certs):
+def add_ca_certs(certs):
     """
     Adds certificates to the system. To actually apply the new certificates
     you must also call L{update_ca_certs}.
@@ -43,27 +44,31 @@ def add_ca_certs(certs):
     if certs:
         # First ensure they are strings...
         cert_file_contents = "\n".join([str(c) for c in certs])
-        cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
-        cert_file_fullpath = paths.join(False, cert_file_fullpath)
-        util.write_file(cert_file_fullpath, cert_file_contents, mode=0644)
+        util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0644)
+
         # Append cert filename to CA_CERT_CONFIG file.
-        util.write_file(paths.join(False, CA_CERT_CONFIG),
-                        "\n%s" % CA_CERT_FILENAME, omode="ab")
+        # We have to strip the content because blank lines in the file
+        # causes subsequent entries to be ignored. (LP: #1077020)
+        orig = util.load_file(CA_CERT_CONFIG)
+        cur_cont = '\n'.join([l for l in orig.splitlines()
+                             if l != CA_CERT_FILENAME])
+        out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
+        util.write_file(CA_CERT_CONFIG, out, omode="wb")
 
 
-def remove_default_ca_certs(paths):
+def remove_default_ca_certs():
     """
     Removes all default trusted CA certificates from the system. To actually
     apply the change you must also call L{update_ca_certs}.
     """
-    util.delete_dir_contents(paths.join(False, CA_CERT_PATH))
-    util.delete_dir_contents(paths.join(False, CA_CERT_SYSTEM_PATH))
-    util.write_file(paths.join(False, CA_CERT_CONFIG), "", mode=0644)
+    util.delete_dir_contents(CA_CERT_PATH)
+    util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
+    util.write_file(CA_CERT_CONFIG, "", mode=0644)
 
     debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
     util.subp(('debconf-set-selections', '-'), debconf_sel)
 
 
-def handle(name, cfg, cloud, log, _args):
+def handle(name, cfg, _cloud, log, _args):
     """
     Call to handle ca-cert sections in cloud-config file.
@@ -85,14 +90,14 @@ def handle(name, cfg, cloud, log, _args):
     # default trusted CA certs first.
     if ca_cert_cfg.get("remove-defaults", False):
         log.debug("Removing default certificates")
-        remove_default_ca_certs(cloud.paths)
+        remove_default_ca_certs()
 
     # If we are given any new trusted CA certs to add, add them.
     if "trusted" in ca_cert_cfg:
         trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
         if trusted_certs:
             log.debug("Adding %d certificates" % len(trusted_certs))
-            add_ca_certs(cloud.paths, trusted_certs)
+            add_ca_certs(trusted_certs)
 
     # Update the system with the new cert configuration.
     log.debug("Updating certificates")
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 6f568261..607f789e 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -22,10 +22,22 @@ import json
 import os
 
 from cloudinit import templater
+from cloudinit import url_helper
 from cloudinit import util
 
 RUBY_VERSION_DEFAULT = "1.8"
 
+CHEF_DIRS = [
+    '/etc/chef',
+    '/var/log/chef',
+    '/var/lib/chef',
+    '/var/cache/chef',
+    '/var/backups/chef',
+    '/var/run/chef',
+]
+
+OMNIBUS_URL = "https://www.opscode.com/chef/install.sh"
+
 
 def handle(name, cfg, cloud, log, _args):
@@ -37,24 +49,15 @@ def handle(name, cfg, cloud, log, _args):
     chef_cfg = cfg['chef']
 
     # Ensure the chef directories we use exist
-    c_dirs = [
-        '/etc/chef',
-        '/var/log/chef',
-        '/var/lib/chef',
-        '/var/cache/chef',
-        '/var/backups/chef',
-        '/var/run/chef',
-    ]
-    for d in c_dirs:
-        util.ensure_dir(cloud.paths.join(False, d))
+    for d in CHEF_DIRS:
+        util.ensure_dir(d)
 
     # Set the validation key based on the presence of either 'validation_key'
     # or 'validation_cert'. In the case where both exist, 'validation_key'
     # takes precedence
     for key in ('validation_key', 'validation_cert'):
         if key in chef_cfg and chef_cfg[key]:
-            v_fn = cloud.paths.join(False, '/etc/chef/validation.pem')
-            util.write_file(v_fn, chef_cfg[key])
+            util.write_file('/etc/chef/validation.pem', chef_cfg[key])
             break
 
     # Create the chef config from template
@@ -68,8 +71,7 @@ def handle(name, cfg, cloud, log, _args):
                                                      '_default'),
             'validation_name': chef_cfg['validation_name']
         }
-        out_fn = cloud.paths.join(False, '/etc/chef/client.rb')
-        templater.render_to_file(template_fn, out_fn, params)
+        templater.render_to_file(template_fn, '/etc/chef/client.rb', params)
     else:
         log.warn("No template found, not rendering to /etc/chef/client.rb")
@@ -81,11 +83,12 @@ def handle(name, cfg, cloud, log, _args):
         initial_attributes = chef_cfg['initial_attributes']
         for k in list(initial_attributes.keys()):
             initial_json[k] = initial_attributes[k]
-    firstboot_fn = cloud.paths.join(False, '/etc/chef/firstboot.json')
-    util.write_file(firstboot_fn, json.dumps(initial_json))
+    util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json))
 
     # If chef is not installed, we install chef based on 'install_type'
-    if not os.path.isfile('/usr/bin/chef-client'):
+    if (not os.path.isfile('/usr/bin/chef-client') or
+        util.get_cfg_option_bool(chef_cfg, 'force_install', default=False)):
+
         install_type = util.get_cfg_option_str(chef_cfg, 'install_type', 'packages')
         if install_type == "gems":
@@ -101,6 +104,14 @@ def handle(name, cfg, cloud, log, _args):
         elif install_type == 'packages':
             # this will install and run the chef-client from packages
             cloud.distro.install_packages(('chef',))
+        elif install_type == 'omnibus':
+            url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
+            content = url_helper.readurl(url=url, retries=5)
+            with util.tempdir() as tmpd:
+                # use tmpd over tmpfile to avoid 'Text file busy' on execute
+                tmpf = "%s/chef-omnibus-install" % tmpd
+                util.write_file(tmpf, content, mode=0700)
+                util.subp([tmpf], capture=False)
         else:
             log.warn("Unknown chef install type %s", install_type)
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 331559f4..2efdff79 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -62,28 +62,23 @@ def handle(_name, cfg, cloud, log, _args):
     if not ls_cloudcfg:
         return
 
-    cloud.distro.install_packages(["landscape-client"])
+    cloud.distro.install_packages(('landscape-client',))
 
     merge_data = [
         LSC_BUILTIN_CFG,
-        cloud.paths.join(True, LSC_CLIENT_CFG_FILE),
+        LSC_CLIENT_CFG_FILE,
         ls_cloudcfg,
     ]
     merged = merge_together(merge_data)
-
-    lsc_client_fn = cloud.paths.join(False, LSC_CLIENT_CFG_FILE)
-    lsc_dir = cloud.paths.join(False, os.path.dirname(lsc_client_fn))
-    if not os.path.isdir(lsc_dir):
-        util.ensure_dir(lsc_dir)
-
     contents = StringIO()
     merged.write(contents)
-    contents.flush()
-    util.write_file(lsc_client_fn, contents.getvalue())
-    log.debug("Wrote landscape config file to %s", lsc_client_fn)
+
+    util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
+    util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue())
+    log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)
 
     util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
+    util.subp(["service", "landscape-client", "restart"])
 
 
 def merge_together(objs):
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 2acdbc6f..b670390d 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -29,6 +29,7 @@ from cloudinit import util
 
 PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
 PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
+SERVER_CFG = '/etc/mcollective/server.cfg'
 
 
 def handle(name, cfg, cloud, log, _args):
@@ -48,26 +49,23 @@ def handle(name, cfg, cloud, log, _args):
     if 'conf' in mcollective_cfg:
         # Read server.cfg values from the
         # original file in order to be able to mix the rest up
-        server_cfg_fn = cloud.paths.join(True, '/etc/mcollective/server.cfg')
-        mcollective_config = ConfigObj(server_cfg_fn)
+        mcollective_config = ConfigObj(SERVER_CFG)
         # See: http://tiny.cc/jh9agw
         for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems():
             if cfg_name == 'public-cert':
-                pubcert_fn = cloud.paths.join(True, PUBCERT_FILE)
-                util.write_file(pubcert_fn, cfg, mode=0644)
-                mcollective_config['plugin.ssl_server_public'] = pubcert_fn
+                util.write_file(PUBCERT_FILE, cfg, mode=0644)
+                mcollective_config['plugin.ssl_server_public'] = PUBCERT_FILE
                 mcollective_config['securityprovider'] = 'ssl'
             elif cfg_name == 'private-cert':
-                pricert_fn = cloud.paths.join(True, PRICERT_FILE)
-                util.write_file(pricert_fn, cfg, mode=0600)
-                mcollective_config['plugin.ssl_server_private'] = pricert_fn
+                util.write_file(PRICERT_FILE, cfg, mode=0600)
+                mcollective_config['plugin.ssl_server_private'] = PRICERT_FILE
                 mcollective_config['securityprovider'] = 'ssl'
             else:
                 if isinstance(cfg, (basestring, str)):
                     # Just set it in the 'main' section
                     mcollective_config[cfg_name] = cfg
                 elif isinstance(cfg, (dict)):
-                    # Iterate throug the config items, create a section
+                    # Iterate through the config items, create a section
                     # if it is needed and then add/or create items as needed
                     if cfg_name not in mcollective_config.sections:
                         mcollective_config[cfg_name] = {}
@@ -78,14 +76,12 @@ def handle(name, cfg, cloud, log, _args):
                     mcollective_config[cfg_name] = str(cfg)
         # We got all our config as wanted we'll rename
         # the previous server.cfg and create our new one
-        old_fn = cloud.paths.join(False, '/etc/mcollective/server.cfg.old')
-        util.rename(server_cfg_fn, old_fn)
+        util.rename(SERVER_CFG, "%s.old" % (SERVER_CFG))
         # Now we got the whole file, write to disk...
         contents = StringIO()
         mcollective_config.write(contents)
         contents = contents.getvalue()
-        server_cfg_rw = cloud.paths.join(False, '/etc/mcollective/server.cfg')
-        util.write_file(server_cfg_rw, contents, mode=0644)
+        util.write_file(SERVER_CFG, contents, mode=0644)
 
     # Start mcollective
     util.subp(['service', 'mcollective', 'start'], capture=False)
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
new file mode 100644
index 00000000..facaa538
--- /dev/null
+++ b/cloudinit/config/cc_migrator.py
@@ -0,0 +1,85 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2012 Yahoo! Inc.
+#
+#    Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import shutil
+
+from cloudinit import helpers
+from cloudinit import util
+
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
+
+
+def _migrate_canon_sems(cloud):
+    paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
+    am_adjusted = 0
+    for sem_path in paths:
+        if not sem_path or not os.path.exists(sem_path):
+            continue
+        for p in os.listdir(sem_path):
+            full_path = os.path.join(sem_path, p)
+            if os.path.isfile(full_path):
+                (name, ext) = os.path.splitext(p)
+                canon_name = helpers.canon_sem_name(name)
+                if canon_name != name:
+                    new_path = os.path.join(sem_path, canon_name + ext)
+                    shutil.move(full_path, new_path)
+                    am_adjusted += 1
+    return am_adjusted
+
+
+def _migrate_legacy_sems(cloud, log):
+    legacy_adjust = {
+        'apt-update-upgrade': [
+            'apt-configure',
+            'package-update-upgrade-install',
+        ],
+    }
+    paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
+    for sem_path in paths:
+        if not sem_path or not os.path.exists(sem_path):
+            continue
+        sem_helper = helpers.FileSemaphores(sem_path)
+        for (mod_name, migrate_to) in legacy_adjust.items():
+            possibles = [mod_name, helpers.canon_sem_name(mod_name)]
+            old_exists = []
+            for p in os.listdir(sem_path):
+                (name, _ext) = os.path.splitext(p)
+                if name in possibles and os.path.isfile(p):
+                    old_exists.append(p)
+            for p in old_exists:
+                util.del_file(os.path.join(sem_path, p))
+                (_name, freq) = os.path.splitext(p)
+                for m in migrate_to:
+                    log.debug("Migrating %s => %s with the same frequency",
+                              p, m)
+                    with sem_helper.lock(m, freq):
+                        pass
+
+
+def handle(name, cfg, cloud, log, _args):
+    do_migrate = util.get_cfg_option_str(cfg, "migrate", True)
+    if not util.translate_bool(do_migrate):
+        log.debug("Skipping module named %s, migration disabled", name)
+        return
+    sems_moved = _migrate_canon_sems(cloud)
+    log.debug("Migrated %s semaphore files to there canonicalized names",
+              sems_moved)
+    _migrate_legacy_sems(cloud, log)
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 14c965bb..9010d97f 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -24,10 +24,11 @@ import re
 
 from cloudinit import util
 
-# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1
-SHORTNAME_FILTER = r"^[x]{0,1}[shv]d[a-z][0-9]*$"
+# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
+SHORTNAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
 SHORTNAME = re.compile(SHORTNAME_FILTER)
 WS = re.compile("[%s]+" % (whitespace))
+FSTAB_PATH = "/etc/fstab"
 
 
 def is_mdname(name):
@@ -167,8 +168,7 @@ def handle(_name, cfg, cloud, log, _args):
         cc_lines.append('\t'.join(line))
 
     fstab_lines = []
-    fstab = util.load_file(cloud.paths.join(True, "/etc/fstab"))
-    for line in fstab.splitlines():
+    for line in util.load_file(FSTAB_PATH).splitlines():
         try:
             toks = WS.split(line)
             if toks[3].find(comment) != -1:
@@ -179,7 +179,7 @@ def handle(_name, cfg, cloud, log, _args):
 
     fstab_lines.extend(cc_lines)
     contents = "%s\n" % ('\n'.join(fstab_lines))
-    util.write_file(cloud.paths.join(False, "/etc/fstab"), contents)
+    util.write_file(FSTAB_PATH, contents)
 
     if needswap:
         try:
@@ -188,9 +188,8 @@ def handle(_name, cfg, cloud, log, _args):
             util.logexc(log, "Activating swap via 'swapon -a' failed")
 
     for d in dirs:
-        real_dir = cloud.paths.join(False, d)
         try:
-            util.ensure_dir(real_dir)
+            util.ensure_dir(d)
         except:
             util.logexc(log, "Failed to make '%s' config-mount", d)
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
new file mode 100644
index 00000000..73b0e30d
--- /dev/null
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -0,0 +1,99 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2012 Yahoo! Inc.
+#
+#    Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import time
+
+from cloudinit import log as logging
+from cloudinit import util
+
+REBOOT_FILE = "/var/run/reboot-required"
+REBOOT_CMD = ["/sbin/reboot"]
+
+
+def _multi_cfg_bool_get(cfg, *keys):
+    for k in keys:
+        if util.get_cfg_option_bool(cfg, k, False):
+            return True
+    return False
+
+
+def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
+    util.subp(REBOOT_CMD)
+    start = time.time()
+    wait_time = initial_sleep
+    for _i in range(0, wait_attempts):
+        time.sleep(wait_time)
+        wait_time *= backoff
+        elapsed = time.time() - start
+        log.debug("Rebooted, but still running after %s seconds", int(elapsed))
+    # If we got here, not good
+    elapsed = time.time() - start
+    raise RuntimeError(("Reboot did not happen"
+                        " after %s seconds!") % (int(elapsed)))
+
+
+def handle(_name, cfg, cloud, log, _args):
+    # Handle the old style + new config names
+    update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update')
+    upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade')
+    reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required',
+                                             'package_reboot_if_required')
+    pkglist = util.get_cfg_option_list(cfg, 'packages', [])
+
+    errors = []
+    if update or len(pkglist) or upgrade:
+        try:
+            cloud.distro.update_package_sources()
+        except Exception as e:
+            util.logexc(log, "Package update failed")
+            errors.append(e)
+
+    if upgrade:
+        try:
+            cloud.distro.package_command("upgrade")
+        except Exception as e:
+            util.logexc(log, "Package upgrade failed")
+            errors.append(e)
+
+    if len(pkglist):
+        try:
+            cloud.distro.install_packages(pkglist)
+        except Exception as e:
+            util.logexc(log, "Failed to install packages: %s", pkglist)
+            errors.append(e)
+
+    # TODO(smoser): handle this less violently
+    # kernel and openssl (possibly some other packages)
+    # write a file /var/run/reboot-required after upgrading.
+    # if that file exists and configured, then just stop right now and reboot
+    reboot_fn_exists = os.path.isfile(REBOOT_FILE)
+    if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists:
+        try:
+            log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE)
+            # Flush the above warning + anything else out...
+            logging.flushLoggers(log)
+            _fire_reboot(log)
+        except Exception as e:
+            util.logexc(log, "Requested reboot did not happen!")
+            errors.append(e)
+
+    if len(errors):
+        log.warn("%s failed with exceptions, re-raising the last one",
+                 len(errors))
+        raise errors[-1]
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 5a4332ef..c873c8a8 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -83,10 +83,10 @@ def handle(name, cfg, cloud, log, args):
 
     for (n, path) in pubkeys.iteritems():
         try:
-            all_keys[n] = util.load_file(cloud.paths.join(True, path))
+            all_keys[n] = util.load_file(path)
         except:
             util.logexc(log, ("%s: failed to open, can not"
-                              " phone home that data"), path)
+                              " phone home that data!"), path)
 
     submit_keys = {}
     for k in post_list:
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
new file mode 100644
index 00000000..aefa3aff
--- /dev/null
+++ b/cloudinit/config/cc_power_state_change.py
@@ -0,0 +1,155 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2011 Canonical Ltd.
+#
+#    Author: Scott Moser <scott.moser@canonical.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
+
+import errno
+import os
+import re
+import subprocess
+import time
+
+frequency = PER_INSTANCE
+
+EXIT_FAIL = 254
+
+
+def handle(_name, cfg, _cloud, log, _args):
+
+    try:
+        (args, timeout) = load_power_state(cfg)
+        if args is None:
+            log.debug("no power_state provided. doing nothing")
+            return
+    except Exception as e:
+        log.warn("%s Not performing power state change!" % str(e))
+        return
+
+    mypid = os.getpid()
+    cmdline = util.load_file("/proc/%s/cmdline" % mypid)
+
+    if not cmdline:
+        log.warn("power_state: failed to get cmdline of current process")
+        return
+
+    devnull_fp = open(os.devnull, "w")
+
+    log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args)))
+
+    util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, execmd,
+                 [args, devnull_fp])
+
+
+def load_power_state(cfg):
+    # returns a tuple of shutdown_command, timeout
+    # shutdown_command is None if no config found
+    pstate = cfg.get('power_state')
+
+    if pstate is None:
+        return (None, None)
+
+    if not isinstance(pstate, dict):
+        raise TypeError("power_state is not a dict.")
+
+    opt_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}
+
+    mode = pstate.get("mode")
+    if mode not in opt_map:
+        raise TypeError("power_state[mode] required, must be one of: %s." %
+                        ','.join(opt_map.keys()))
+
+    delay = pstate.get("delay", "now")
+    if delay != "now" and not re.match("\+[0-9]+", delay):
+        raise TypeError("power_state[delay] must be 'now' or '+m' (minutes).")
+
+    args = ["shutdown", opt_map[mode], delay]
+    if pstate.get("message"):
+        args.append(pstate.get("message"))
+
+    try:
+        timeout = float(pstate.get('timeout', 30.0))
+    except ValueError:
+        raise ValueError("failed to convert timeout '%s' to float." %
+                         pstate['timeout'])
+
+    return (args, timeout)
+
+
+def doexit(sysexit):
+    os._exit(sysexit)  # pylint: disable=W0212
+
+
+def execmd(exe_args, output=None, data_in=None):
+    try:
+        proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,
+                                stdout=output, stderr=subprocess.STDOUT)
+        proc.communicate(data_in)
+        ret = proc.returncode
+    except Exception:
+        doexit(EXIT_FAIL)
+    doexit(ret)
+
+
+def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
+    # wait until pid, with /proc/pid/cmdline contents of pidcmdline
+    # is no longer alive.  After it is gone, or timeout has passed
+    # execute func(args)
+    msg = None
+    end_time = time.time() + timeout
+
+    cmdline_f = "/proc/%s/cmdline" % pid
+
+    def fatal(msg):
+        if log:
+            log.warn(msg)
+        doexit(EXIT_FAIL)
+
+    known_errnos = (errno.ENOENT, errno.ESRCH)
+
+    while True:
+        if time.time() > end_time:
+            msg = "timeout reached before %s ended" % pid
+            break
+
+        try:
+            cmdline = ""
+            with open(cmdline_f) as fp:
+                cmdline = fp.read()
+            if cmdline != pidcmdline:
+                msg = "cmdline changed for %s [now: %s]" % (pid, cmdline)
+                break
+
+        except IOError as ioerr:
+            if ioerr.errno in known_errnos:
+                msg = "pidfile '%s' gone [%d]" % (cmdline_f, ioerr.errno)
+            else:
+                fatal("IOError during wait: %s" % ioerr)
+            break
+
+        except Exception as e:
+            fatal("Unexpected Exception: %s" % e)
+
+        time.sleep(.25)
+
+    if not msg:
+        fatal("Unexpected error in run_after_pid_gone")
+
+    if log:
+        log.debug(msg)
+    func(*args)
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 74ee18e1..471a1a8a 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -21,12 +21,32 @@
 from StringIO import StringIO
 
 import os
-import pwd
 import socket
 
 from cloudinit import helpers
 from cloudinit import util
 
+PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
+PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/'
+PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
+PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem'
+
+
+def _autostart_puppet(log):
+    # Set puppet to automatically start
+    if os.path.exists('/etc/default/puppet'):
+        util.subp(['sed', '-i',
+                   '-e', 's/^START=.*/START=yes/',
+                   '/etc/default/puppet'], capture=False)
+    elif os.path.exists('/bin/systemctl'):
+        util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
+                  capture=False)
+    elif os.path.exists('/sbin/chkconfig'):
+        util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
+    else:
+        log.warn(("Sorry we do not know how to enable"
+                  " puppet services on this system"))
+
 
 def handle(name, cfg, cloud, log, _args):
     # If there isn't a puppet key in the configuration don't do anything
@@ -37,14 +57,21 @@ def handle(name, cfg, cloud, log, _args):
     puppet_cfg = cfg['puppet']
 
-    # Start by installing the puppet package ...
-    cloud.distro.install_packages(["puppet"])
+    # Start by installing the puppet package if necessary...
+    install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
+    version = util.get_cfg_option_str(puppet_cfg, 'version', None)
+    if not install and version:
+        log.warn(("Puppet install set false but version supplied,"
+                  " doing nothing."))
+    elif install:
+        log.debug(("Attempting to install puppet %s,"),
+                  version if version else 'latest')
+        cloud.distro.install_packages(('puppet', version))
 
     # ... and then update the puppet configuration
     if 'conf' in puppet_cfg:
         # Add all sections from the conf object to puppet.conf
-        puppet_conf_fn = cloud.paths.join(True, '/etc/puppet/puppet.conf')
-        contents = util.load_file(puppet_conf_fn)
+        contents = util.load_file(PUPPET_CONF_PATH)
         # Create object for reading puppet.conf values
         puppet_config = helpers.DefaultingConfigParser()
         # Read puppet.conf values from original file in order to be able to
@@ -53,28 +80,19 @@ def handle(name, cfg, cloud, log, _args):
         cleaned_lines = [i.lstrip() for i in contents.splitlines()]
         cleaned_contents = '\n'.join(cleaned_lines)
         puppet_config.readfp(StringIO(cleaned_contents),
-                             filename=puppet_conf_fn)
+                             filename=PUPPET_CONF_PATH)
         for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
             # Cert configuration is a special case
             # Dump the puppet master ca certificate in the correct place
             if cfg_name == 'ca_cert':
                 # Puppet ssl sub-directory isn't created yet
                 # Create it with the proper permissions and ownership
-                pp_ssl_dir = cloud.paths.join(False, '/var/lib/puppet/ssl')
-                util.ensure_dir(pp_ssl_dir, 0771)
-                util.chownbyid(pp_ssl_dir,
-                               pwd.getpwnam('puppet').pw_uid, 0)
-                pp_ssl_certs = cloud.paths.join(False,
-                                                '/var/lib/puppet/ssl/certs/')
-                util.ensure_dir(pp_ssl_certs)
-                util.chownbyid(pp_ssl_certs,
-                               pwd.getpwnam('puppet').pw_uid, 0)
-                pp_ssl_ca_certs = cloud.paths.join(False,
-                                                   ('/var/lib/puppet/'
-                                                    'ssl/certs/ca.pem'))
-                util.write_file(pp_ssl_ca_certs, cfg)
-                util.chownbyid(pp_ssl_ca_certs,
-                               pwd.getpwnam('puppet').pw_uid, 0)
+                util.ensure_dir(PUPPET_SSL_DIR, 0771)
+                util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
+                util.ensure_dir(PUPPET_SSL_CERT_DIR)
+                util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
+                util.write_file(PUPPET_SSL_CERT_PATH, str(cfg))
+                util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
             else:
                 # Iterate throug the config items, we'll use ConfigParser.set
                 # to overwrite or create new items as needed
@@ -90,25 +108,11 @@ def handle(name, cfg, cloud, log, _args):
                     puppet_config.set(cfg_name, o, v)
         # We got all our config as wanted we'll rename
         # the previous puppet.conf and create our new one
-        conf_old_fn = cloud.paths.join(False,
-                                       '/etc/puppet/puppet.conf.old')
-        util.rename(puppet_conf_fn, conf_old_fn)
-        puppet_conf_rw = cloud.paths.join(False, '/etc/puppet/puppet.conf')
-        util.write_file(puppet_conf_rw, puppet_config.stringify())
+        util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH))
+        util.write_file(PUPPET_CONF_PATH, puppet_config.stringify())
 
-    # Set puppet to automatically start
-    if os.path.exists('/etc/default/puppet'):
-        util.subp(['sed', '-i',
-                   '-e', 's/^START=.*/START=yes/',
-                   '/etc/default/puppet'], capture=False)
-    elif os.path.exists('/bin/systemctl'):
-        util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
-                  capture=False)
-    elif os.path.exists('/sbin/chkconfig'):
-        util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
-    else:
-        log.warn(("Sorry we do not know how to enable"
-                  " puppet services on this system"))
+    # Set it up so it autostarts
+    _autostart_puppet(log)
 
     # Start puppetd
     util.subp(['service', 'puppet', 'start'], capture=False)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index e7f27944..70294eda 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -32,6 +32,8 @@ RESIZE_FS_PREFIXES_CMDS = [
     ('xfs', 'xfs_growfs'),
 ]
 
+NOBLOCK = "noblock"
+
 
 def nodeify_path(devpth, where, log):
     try:
@@ -62,23 +64,22 @@ def get_fs_type(st_dev, path, log):
         raise
 
 
-def handle(name, cfg, cloud, log, args):
+def handle(name, cfg, _cloud, log, args):
     if len(args) != 0:
         resize_root = args[0]
     else:
         resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
 
-    if not util.translate_bool(resize_root):
+    if not util.translate_bool(resize_root, addons=[NOBLOCK]):
         log.debug("Skipping module named %s, resizing disabled", name)
         return
 
     # TODO(harlowja) is the directory ok to be used??
     resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
-    resize_root_d = cloud.paths.join(False, resize_root_d)
     util.ensure_dir(resize_root_d)
 
     # TODO(harlowja): allow what is to be resized to be configurable??
-    resize_what = cloud.paths.join(False, "/")
+    resize_what = "/"
     with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.",
                                     dir=resize_root_d, delete=True) as tfh:
         devpth = tfh.name
@@ -111,7 +112,7 @@ def handle(name, cfg, cloud, log, args):
 
     log.debug("Resizing %s (%s) using %s", resize_what, fs_type, resizer)
     resize_cmd = [resizer, devpth]
-    if resize_root == "noblock":
+    if resize_root == NOBLOCK:
         # Fork to a child that will run
         # the resize command
         util.fork_cb(do_resize, resize_cmd, log)
@@ -121,7 +122,7 @@ def handle(name, cfg, cloud, log, args):
         do_resize(resize_cmd, log)
 
     action = 'Resized'
-    if resize_root == "noblock":
+    if resize_root == NOBLOCK:
         action = 'Resizing (via forking)'
     log.debug("%s root filesystem (type=%s, maj=%i, min=%i, val=%s)",
               action, fs_type, os.major(st_dev), os.minor(st_dev), resize_root)
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
new file mode 100644
index 00000000..8a460f7e
--- /dev/null
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -0,0 +1,107 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2013 Craig Tracey
+#
+#    Author: Craig Tracey <craigtracey@gmail.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Note:
+#   This module is intended to manage resolv.conf in environments where
+#   early configuration of resolv.conf is necessary for further
+#   bootstrapping and/or where configuration management such as puppet or
+#   chef own dns configuration. As Debian/Ubuntu will, by default, utilize
+#   resovlconf, and similarly RedHat will use sysconfig, this module is
+#   likely to be of little use unless those are configured correctly.
+#
+#   For RedHat with sysconfig, be sure to set PEERDNS=no for all DHCP
+#   enabled NICs.  And, in Ubuntu/Debian it is recommended that DNS
+#   be configured via the standard /etc/network/interfaces configuration
+#   file.
+#
+#
+# Usage Example:
+#
+# #cloud-config
+# manage_resolv_conf: true
+#
+# resolv_conf:
+#   nameservers: ['8.8.4.4', '8.8.8.8']
+#   searchdomains:
+#     - foo.example.com
+#     - bar.example.com
+#   domain: example.com
+#   options:
+#     rotate: true
+#     timeout: 1
+#
+
+
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import templater
+from cloudinit import util
+
+frequency = PER_INSTANCE
+
+distros = ['fedora', 'rhel']
+
+
+def generate_resolv_conf(cloud, log, params):
+    template_fn = cloud.get_template_filename('resolv.conf')
+    if not template_fn:
+        log.warn("No template found, not rendering /etc/resolv.conf")
+        return
+
+    flags = []
+    false_flags = []
+    if 'options' in params:
+        for key, val in params['options'].iteritems():
+            if type(val) == bool:
+                if val:
+                    flags.append(key)
+                else:
+                    false_flags.append(key)
+
+    for flag in flags + false_flags:
+        del params['options'][flag]
+
+    params['flags'] = flags
+    log.debug("Writing resolv.conf from template %s" % template_fn)
+    templater.render_to_file(template_fn, '/etc/resolv.conf', params)
+
+
+def handle(name, cfg, _cloud, log, _args):
+    """
+    Handler for resolv.conf
+
+    @param name: The module name "resolv-conf" from cloud.cfg
+    @param cfg: A nested dict containing the entire cloud config contents.
+    @param cloud: The L{CloudInit} object in use.
+    @param log: Pre-initialized Python logger object to use for logging.
+    @param args: Any module arguments from cloud.cfg
+    """
+    if "manage_resolv_conf" not in cfg:
+        log.debug(("Skipping module named %s,"
+                   " no 'manage_resolv_conf' key in configuration"), name)
+        return
+
+    if not util.get_cfg_option_bool(cfg, "manage_resolv_conf", False):
+        log.debug(("Skipping module named %s,"
+                   " 'manage_resolv_conf' present but set to False"), name)
+        return
+
+    if not "resolv_conf" in cfg:
+        log.warn("manage_resolv_conf True but no parameters provided!")
+
+    generate_resolv_conf(_cloud, log, cfg["resolv_conf"])
+    return
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 78327526..0c2c6880 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -71,8 +71,7 @@ def handle(name, cfg, cloud, log, _args):
 
         try:
             contents = "%s\n" % (content)
-            util.write_file(cloud.paths.join(False, filename),
-                            contents, omode=omode)
+            util.write_file(filename, contents, omode=omode)
         except Exception:
             util.logexc(log, "Failed to write to %s", filename)
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 65064cfb..598c3a3e 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -33,6 +33,6 @@ def handle(name, cfg, cloud, log, _args):
     cmd = cfg["runcmd"]
     try:
         content = util.shellify(cmd)
-        util.write_file(cloud.paths.join(False, out_fn), content, 0700)
+        util.write_file(out_fn, content, 0700)
     except:
         util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 8a1440d9..53013dcb 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -31,11 +31,10 @@ def handle(name, cfg, cloud, log, _args):
     salt_cfg = cfg['salt_minion']
 
     # Start by installing the salt package ...
-    cloud.distro.install_packages(["salt-minion"])
+    cloud.distro.install_packages(('salt-minion',))
 
     # Ensure we can configure files at the right dir
-    config_dir = cloud.paths.join(False, salt_cfg.get("config_dir",
-                                                      '/etc/salt'))
+    config_dir = salt_cfg.get("config_dir", '/etc/salt')
     util.ensure_dir(config_dir)
 
     # ... and then update the salt configuration
@@ -47,8 +46,7 @@ def handle(name, cfg, cloud, log, _args):
 
     # ... copy the key pair if specified
     if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
-        pki_dir = cloud.paths.join(False, salt_cfg.get('pki_dir',
-                                                       '/etc/salt/pki'))
+        pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
         with util.umask(077):
             util.ensure_dir(pki_dir)
             pub_name = os.path.join(pki_dir, 'minion.pub')
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index b0f27ebf..2b32fc94 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -27,9 +27,11 @@ def handle(name, cfg, cloud, log, _args):
                    " not setting the hostname in module %s"), name)
         return
 
-    (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
+    (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
     try:
-        log.debug("Setting hostname to %s", hostname)
-        cloud.distro.set_hostname(hostname)
+        log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
+        cloud.distro.set_hostname(hostname, fqdn)
     except Exception:
-        util.logexc(log, "Failed to set hostname to %s", hostname)
+        util.logexc(log, "Failed to set the hostname to %s (%s)",
+                    fqdn, hostname)
+        raise
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 26c558ad..c6bf62fd 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -114,8 +114,7 @@ def handle(_name, cfg, cloud, log, args):
         replaced_auth = False
 
         # See: man sshd_config
-        conf_fn = cloud.paths.join(True, ssh_util.DEF_SSHD_CFG)
-        old_lines = ssh_util.parse_ssh_config(conf_fn)
+        old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
         new_lines = []
         i = 0
         for (i, line) in enumerate(old_lines):
@@ -134,8 +133,7 @@ def handle(_name, cfg, cloud, log, args):
                                                   pw_auth))
 
         lines = [str(e) for e in new_lines]
-        ssh_rw_fn = cloud.paths.join(False, ssh_util.DEF_SSHD_CFG)
-        util.write_file(ssh_rw_fn, "\n".join(lines))
+        util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))
 
         try:
             cmd = ['service']
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 32e48c30..b623d476 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -59,7 +59,7 @@ def handle(_name, cfg, cloud, log, _args):
 
     # remove the static keys from the pristine image
     if cfg.get("ssh_deletekeys", True):
-        key_pth = cloud.paths.join(False, "/etc/ssh/", "ssh_host_*key*")
+        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
         for f in glob.glob(key_pth):
             try:
                 util.del_file(f)
@@ -72,8 +72,7 @@ def handle(_name, cfg, cloud, log, _args):
             if key in KEY_2_FILE:
                 tgt_fn = KEY_2_FILE[key][0]
                 tgt_perms = KEY_2_FILE[key][1]
-                util.write_file(cloud.paths.join(False, tgt_fn),
-                                val, tgt_perms)
+                util.write_file(tgt_fn, val, tgt_perms)
 
         for (priv, pub) in PRIV_2_PUB.iteritems():
             if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']:
@@ -94,7 +93,7 @@ def handle(_name, cfg, cloud, log, _args):
                                            'ssh_genkeytypes',
                                            GENERATE_KEY_NAMES)
         for keytype in genkeys:
-            keyfile = cloud.paths.join(False, KEY_FILE_TPL % (keytype))
+            keyfile = KEY_FILE_TPL % (keytype)
             util.ensure_dir(os.path.dirname(keyfile))
             if not os.path.exists(keyfile):
                 cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
@@ -118,17 +117,16 @@ def handle(_name, cfg, cloud, log, _args):
             cfgkeys = cfg["ssh_authorized_keys"]
             keys.extend(cfgkeys)
 
-        apply_credentials(keys, user, cloud.paths,
-                          disable_root, disable_root_opts)
+        apply_credentials(keys, user, disable_root, disable_root_opts)
     except:
        util.logexc(log, "Applying ssh credentials failed!")
 
 
-def apply_credentials(keys, user, paths, disable_root, disable_root_opts):
+def apply_credentials(keys, user, disable_root, disable_root_opts):
     keys = set(keys)
     if user:
-        ssh_util.setup_user_keys(keys, user, '', paths)
+        ssh_util.setup_user_keys(keys, user, '')
 
     if disable_root:
         if not user:
@@ -137,4 +135,4 @@ def apply_credentials(keys, user, paths, disable_root, disable_root_opts):
     else:
         key_prefix = ''
 
-    ssh_util.setup_user_keys(keys, 'root', key_prefix, paths)
+    ssh_util.setup_user_keys(keys, 'root', key_prefix)
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 8c9a8806..c38bcea2 100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -97,9 +97,8 @@ def handle(name, cfg, cloud, log, _args):
                    "logging of ssh fingerprints disabled"), name)
 
     hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
-    extract_func = ssh_util.extract_authorized_keys
     (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
     for (user_name, _cfg) in users.items():
-        (auth_key_fn, auth_key_entries) = extract_func(user_name, cloud.paths)
-        _pprint_key_entries(user_name, auth_key_fn,
-                            auth_key_entries, hash_meth)
+        (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
+        _pprint_key_entries(user_name, key_fn,
+                            key_entries, hash_meth)
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index 4d75000f..d3dd1f32 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -37,13 +37,13 @@ def handle(name, cfg, cloud, log, _args):
 
         # Render from a template file
         tpl_fn_name = cloud.get_template_filename("hosts.%s" %
-                                                  (cloud.distro.name))
+                                                  (cloud.distro.osfamily))
         if not tpl_fn_name:
             raise RuntimeError(("No hosts template could be"
-                                " found for distro %s") % (cloud.distro.name))
+                                " found for distro %s") %
+                               (cloud.distro.osfamily))
 
-        out_fn = cloud.paths.join(False, '/etc/hosts')
-        templater.render_to_file(tpl_fn_name, out_fn,
+        templater.render_to_file(tpl_fn_name, '/etc/hosts',
                                  {'hostname': hostname, 'fqdn': fqdn})
 
     elif manage_hosts == "localhost":
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index 1d6679ea..52225cd8 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -32,10 +32,12 @@ def handle(name, cfg, cloud, log, _args):
                    " not updating the hostname in module %s"), name)
         return
 
-    (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
+    (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
     try:
         prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname")
-        cloud.distro.update_hostname(hostname, prev_fn)
+        log.debug("Updating hostname to %s (%s)", fqdn, hostname)
+        cloud.distro.update_hostname(hostname, fqdn, prev_fn)
     except Exception:
-        util.logexc(log, "Failed to set the hostname to %s", hostname)
+        util.logexc(log, "Failed to update the hostname to %s (%s)",
+                    fqdn, hostname)
         raise
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
new file mode 100644
index 00000000..5c273825
--- /dev/null
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -0,0 +1,106 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2012 Yahoo! Inc.
+#
+#    Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit import util
+
+import configobj
+
+
+def _canonicalize_id(repo_id):
+    repo_id = repo_id.lower().replace("-", "_")
+    repo_id = repo_id.replace(" ", "_")
+    return repo_id
+
+
+def _format_repo_value(val):
+    if isinstance(val, (bool)):
+        # Seems like yum prefers 1/0
+        return str(int(val))
+    if isinstance(val, (list, tuple)):
+        # Can handle 'lists' in certain cases
+        # See: http://bit.ly/Qqrf1t
+        return "\n    ".join([_format_repo_value(v) for v in val])
+    if not isinstance(val, (basestring, str)):
+        return str(val)
+    return val
+
+
+## TODO(harlowja): move to distro?
+# See man yum.conf
+def _format_repository_config(repo_id, repo_config):
+    to_be = configobj.ConfigObj()
+    to_be[repo_id] = {}
+    # Do basic translation of the items -> values
+    for (k, v) in repo_config.items():
+        # For now assume that people using this know
+        # the format of yum and don't verify keys/values further
+        to_be[repo_id][k] = _format_repo_value(v)
+    lines = to_be.write()
+    lines.insert(0, "# Created by cloud-init on %s" % (util.time_rfc2822()))
+    return "\n".join(lines)
+
+
+def handle(name, cfg, _cloud, log, _args):
+    repos = cfg.get('yum_repos')
+    if not repos:
+        log.debug(("Skipping module named %s,"
+                   " no 'yum_repos' configuration found"), name)
+        return
+    repo_base_path = util.get_cfg_option_str(cfg, 'yum_repo_dir',
+                                             '/etc/yum.repos.d/')
+    repo_locations = {}
+    repo_configs = {}
+    for (repo_id, repo_config) in repos.items():
+        canon_repo_id = _canonicalize_id(repo_id)
+        repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
+        if os.path.exists(repo_fn_pth):
+            log.info("Skipping repo %s, file %s already exists!",
+                     repo_id, repo_fn_pth)
+            continue
+        elif canon_repo_id in repo_locations:
+            log.info("Skipping repo %s, file %s already pending!",
+                     repo_id, repo_fn_pth)
+            continue
+        if not repo_config:
+            repo_config = {}
+        # Do some basic sanity checks/cleaning
+        n_repo_config = {}
+        for (k, v) in repo_config.items():
+            k = k.lower().strip().replace("-", "_")
+            if k:
+                n_repo_config[k] = v
+        repo_config = n_repo_config
+        missing_required = 0
+        for req_field in ['baseurl']:
+            if not req_field in repo_config:
+                log.warn(("Repository %s does not contain a %s"
+                          " configuration 'required' entry"),
+                         repo_id, req_field)
+                missing_required += 1
+        if not missing_required:
+            repo_configs[canon_repo_id] = repo_config
+            repo_locations[canon_repo_id] = repo_fn_pth
+        else:
+            log.warn("Repository %s is missing %s required fields, skipping!",
+                     repo_id, missing_required)
+    for (c_repo_id, path) in repo_locations.items():
+        repo_blob = _format_repository_config(c_repo_id,
+                                               repo_configs.get(c_repo_id))
+        util.write_file(path, repo_blob)
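
For reference, a minimal #cloud-config sketch exercising two of the modules introduced in this merge, cc_power_state_change and cc_yum_add_repo. The key names come from the handlers shown in the diff above; the repo id, URL, and message text are illustrative only.

#cloud-config
# power_state is parsed by load_power_state(): mode must be one of
# halt/poweroff/reboot, delay is "now" or "+<minutes>", timeout is seconds
# to wait for cloud-init to exit before giving up.
power_state:
  mode: poweroff
  delay: "+5"
  message: "Powering off after cloud-init completes"
  timeout: 30

# Each yum_repos entry needs at least 'baseurl'; the repo file is written
# to <yum_repo_dir>/<canonicalized-id>.repo (default /etc/yum.repos.d/),
# with dashes in the id turned into underscores.
yum_repos:
  my-example-repo:
    baseurl: http://repo.example.com/rhel/6/$basearch
    name: My example repo
    enabled: true
    gpgcheck: false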