Diffstat (limited to 'cloudinit')
25 files changed, 905 insertions, 214 deletions
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index bc896253..fc837363 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -18,6 +18,57 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. +""" +**Summary:** module that configures, starts and installs chef. + +**Description:** This module enables chef to be installed (from packages or +from gems, or from omnibus). Before this occurs chef configurations are +written to disk (validation.pem, client.pem, firstboot.json, client.rb), +and needed chef folders/directories are created (/etc/chef and /var/log/chef +and so-on). Then once installing proceeds correctly if configured chef will +be started (in daemon mode or in non-daemon mode) and then once that has +finished (if ran in non-daemon mode this will be when chef finishes +converging, if ran in daemon mode then no further actions are possible since +chef will have forked into its own process) then a post run function can +run that can do finishing activities (such as removing the validation pem +file). + +It can be configured with the following option structure:: + + chef: + directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef, + /var/cache/chef, /var/backups/chef, /var/run/chef) + validation_key or validation_cert: (optional string to be written to + /etc/chef/validation.pem) + firstboot_path: (path to write run_list and initial_attributes keys that + should also be present in this configuration, defaults + to /etc/chef/firstboot.json) + exec: boolean to run or not run chef (defaults to false, unless + a gem installed is requested + where this will then default + to true) + + chef.rb template keys (if falsey, then will be skipped and not + written to /etc/chef/client.rb) + + chef: + client_key: + environment: + file_backup_path: + file_cache_path: + json_attribs: + log_level: + log_location: + node_name: + pid_file: + server_url: + show_time: + ssl_verify_mode: + validation_key: + validation_name: +""" + +import itertools import json import os @@ -27,19 +78,112 @@ from cloudinit import util RUBY_VERSION_DEFAULT = "1.8" -CHEF_DIRS = [ +CHEF_DIRS = tuple([ '/etc/chef', '/var/log/chef', '/var/lib/chef', '/var/cache/chef', '/var/backups/chef', '/var/run/chef', -] +]) +REQUIRED_CHEF_DIRS = tuple([ + '/etc/chef', +]) +# Used if fetching chef from a omnibus style package OMNIBUS_URL = "https://www.getchef.com/chef/install.sh" +OMNIBUS_URL_RETRIES = 5 + +CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem' +CHEF_FB_PATH = '/etc/chef/firstboot.json' +CHEF_RB_TPL_DEFAULTS = { + # These are ruby symbols... + 'ssl_verify_mode': ':verify_none', + 'log_level': ':info', + # These are not symbols... 
+ 'log_location': '/var/log/chef/client.log', + 'validation_key': CHEF_VALIDATION_PEM_PATH, + 'client_key': "/etc/chef/client.pem", + 'json_attribs': CHEF_FB_PATH, + 'file_cache_path': "/var/cache/chef", + 'file_backup_path': "/var/backups/chef", + 'pid_file': "/var/run/chef/client.pid", + 'show_time': True, +} +CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time']) +CHEF_RB_TPL_PATH_KEYS = frozenset([ + 'log_location', + 'validation_key', + 'client_key', + 'file_cache_path', + 'json_attribs', + 'file_cache_path', + 'pid_file', +]) +CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys()) +CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS) +CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS) +CHEF_RB_TPL_KEYS.extend([ + 'server_url', + 'node_name', + 'environment', + 'validation_name', +]) +CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS) +CHEF_RB_PATH = '/etc/chef/client.rb' +CHEF_EXEC_PATH = '/usr/bin/chef-client' +CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20']) + + +def is_installed(): + if not os.path.isfile(CHEF_EXEC_PATH): + return False + if not os.access(CHEF_EXEC_PATH, os.X_OK): + return False + return True + + +def post_run_chef(chef_cfg, log): + delete_pem = util.get_cfg_option_bool(chef_cfg, + 'delete_validation_post_exec', + default=False) + if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH): + os.unlink(CHEF_VALIDATION_PEM_PATH) + + +def get_template_params(iid, chef_cfg, log): + params = CHEF_RB_TPL_DEFAULTS.copy() + # Allow users to overwrite any of the keys they want (if they so choose), + # when a value is None, then the value will be set to None and no boolean + # or string version will be populated... + for (k, v) in chef_cfg.items(): + if k not in CHEF_RB_TPL_KEYS: + log.debug("Skipping unknown chef template key '%s'", k) + continue + if v is None: + params[k] = None + else: + # This will make the value a boolean or string... + if k in CHEF_RB_TPL_BOOL_KEYS: + params[k] = util.get_cfg_option_bool(chef_cfg, k) + else: + params[k] = util.get_cfg_option_str(chef_cfg, k) + # These ones are overwritten to be exact values... + params.update({ + 'generated_by': util.make_header(), + 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', + default=iid), + 'environment': util.get_cfg_option_str(chef_cfg, 'environment', + default='_default'), + # These two are mandatory... 
+ 'server_url': chef_cfg['server_url'], + 'validation_name': chef_cfg['validation_name'], + }) + return params def handle(name, cfg, cloud, log, _args): + """Handler method activated by cloud-init.""" # If there isn't a chef key in the configuration don't do anything if 'chef' not in cfg: @@ -49,7 +193,10 @@ def handle(name, cfg, cloud, log, _args): chef_cfg = cfg['chef'] # Ensure the chef directories we use exist - for d in CHEF_DIRS: + chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories') + if not chef_dirs: + chef_dirs = list(CHEF_DIRS) + for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS): util.ensure_dir(d) # Set the validation key based on the presence of either 'validation_key' @@ -57,64 +204,108 @@ def handle(name, cfg, cloud, log, _args): # takes precedence for key in ('validation_key', 'validation_cert'): if key in chef_cfg and chef_cfg[key]: - util.write_file('/etc/chef/validation.pem', chef_cfg[key]) + util.write_file(CHEF_VALIDATION_PEM_PATH, chef_cfg[key]) break # Create the chef config from template template_fn = cloud.get_template_filename('chef_client.rb') if template_fn: iid = str(cloud.datasource.get_instance_id()) - params = { - 'server_url': chef_cfg['server_url'], - 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', iid), - 'environment': util.get_cfg_option_str(chef_cfg, 'environment', - '_default'), - 'validation_name': chef_cfg['validation_name'] - } - templater.render_to_file(template_fn, '/etc/chef/client.rb', params) + params = get_template_params(iid, chef_cfg, log) + # Do a best effort attempt to ensure that the template values that + # are associated with paths have there parent directory created + # before they are used by the chef-client itself. + param_paths = set() + for (k, v) in params.items(): + if k in CHEF_RB_TPL_PATH_KEYS and v: + param_paths.add(os.path.dirname(v)) + util.ensure_dirs(param_paths) + templater.render_to_file(template_fn, CHEF_RB_PATH, params) else: - log.warn("No template found, not rendering to /etc/chef/client.rb") - - # set the firstboot json - initial_json = {} - if 'run_list' in chef_cfg: - initial_json['run_list'] = chef_cfg['run_list'] - if 'initial_attributes' in chef_cfg: - initial_attributes = chef_cfg['initial_attributes'] - for k in list(initial_attributes.keys()): - initial_json[k] = initial_attributes[k] - util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json)) + log.warn("No template found, not rendering to %s", + CHEF_RB_PATH) - # If chef is not installed, we install chef based on 'install_type' - if (not os.path.isfile('/usr/bin/chef-client') or - util.get_cfg_option_bool(chef_cfg, - 'force_install', default=False)): - - install_type = util.get_cfg_option_str(chef_cfg, 'install_type', - 'packages') - if install_type == "gems": - # this will install and run the chef-client from gems - chef_version = util.get_cfg_option_str(chef_cfg, 'version', None) - ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version', - RUBY_VERSION_DEFAULT) - install_chef_from_gems(cloud.distro, ruby_version, chef_version) - # and finally, run chef-client - log.debug('Running chef-client') - util.subp(['/usr/bin/chef-client', - '-d', '-i', '1800', '-s', '20'], capture=False) - elif install_type == 'packages': - # this will install and run the chef-client from packages - cloud.distro.install_packages(('chef',)) - elif install_type == 'omnibus': - url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL) - content = url_helper.readurl(url=url, retries=5) - with util.tempdir() as tmpd: - # use 
tmpd over tmpfile to avoid 'Text file busy' on execute - tmpf = "%s/chef-omnibus-install" % tmpd - util.write_file(tmpf, str(content), mode=0700) - util.subp([tmpf], capture=False) + # Set the firstboot json + fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path', + default=CHEF_FB_PATH) + if not fb_filename: + log.info("First boot path empty, not writing first boot json file") + else: + initial_json = {} + if 'run_list' in chef_cfg: + initial_json['run_list'] = chef_cfg['run_list'] + if 'initial_attributes' in chef_cfg: + initial_attributes = chef_cfg['initial_attributes'] + for k in list(initial_attributes.keys()): + initial_json[k] = initial_attributes[k] + util.write_file(fb_filename, json.dumps(initial_json)) + + # Try to install chef, if its not already installed... + force_install = util.get_cfg_option_bool(chef_cfg, + 'force_install', default=False) + if not is_installed() or force_install: + run = install_chef(cloud, chef_cfg, log) + elif is_installed(): + run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + else: + run = False + if run: + run_chef(chef_cfg, log) + post_run_chef(chef_cfg, log) + + +def run_chef(chef_cfg, log): + log.debug('Running chef-client') + cmd = [CHEF_EXEC_PATH] + if 'exec_arguments' in chef_cfg: + cmd_args = chef_cfg['exec_arguments'] + if isinstance(cmd_args, (list, tuple)): + cmd.extend(cmd_args) + elif isinstance(cmd_args, (str, basestring)): + cmd.append(cmd_args) else: - log.warn("Unknown chef install type %s", install_type) + log.warn("Unknown type %s provided for chef" + " 'exec_arguments' expected list, tuple," + " or string", type(cmd_args)) + cmd.extend(CHEF_EXEC_DEF_ARGS) + else: + cmd.extend(CHEF_EXEC_DEF_ARGS) + util.subp(cmd, capture=False) + + +def install_chef(cloud, chef_cfg, log): + # If chef is not installed, we install chef based on 'install_type' + install_type = util.get_cfg_option_str(chef_cfg, 'install_type', + 'packages') + run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + if install_type == "gems": + # This will install and run the chef-client from gems + chef_version = util.get_cfg_option_str(chef_cfg, 'version', None) + ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version', + RUBY_VERSION_DEFAULT) + install_chef_from_gems(cloud.distro, ruby_version, chef_version) + # Retain backwards compat, by preferring True instead of False + # when not provided/overriden... 
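
The new run_chef() in this hunk accepts 'exec_arguments' either as a single
string or as a list/tuple, and falls back to the daemonized defaults
('-d', '-i', '1800', '-s', '20') otherwise. A standalone sketch of that
command construction (the helper name build_chef_cmd is hypothetical; the
constants are copied from the hunk, and unknown argument types simply fall
back to the defaults here, where the real code also logs a warning)::

    CHEF_EXEC_PATH = '/usr/bin/chef-client'
    CHEF_EXEC_DEF_ARGS = ('-d', '-i', '1800', '-s', '20')

    def build_chef_cmd(chef_cfg):
        # Normalize 'exec_arguments' into argv; anything unrecognized
        # (including its absence) falls back to the default arguments.
        cmd = [CHEF_EXEC_PATH]
        cmd_args = chef_cfg.get('exec_arguments')
        if isinstance(cmd_args, (list, tuple)):
            cmd.extend(cmd_args)
        elif isinstance(cmd_args, str):
            cmd.append(cmd_args)
        else:
            cmd.extend(CHEF_EXEC_DEF_ARGS)
        return cmd

    print(build_chef_cmd({'exec_arguments': ['--once']}))
    # ['/usr/bin/chef-client', '--once']
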
+ run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True) + elif install_type == 'packages': + # This will install and run the chef-client from packages + cloud.distro.install_packages(('chef',)) + elif install_type == 'omnibus': + # This will install as a omnibus unified package + url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL) + retries = max(0, util.get_cfg_option_int(chef_cfg, + "omnibus_url_retries", + default=OMNIBUS_URL_RETRIES)) + content = url_helper.readurl(url=url, retries=retries) + with util.tempdir() as tmpd: + # Use tmpdir over tmpfile to avoid 'text file busy' on execute + tmpf = "%s/chef-omnibus-install" % tmpd + util.write_file(tmpf, str(content), mode=0700) + util.subp([tmpf], capture=False) + else: + log.warn("Unknown chef install type '%s'", install_type) + run = False + return run def get_ruby_packages(version): @@ -133,9 +324,9 @@ def install_chef_from_gems(ruby_version, chef_version, distro): util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby') if chef_version: util.subp(['/usr/bin/gem', 'install', 'chef', - '-v %s' % chef_version, '--no-ri', - '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False) + '-v %s' % chef_version, '--no-ri', + '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False) else: util.subp(['/usr/bin/gem', 'install', 'chef', - '--no-ri', '--no-rdoc', '--bindir', - '/usr/bin', '-q'], capture=False) + '--no-ri', '--no-rdoc', '--bindir', + '/usr/bin', '-q'], capture=False) diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 7219b0f8..8c489426 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -14,11 +14,33 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -from cloudinit import type_utils -from cloudinit import util +""" +**Summary:** helper to debug cloud-init *internal* datastructures. + +**Description:** This module will enable for outputting various internal +information that cloud-init sources provide to either a file or to the output +console/log location that this cloud-init has been configured with when +running. + +It can be configured with the following option structure:: + + debug: + verbose: (defaulting to true) + output: (location to write output, defaulting to console + log) + +.. note:: + + Log configurations are not output. +""" + import copy from StringIO import StringIO +from cloudinit import type_utils +from cloudinit import util + +SKIP_KEYS = frozenset(['log_cfgs']) + def _make_header(text): header = StringIO() @@ -31,7 +53,14 @@ def _make_header(text): return header.getvalue() +def _dumps(obj): + text = util.yaml_dumps(obj, explicit_start=False, explicit_end=False) + return text.rstrip() + + def handle(name, cfg, cloud, log, args): + """Handler method activated by cloud-init.""" + verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True) if args: # if args are provided (from cmdline) then explicitly set verbose @@ -46,7 +75,7 @@ def handle(name, cfg, cloud, log, args): return # Clean out some keys that we just don't care about showing... dump_cfg = copy.deepcopy(cfg) - for k in ['log_cfgs']: + for k in SKIP_KEYS: dump_cfg.pop(k, None) all_keys = list(dump_cfg.keys()) for k in all_keys: @@ -55,10 +84,10 @@ def handle(name, cfg, cloud, log, args): # Now dump it... 
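
The _dumps() helper above exists because PyYAML brackets every document
with explicit '---' / '...' markers when explicit_start and explicit_end
are enabled (the previous util.yaml_dumps() behavior), which is noise in
debug output. A quick illustration, assuming PyYAML is installed::

    import yaml

    cfg = {'datasource_list': ['ConfigDrive', 'Ec2']}

    # With document markers (the old behavior):
    print(yaml.safe_dump(cfg, default_flow_style=False,
                         explicit_start=True, explicit_end=True))
    # ---
    # datasource_list:
    # - ConfigDrive
    # - Ec2
    # ...

    # What _dumps() produces: no markers, trailing whitespace stripped.
    print(yaml.safe_dump(cfg, default_flow_style=False,
                         explicit_start=False, explicit_end=False).rstrip())
    # datasource_list:
    # - ConfigDrive
    # - Ec2
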
to_print = StringIO()
to_print.write(_make_header("Config"))
- to_print.write(util.yaml_dumps(dump_cfg))
+ to_print.write(_dumps(dump_cfg))
to_print.write("\n")
to_print.write(_make_header("MetaData"))
- to_print.write(util.yaml_dumps(cloud.datasource.metadata))
+ to_print.write(_dumps(cloud.datasource.metadata))
to_print.write("\n")
to_print.write(_make_header("Misc"))
to_print.write("Datasource: %s\n" %
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index 85716a91..e3219e81 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -47,7 +47,8 @@ def handle(_name, cfg, _cloud, log, _args):
idevs_empty = "false"
if idevs is None:
idevs = "/dev/sda"
- for dev in ("/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"):
+ for dev in ("/dev/sda", "/dev/vda", "/dev/xvda",
+ "/dev/sda1", "/dev/vda1", "/dev/xvda1"):
if os.path.exists(dev):
idevs = dev
break
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index ba1303d1..1cb1e839 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -75,6 +75,159 @@ def sanitize_devname(startname, transformer, log):
return devnode_for_dev_part(blockdev, part)
+
+def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
+ # make a suggestion on the size of swap for this system.
+ if memsize is None:
+ memsize = util.read_meminfo()['total']
+
+ GB = 2 ** 30
+ sugg_max = 8 * GB
+
+ info = {'avail': 'na', 'max_in': maxsize, 'mem': memsize}
+
+ if fsys is None and maxsize is None:
+ # set max to 8GB default if no filesystem given
+ maxsize = sugg_max
+ elif fsys:
+ statvfs = os.statvfs(fsys)
+ avail = statvfs.f_frsize * statvfs.f_bfree
+ info['avail'] = avail
+
+ if maxsize is None:
+ # set to 25% of filesystem space
+ maxsize = min(int(avail / 4), sugg_max)
+ elif maxsize > ((avail * .9)):
+ # set to 90% of available disk space
+ maxsize = int(avail * .9)
+ elif maxsize is None:
+ maxsize = sugg_max
+
+ info['max'] = maxsize
+
+ formulas = [
+ # < 1G: swap = double memory
+ (1 * GB, lambda x: x * 2),
+ # < 2G: swap = 2G
+ (2 * GB, lambda x: 2 * GB),
+ # < 4G: swap = memory
+ (4 * GB, lambda x: x),
+ # < 16G: 4G
+ (16 * GB, lambda x: 4 * GB),
+ # < 64G: 1/2 of memory, up to max
+ (64 * GB, lambda x: x / 2),
+ ]
+
+ size = None
+ for top, func in formulas:
+ if memsize <= top:
+ size = min(func(memsize), maxsize)
+ # if less than 1/2 memory and not much, return 0
+ if size < (memsize / 2) and size < 4 * GB:
+ size = 0
+ break
+ break
+
+ if size is None:
+ size = maxsize
+
+ info['size'] = size
+
+ MB = 2 ** 20
+ pinfo = {}
+ for k, v in info.items():
+ if isinstance(v, int):
+ pinfo[k] = "%s MB" % (v / MB)
+ else:
+ pinfo[k] = v
+
+ LOG.debug("suggest %(size)s swap for %(mem)s memory with '%(avail)s'"
+ " disk given max=%(max_in)s [max=%(max)s]" % pinfo)
+ return size
+
+
+def setup_swapfile(fname, size=None, maxsize=None):
+ """
+ fname: full path string of filename to setup
+ size: the size to create. set to "auto" for recommended
+ maxsize: the maximum size
+ """
+ tdir = os.path.dirname(fname)
+ if str(size).lower() == "auto":
+ try:
+ memsize = util.read_meminfo()['total']
+ except IOError as e:
+ LOG.debug("Not creating swap. 
failed to read meminfo") + return + + util.ensure_dir(tdir) + size = suggested_swapsize(fsys=tdir, maxsize=maxsize, + memsize=memsize) + + if not size: + LOG.debug("Not creating swap: suggested size was 0") + return + + mbsize = str(int(size / (2 ** 20))) + msg = "creating swap file '%s' of %sMB" % (fname, mbsize) + try: + util.ensure_dir(tdir) + util.log_time(LOG.debug, msg, func=util.subp, + args=[['sh', '-c', + ('rm -f "$1" && umask 0066 && ' + 'dd if=/dev/zero "of=$1" bs=1M "count=$2" && ' + 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), + 'setup_swap', fname, mbsize]]) + + except Exception as e: + raise IOError("Failed %s: %s" % (msg, e)) + + return fname + + +def handle_swapcfg(swapcfg): + """handle the swap config, calling setup_swap if necessary. + return None or (filename, size) + """ + if not isinstance(swapcfg, dict): + LOG.warn("input for swap config was not a dict.") + return None + + fname = swapcfg.get('filename', '/swap.img') + size = swapcfg.get('size', 0) + maxsize = swapcfg.get('maxsize', None) + + if not (size and fname): + LOG.debug("no need to setup swap") + return + + if os.path.exists(fname): + if not os.path.exists("/proc/swaps"): + LOG.debug("swap file %s existed. no /proc/swaps. Being safe.", + fname) + return fname + try: + for line in util.load_file("/proc/swaps").splitlines(): + if line.startswith(fname + " "): + LOG.debug("swap file %s already in use.", fname) + return fname + LOG.debug("swap file %s existed, but not in /proc/swaps", fname) + except: + LOG.warn("swap file %s existed. Error reading /proc/swaps", fname) + return fname + + try: + if isinstance(size, str) and size != "auto": + size = util.human2bytes(size) + if isinstance(maxsize, str): + maxsize = util.human2bytes(maxsize) + return setup_swapfile(fname=fname, size=size, maxsize=maxsize) + + except Exception as e: + LOG.warn("failed to setup swap: %s", e) + + return None + + def handle(_name, cfg, cloud, log, _args): # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno defvals = [None, None, "auto", "defaults,nobootwait", "0", "2"] @@ -162,6 +315,10 @@ def handle(_name, cfg, cloud, log, _args): else: actlist.append(x) + swapret = handle_swapcfg(cfg.get('swap', {})) + if swapret: + actlist.append([swapret, "none", "swap", "sw", "0", "0"]) + if len(actlist) == 0: log.debug("No modifications to fstab needed.") return diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 7e796228..cbc07853 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -162,8 +162,8 @@ def handle(name, cfg, _cloud, log, args): # Fork to a child that will run # the resize command util.fork_cb( - util.log_time(logfunc=log.debug, msg="backgrounded Resizing", - func=do_resize, args=(resize_cmd, log))) + util.log_time, logfunc=log.debug, msg="backgrounded Resizing", + func=do_resize, args=(resize_cmd, log)) else: util.log_time(logfunc=log.debug, msg="Resizing", func=do_resize, args=(resize_cmd, log)) diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 24e33915..4ca85e21 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -132,7 +132,7 @@ def handle(_name, cfg, cloud, log, args): 'PasswordAuthentication', pw_auth)) - lines = [str(e) for e in new_lines] + lines = [str(l) for l in new_lines] util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines)) try: diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py index 76c1663d..2d480d7e 100644 --- 
a/cloudinit/config/cc_ssh_import_id.py +++ b/cloudinit/config/cc_ssh_import_id.py @@ -85,7 +85,7 @@ def import_ssh_ids(ids, user, log): return try: - _check = pwd.getpwnam(user) + pwd.getpwnam(user) except KeyError as exc: raise exc diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py index 6f994bff..7e88ed85 100644 --- a/cloudinit/config/cc_ubuntu_init_switch.py +++ b/cloudinit/config/cc_ubuntu_init_switch.py @@ -17,30 +17,27 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. """ -ubuntu_init_switch: reboot system into another init +**Summary:** reboot system into another init. -This provides a way for the user to boot with systemd even if the -image is set to boot with upstart. It should be run as one of the first -cloud_init_modules, and will switch the init system and then issue a reboot. -The next boot will come up in the target init system and no action will +**Description:** This module provides a way for the user to boot with systemd +even if the image is set to boot with upstart. It should be run as one of the +first ``cloud_init_modules``, and will switch the init system and then issue a +reboot. The next boot will come up in the target init system and no action will be taken. This should be inert on non-ubuntu systems, and also exit quickly. -config is comes under the top level 'init_switch' dictionary. +It can be configured with the following option structure:: -#cloud-config -init_switch: - target: systemd - reboot: true + init_switch: + target: systemd (can be 'systemd' or 'upstart') + reboot: true (reboot if a change was made, or false to not reboot) -'target' can be 'systemd' or 'upstart'. Best effort is made, but its possible -this system will break, and probably won't interact well with any other -mechanism you've used to switch the init system. +.. note:: -'reboot': [default=true]. - true: reboot if a change was made. - false: do not reboot. + Best effort is made, but it's possible + this system will break, and probably won't interact well with any other + mechanism you've used to switch the init system. """ from cloudinit.settings import PER_INSTANCE @@ -91,6 +88,7 @@ fi def handle(name, cfg, cloud, log, args): + """Handler method activated by cloud-init.""" if not isinstance(cloud.distro, ubuntu.Distro): log.debug("%s: distro is '%s', not ubuntu. returning", diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 9c9211fe..83c2eebf 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -387,8 +387,20 @@ class Distro(object): # Import SSH keys if 'ssh_authorized_keys' in kwargs: - keys = set(kwargs['ssh_authorized_keys']) or [] - ssh_util.setup_user_keys(keys, name, options=None) + # Try to handle this in a smart manner. 
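
The 'ssh_authorized_keys' handling added just below accepts a bare string,
a dict (whose values are taken), or a list/tuple/set, and warns on anything
else. A minimal standalone sketch of that normalization (Python 3 spelling,
with the warning replaced by an exception for brevity)::

    def normalize_keys(keys):
        # A single key may arrive as one string; a dict contributes its
        # values; lists/tuples/sets pass through and end up as a set.
        if isinstance(keys, str):
            keys = [keys]
        if isinstance(keys, dict):
            keys = list(keys.values())
        if not isinstance(keys, (tuple, list, set)):
            raise TypeError("expected list, string, dict, or set,"
                            " not %s" % type(keys))
        return set(keys)

    print(normalize_keys("ssh-rsa AAAA... user@host"))
    # {'ssh-rsa AAAA... user@host'}
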
+ keys = kwargs['ssh_authorized_keys'] + if isinstance(keys, (basestring, str)): + keys = [keys] + if isinstance(keys, dict): + keys = list(keys.values()) + if keys is not None: + if not isinstance(keys, (tuple, list, set)): + LOG.warn("Invalid type '%s' detected for" + " 'ssh_authorized_keys', expected list," + " string, dict, or set.", type(keys)) + else: + keys = set(keys) or [] + ssh_util.setup_user_keys(keys, name, options=None) return True @@ -861,5 +873,5 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone", util.write_file(tz_conf, str(tz).rstrip() + "\n") # This ensures that the correct tz will be used for the system if tz_local and tz_file: - util.copy(tz_file, self.tz_local_fn) + util.copy(tz_file, tz_local) return diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 9f11b89c..005a0dd4 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -159,7 +159,7 @@ class Distro(distros.Distro): return hostname def set_timezone(self, tz): - set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) + distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) def package_command(self, command, args=None, pkgs=None): if pkgs is None: diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 7cf4a9ef..010be67d 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -131,7 +131,7 @@ class Distro(distros.Distro): return "127.0.1.1" def set_timezone(self, tz): - set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) + distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) def package_command(self, command, args=None, pkgs=None): if pkgs is None: diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 849834eb..ee23fd20 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -106,6 +106,35 @@ class Distro(distros.Distro): val = None return val + # NOVA will inject something like eth0, rewrite that to use the FreeBSD + # adapter. Since this adapter is based on the used driver, we need to + # figure out which interfaces are available. On KVM platforms this is + # vtnet0, where Xen would use xn0. + def getnetifname(self, dev): + LOG.debug("Translating network interface %s", dev) + if dev.startswith('lo'): + return dev + + n = re.search('\d+$', dev) + index = n.group(0) + + (out, err) = util.subp(['ifconfig', '-a']) + ifconfigoutput = [x for x in (out.strip()).splitlines() if len(x.split()) > 0] + for line in ifconfigoutput: + m = re.match('^\w+', line) + if m: + if m.group(0).startswith('lo'): + continue + # Just settle with the first non-lo adapter we find, since it's + # rather unlikely there will be multiple nicdrivers involved. + bsddev = m.group(0) + break + + # Replace the index with the one we're after. 
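
The getnetifname() helper being added here reduces to: take the numeric
index from the NOVA-injected name (eth0 -> 0), find the first non-loopback
adapter the host's driver actually exposes (vtnet0 on KVM, xn0 on Xen), and
graft the index onto that name. A condensed sketch, with the driver-derived
adapter passed in as a parameter (parameter name hypothetical)::

    import re

    def translate_devname(injected, bsd_adapter='vtnet0'):
        # e.g. 'eth1' on a KVM guest becomes 'vtnet1'.
        if injected.startswith('lo'):
            return injected
        index = re.search(r'\d+$', injected).group(0)
        return re.sub(r'\d+$', index, bsd_adapter)

    print(translate_devname('eth1'))  # vtnet1
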
+ bsddev = re.sub('\d+$', index, bsddev) + LOG.debug("Using network interface %s", bsddev) + return bsddev + def _read_system_hostname(self): sys_hostname = self._read_hostname(filename=None) return ('rc.conf', sys_hostname) @@ -162,18 +191,18 @@ class Distro(distros.Distro): log_adduser_cmd = ['pw', 'useradd', '-n', name] adduser_opts = { - "homedir": '-d', - "gecos": '-c', - "primary_group": '-g', - "groups": '-G', - "passwd": '-h', - "shell": '-s', - "inactive": '-E', + "homedir": '-d', + "gecos": '-c', + "primary_group": '-g', + "groups": '-G', + "passwd": '-h', + "shell": '-s', + "inactive": '-E', } adduser_flags = { - "no_user_group": '--no-user-group', - "system": '--system', - "no_log_init": '--no-log-init', + "no_user_group": '--no-user-group', + "system": '--system', + "no_log_init": '--no-log-init', } redact_opts = ['passwd'] @@ -246,17 +275,21 @@ class Distro(distros.Distro): nameservers = [] searchdomains = [] dev_names = entries.keys() - for (dev, info) in entries.iteritems(): + for (device, info) in entries.iteritems(): # Skip the loopback interface. - if dev.startswith('lo'): + if device.startswith('lo'): continue + dev = self.getnetifname(device) + LOG.info('Configuring interface %s', dev) if info.get('bootproto') == 'static': - LOG.debug('Configuring dev %s with %s / %s', dev, info.get('address'), info.get('netmask')) + LOG.debug('Configuring dev %s with %s / %s', dev, + info.get('address'), info.get('netmask')) # Configure an ipv4 address. - ifconfig = info.get('address') + ' netmask ' + info.get('netmask') + ifconfig = (info.get('address') + ' netmask ' + + info.get('netmask')) # Configure the gateway. self.updatercconf('defaultrouter', info.get('gateway')) @@ -264,10 +297,10 @@ class Distro(distros.Distro): if 'dns-nameservers' in info: nameservers.extend(info['dns-nameservers']) if 'dns-search' in info: - searchservers.extend(info['dns-search']) + searchdomains.extend(info['dns-search']) else: ifconfig = 'DHCP' - + self.updatercconf('ifconfig_' + dev, ifconfig) # Try to read the /etc/resolv.conf or just start from scratch if that @@ -276,7 +309,8 @@ class Distro(distros.Distro): resolvconf = ResolvConf(util.load_file(self.resolv_conf_fn)) resolvconf.parse() except IOError: - util.logexc(LOG, "Failed to parse %s, use new empty file", self.resolv_conf_fn) + util.logexc(LOG, "Failed to parse %s, use new empty file", + self.resolv_conf_fn) resolvconf = ResolvConf('') resolvconf.parse() @@ -323,6 +357,19 @@ class Distro(distros.Distro): util.logexc(LOG, "Failed to restore %s backup", self.login_conf_fn) + def _bring_up_interface(self, device_name): + if device_name.startswith('lo'): + return + dev = self.getnetifname(device_name) + cmd = ['/etc/rc.d/netif', 'start', dev] + LOG.debug("Attempting to bring up interface %s using command %s", + dev, cmd) + # This could return 1 when the interface has already been put UP by the + # OS. This is just fine. 
+ (_out, err) = util.subp(cmd, rcs=[0, 1]) + if len(err): + LOG.warn("Error running %s: %s", cmd, err) + def install_packages(self, pkglist): return diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py index c4b02de1..45c2e658 100644 --- a/cloudinit/distros/gentoo.py +++ b/cloudinit/distros/gentoo.py @@ -138,7 +138,7 @@ class Distro(distros.Distro): return hostname def set_timezone(self, tz): - set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) + distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) def package_command(self, command, args=None, pkgs=None): if pkgs is None: diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index e8abf111..1a269e08 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -98,7 +98,7 @@ class Distro(distros.Distro): rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg) return dev_names - def _dist_uses_systemd(self): + def uses_systemd(self): # Fedora 18 and RHEL 7 were the first adopters in their series (dist, vers) = util.system_info()['dist'][:2] major = (int)(vers.split('.')[0]) @@ -106,7 +106,7 @@ class Distro(distros.Distro): or (dist.startswith('Fedora') and major >= 18)) def apply_locale(self, locale, out_fn=None): - if self._dist_uses_systemd(): + if self.uses_systemd(): if not out_fn: out_fn = self.systemd_locale_conf_fn out_fn = self.systemd_locale_conf_fn @@ -119,7 +119,7 @@ class Distro(distros.Distro): rhel_util.update_sysconfig_file(out_fn, locale_cfg) def _write_hostname(self, hostname, out_fn): - if self._dist_uses_systemd(): + if self.uses_systemd(): util.subp(['hostnamectl', 'set-hostname', str(hostname)]) else: host_cfg = { @@ -135,14 +135,14 @@ class Distro(distros.Distro): return hostname def _read_system_hostname(self): - if self._dist_uses_systemd(): + if self.uses_systemd(): host_fn = self.systemd_hostname_conf_fn else: host_fn = self.hostname_conf_fn return (host_fn, self._read_hostname(host_fn)) def _read_hostname(self, filename, default=None): - if self._dist_uses_systemd(): + if self.uses_systemd(): (out, _err) = util.subp(['hostname']) if len(out): return out @@ -163,7 +163,7 @@ class Distro(distros.Distro): def set_timezone(self, tz): tz_file = self._find_tz_file(tz) - if self._dist_uses_systemd(): + if self.uses_systemd(): # Currently, timedatectl complains if invoked during startup # so for compatibility, create the link manually. util.del_file(self.tz_local_fn) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 0c751140..e69d06ff 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -28,23 +28,44 @@ LOG = logging.getLogger(__name__) SKIP_USERDATA_CODES = frozenset([httplib.NOT_FOUND]) -def maybe_json_object(text): - if not text: +class MetadataLeafDecoder(object): + """Decodes a leaf blob into something meaningful.""" + + def _maybe_json_object(self, text): + if not text: + return False + text = text.strip() + if text.startswith("{") and text.endswith("}"): + return True return False - text = text.strip() - if text.startswith("{") and text.endswith("}"): - return True - return False + + def __call__(self, field, blob): + if not blob: + return blob + if self._maybe_json_object(blob): + try: + # Assume it's json, unless it fails parsing... 
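
MetadataLeafDecoder's policy, condensed: blobs that look like JSON objects
are parsed, multi-line blobs become lists of lines, and everything else
passes through unchanged. A standalone sketch (without the warning log on
malformed JSON)::

    import json

    def decode_leaf(blob):
        if not blob:
            return blob
        text = blob.strip()
        if text.startswith("{") and text.endswith("}"):
            try:
                return json.loads(blob)
            except (ValueError, TypeError):
                pass  # looked like JSON, but was not
        if "\n" in blob:
            return blob.splitlines()
        return blob

    print(decode_leaf("key-0\nkey-1"))  # ['key-0', 'key-1']
    print(decode_leaf('{"a": 1}'))      # {'a': 1}
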
+ return json.loads(blob) + except (ValueError, TypeError) as e: + LOG.warn("Field %s looked like a json object, but it was" + " not: %s", field, e) + if blob.find("\n") != -1: + return blob.splitlines() + return blob # See: http://bit.ly/TyoUQs # class MetadataMaterializer(object): - def __init__(self, blob, base_url, caller): + def __init__(self, blob, base_url, caller, leaf_decoder=None): self._blob = blob self._md = None self._base_url = base_url self._caller = caller + if leaf_decoder is None: + self._leaf_decoder = MetadataLeafDecoder() + else: + self._leaf_decoder = leaf_decoder def _parse(self, blob): leaves = {} @@ -90,20 +111,6 @@ class MetadataMaterializer(object): self._md = self._materialize(self._blob, self._base_url) return self._md - def _decode_leaf_blob(self, field, blob): - if not blob: - return blob - if maybe_json_object(blob): - try: - # Assume it's json, unless it fails parsing... - return json.loads(blob) - except (ValueError, TypeError) as e: - LOG.warn("Field %s looked like a json object, but it was" - " not: %s", field, e) - if blob.find("\n") != -1: - return blob.splitlines() - return blob - def _materialize(self, blob, base_url): (leaves, children) = self._parse(blob) child_contents = {} @@ -117,7 +124,7 @@ class MetadataMaterializer(object): for (field, resource) in leaves.items(): leaf_url = url_helper.combine_url(base_url, resource) leaf_blob = str(self._caller(leaf_url)) - leaf_contents[field] = self._decode_leaf_blob(field, leaf_blob) + leaf_contents[field] = self._leaf_decoder(field, leaf_blob) joined = {} joined.update(child_contents) for field in leaf_contents.keys(): @@ -164,7 +171,8 @@ def get_instance_userdata(api_version='latest', def get_instance_metadata(api_version='latest', metadata_address='http://169.254.169.254', - ssl_details=None, timeout=5, retries=5): + ssl_details=None, timeout=5, retries=5, + leaf_decoder=None): md_url = url_helper.combine_url(metadata_address, api_version) # Note, 'meta-data' explicitly has trailing /. # this is required for CloudStack (LP: #1356855) @@ -175,7 +183,9 @@ def get_instance_metadata(api_version='latest', try: response = caller(md_url) - materializer = MetadataMaterializer(str(response), md_url, caller) + materializer = MetadataMaterializer(str(response), + md_url, caller, + leaf_decoder=leaf_decoder) md = materializer.materialize() if not isinstance(md, (dict)): md = {} diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 1bdca9f7..8d4df342 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -21,10 +21,13 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. 
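
With the decoder factored out, get_instance_metadata() now takes a
leaf_decoder argument so datasources can swap in their own leaf handling.
Hypothetical usage, assuming an EC2-style metadata service is reachable::

    from cloudinit import ec2_utils

    def raw_decoder(field, blob):
        # Keep each leaf exactly as returned; skip the JSON/splitlines
        # heuristics of the default MetadataLeafDecoder.
        return blob

    md = ec2_utils.get_instance_metadata(leaf_decoder=raw_decoder)
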
import cloudinit.util as util +from cloudinit.log import logging import re from prettytable import PrettyTable +LOG = logging.getLogger() + def netdev_info(empty=""): fields = ("hwaddr", "addr", "bcast", "mask") @@ -168,8 +171,9 @@ def route_pformat(): lines = [] try: routes = route_info() - except Exception: + except Exception as e: lines.append(util.center('Route info failed', '!', 80)) + util.logexc(LOG, "Route info failed: %s" % e) routes = None if routes is not None: fields = ['Route', 'Destination', 'Gateway', diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 0c35f83a..27658073 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -37,7 +37,9 @@ DEFAULT_METADATA = { VALID_DSMODES = ("local", "net", "pass", "disabled") FS_TYPES = ('vfat', 'iso9660') LABEL_TYPES = ('config-2',) -OPTICAL_DEVICES = tuple(('/dev/sr%s' % i for i in range(0, 2))) +POSSIBLE_MOUNTS = ('sr', 'cd') +OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS + for i in range(0, 2))) class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): @@ -70,7 +72,15 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): if not found: for dev in find_candidate_devs(): try: - results = util.mount_cb(dev, read_config_drive) + # Set mtype if freebsd and turn off sync + if dev.startswith("/dev/cd"): + mtype = "cd9660" + sync = False + else: + mtype = None + sync = True + results = util.mount_cb(dev, read_config_drive, mtype=mtype, + sync=sync) found = dev except openstack.NonReadable: pass @@ -125,7 +135,15 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): self.userdata_raw = results.get('userdata') self.version = results['version'] self.files.update(results.get('files', {})) - self.vendordata_raw = results.get('vendordata') + + vd = results.get('vendordata') + self.vendordata_pure = vd + try: + self.vendordata_raw = openstack.convert_vendordata_json(vd) + except ValueError as e: + LOG.warn("Invalid content in vendor-data: %s", e) + self.vendordata_raw = None + return True @@ -160,10 +178,10 @@ def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None): return "net" -def read_config_drive(source_dir, version="2012-08-10"): +def read_config_drive(source_dir): reader = openstack.ConfigDriveReader(source_dir) finders = [ - (reader.read_v2, [], {'version': version}), + (reader.read_v2, [], {}), (reader.read_v1, [], {}), ] excps = [] diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py new file mode 100644 index 00000000..069bdb41 --- /dev/null +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -0,0 +1,104 @@ +# vi: ts=4 expandtab +# +# Author: Neal Shrader <neal@digitalocean.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
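
The widened OPTICAL_DEVICES list above now covers FreeBSD-style cd(4) nodes
as well as Linux sr ones; evaluating the comprehension makes the candidate
set concrete::

    POSSIBLE_MOUNTS = ('sr', 'cd')
    OPTICAL_DEVICES = tuple('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
                            for i in range(0, 2))
    print(OPTICAL_DEVICES)
    # ('/dev/sr0', '/dev/sr1', '/dev/cd0', '/dev/cd1')
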
+ +from cloudinit import log as logging +from cloudinit import util +from cloudinit import sources +from cloudinit import ec2_utils +from types import StringType +import functools + + +LOG = logging.getLogger(__name__) + +BUILTIN_DS_CONFIG = { + 'metadata_url': 'http://169.254.169.254/metadata/v1/', + 'mirrors_url': 'http://mirrors.digitalocean.com/' +} +MD_RETRIES = 0 +MD_TIMEOUT = 1 + +class DataSourceDigitalOcean(sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.metadata = dict() + self.ds_cfg = util.mergemanydict([ + util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}), + BUILTIN_DS_CONFIG]) + self.metadata_address = self.ds_cfg['metadata_url'] + + if self.ds_cfg.get('retries'): + self.retries = self.ds_cfg['retries'] + else: + self.retries = MD_RETRIES + + if self.ds_cfg.get('timeout'): + self.timeout = self.ds_cfg['timeout'] + else: + self.timeout = MD_TIMEOUT + + def get_data(self): + caller = functools.partial(util.read_file_or_url, timeout=self.timeout, + retries=self.retries) + md = ec2_utils.MetadataMaterializer(str(caller(self.metadata_address)), + base_url=self.metadata_address, + caller=caller) + + self.metadata = md.materialize() + + if self.metadata.get('id'): + return True + else: + return False + + def get_userdata_raw(self): + return "\n".join(self.metadata['user-data']) + + def get_vendordata_raw(self): + return "\n".join(self.metadata['vendor-data']) + + def get_public_ssh_keys(self): + if type(self.metadata['public-keys']) is StringType: + return [self.metadata['public-keys']] + else: + return self.metadata['public-keys'] + + @property + def availability_zone(self): + return self.metadata['region'] + + def get_instance_id(self): + return self.metadata['id'] + + def get_hostname(self, fqdn=False): + return self.metadata['hostname'] + + def get_package_mirror_info(self): + return self.ds_cfg['mirrors_url'] + + @property + def launch_index(self): + return None + +# Used to match classes to dependencies +datasources = [ + (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + ] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 2f53c1ba..7ba60735 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -215,8 +215,7 @@ def transport_iso9660(require_iso=True): continue try: - (fname, contents) = util.mount_cb(fullp, - get_ovf_env, mtype=mtype) + (fname, contents) = util.mount_cb(fullp, get_ovf_env, mtype=mtype) except util.MountFailedError: LOG.debug("%s not mountable as iso9660" % fullp) continue diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 0970d07b..469c2e2a 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -88,11 +88,9 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): md_urls = [] url2base = {} for url in urls: - for version in openstack.OS_VERSIONS + (openstack.OS_LATEST,): - md_url = url_helper.combine_url(url, 'openstack', - version, 'meta_data.json') - md_urls.append(md_url) - url2base[md_url] = url + md_url = url_helper.combine_url(url, 'openstack') + md_urls.append(md_url) + url2base[md_url] = url (max_wait, timeout) = self._get_url_settings() start_time = time.time() @@ -119,8 
+117,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): 'Crawl of openstack metadata service', read_metadata_service, args=[self.metadata_address], - kwargs={'ssl_details': self.ssl_details, - 'version': openstack.OS_HAVANA}) + kwargs={'ssl_details': self.ssl_details}) except openstack.NonReadable: return False except (openstack.BrokenMetadata, IOError): @@ -143,20 +140,20 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): self.version = results['version'] self.files.update(results.get('files', {})) - # if vendordata includes 'cloud-init', then read that explicitly - # for cloud-init (for namespacing). vd = results.get('vendordata') - if isinstance(vd, dict) and 'cloud-init' in vd: - self.vendordata_raw = vd['cloud-init'] - else: - self.vendordata_raw = vd + self.vendordata_pure = vd + try: + self.vendordata_raw = openstack.convert_vendordata_json(vd) + except ValueError as e: + LOG.warn("Invalid content in vendor-data: %s", e) + self.vendordata_raw = None return True -def read_metadata_service(base_url, version=None, ssl_details=None): +def read_metadata_service(base_url, ssl_details=None): reader = openstack.MetadataReader(base_url, ssl_details=ssl_details) - return reader.read_v2(version=version) + return reader.read_v2() # Used to match classes to dependencies diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 3c6bb6aa..b7e19314 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -21,6 +21,7 @@ import abc import base64 import copy +import functools import os from cloudinit import ec2_utils @@ -48,6 +49,7 @@ OS_LATEST = 'latest' OS_FOLSOM = '2012-08-10' OS_GRIZZLY = '2013-04-04' OS_HAVANA = '2013-10-17' +# keep this in chronological order. new supported versions go at the end. OS_VERSIONS = ( OS_FOLSOM, OS_GRIZZLY, @@ -161,25 +163,27 @@ class BaseReader(object): def _read_ec2_metadata(self): pass - def _find_working_version(self, version): + def _find_working_version(self): try: - versions_available = self._fetch_available_versions(self) + versions_available = self._fetch_available_versions() except Exception as e: - LOG.warn("Unable to read openstack versions from %s due to: %s", - self.base_path, e) + LOG.debug("Unable to read openstack versions from %s due to: %s", + self.base_path, e) versions_available = [] - search_versions = [version] + list(OS_VERSIONS) + # openstack.OS_VERSIONS is stored in chronological order, so + # reverse it to check newest first. + supported = [v for v in reversed(list(OS_VERSIONS))] selected_version = OS_LATEST - for potential_version in search_versions: + + for potential_version in supported: if potential_version not in versions_available: continue selected_version = potential_version break - if selected_version != version: - LOG.warn("Version '%s' not available, attempting to use" - " version '%s' instead", version, selected_version) + LOG.debug("Selected version '%s' from %s", selected_version, + versions_available) return selected_version def _read_content_path(self, item): @@ -191,7 +195,7 @@ class BaseReader(object): path = self._path_join(self.base_path, "openstack", *path_pieces) return self._path_read(path) - def read_v2(self, version=None): + def read_v2(self): """Reads a version 2 formatted location. Return a dict with metadata, userdata, ec2-metadata, dsmode, @@ -200,6 +204,9 @@ class BaseReader(object): If not a valid location, raise a NonReadable exception. 
""" + load_json_anytype = functools.partial( + util.load_json, root_types=(dict, basestring, list)) + def datafiles(version): files = {} files['metadata'] = ( @@ -218,16 +225,15 @@ class BaseReader(object): files['vendordata'] = ( self._path_join("openstack", version, 'vendor_data.json'), False, - util.load_json, + load_json_anytype, ) return files - version = self._find_working_version(version) results = { 'userdata': '', 'version': 2, } - data = datafiles(version) + data = datafiles(self._find_working_version()) for (name, (path, required, translator)) in data.iteritems(): path = self._path_join(self.base_path, path) data = None @@ -239,7 +245,8 @@ class BaseReader(object): LOG.debug("Failed reading optional path %s due" " to: %s", path, e) else: - LOG.exception("Failed reading mandatory path %s", path) + LOG.debug("Failed reading mandatory path %s due" + " to: %s", path, e) else: found = True if required and not found: @@ -325,7 +332,7 @@ class ConfigDriveReader(BaseReader): path = self._path_join(self.base_path, 'openstack') found = [d for d in os.listdir(path) if os.path.isdir(os.path.join(path))] - self._versions = tuple(found) + self._versions = found return self._versions def _read_ec2_metadata(self): @@ -418,18 +425,18 @@ class MetadataReader(BaseReader): def _fetch_available_versions(self): # <baseurl>/openstack/ returns a newline separated list of versions if self._versions is not None: - return self.os_versions + return self._versions found = [] + version_path = self._path_join(self.base_path, "openstack") content = self._path_read(version_path) for line in content.splitlines(): line = line.strip() if not line: continue found.append(line) - self._versions = tuple(found) + self._versions = found return self._versions - def _path_read(self, path): def should_retry_cb(_request_args, cause): @@ -456,3 +463,28 @@ class MetadataReader(BaseReader): return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details, timeout=self.timeout, retries=self.retries) + + +def convert_vendordata_json(data, recurse=True): + """ data: a loaded json *object* (strings, arrays, dicts). + return something suitable for cloudinit vendordata_raw. 
+ + if data is: + None: return None + string: return string + list: return data + the list is then processed in UserDataProcessor + dict: return convert_vendordata_json(data.get('cloud-init')) + """ + if not data: + return None + if isinstance(data, (str, unicode, basestring)): + return data + if isinstance(data, list): + return copy.deepcopy(data) + if isinstance(data, dict): + if recurse is True: + return convert_vendordata_json(data.get('cloud-init'), + recurse=False) + raise ValueError("vendordata['cloud-init'] cannot be dict") + raise ValueError("Unknown data type for vendordata: %s" % type(data)) diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 70a577bc..14d0cb0f 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -293,7 +293,10 @@ def parse_ssh_config(fname): if not line or line.startswith("#"): lines.append(SshdConfigLine(line)) continue - (key, val) = line.split(None, 1) + try: + key, val = line.split(None, 1) + except ValueError: + key, val = line.split('=', 1) lines.append(SshdConfigLine(line, key, val)) return lines diff --git a/cloudinit/templater.py b/cloudinit/templater.py index 02f6261d..4cd3f13d 100644 --- a/cloudinit/templater.py +++ b/cloudinit/templater.py @@ -89,9 +89,11 @@ def detect_template(text): return CTemplate(content, searchList=[params]).respond() def jinja_render(content, params): + # keep_trailing_newline is in jinja2 2.7+, not 2.6 + add = "\n" if content.endswith("\n") else "" return JTemplate(content, undefined=jinja2.StrictUndefined, - trim_blocks=True).render(**params) + trim_blocks=True).render(**params) + add if text.find("\n") != -1: ident, rest = text.split("\n", 1) diff --git a/cloudinit/util.py b/cloudinit/util.py index bdb0f268..ee5e5c0a 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -191,11 +191,11 @@ def ExtendedTemporaryFile(**kwargs): return fh -def fork_cb(child_cb, *args): +def fork_cb(child_cb, *args, **kwargs): fid = os.fork() if fid == 0: try: - child_cb(*args) + child_cb(*args, **kwargs) os._exit(0) except: logexc(LOG, "Failed forking and calling callback %s", @@ -399,6 +399,10 @@ def get_cfg_option_str(yobj, key, default=None): return val +def get_cfg_option_int(yobj, key, default=0): + return int(get_cfg_option_str(yobj, key, default=default)) + + def system_info(): return { 'platform': platform.platform(), @@ -1146,7 +1150,7 @@ def chownbyname(fname, user=None, group=None): # this returns the specific 'mode' entry, cleanly formatted, with value def get_output_cfg(cfg, mode): ret = [None, None] - if cfg or 'output' not in cfg: + if not cfg or 'output' not in cfg: return ret outcfg = cfg['output'] @@ -1270,14 +1274,14 @@ def read_write_cmdline_url(target_fn): logexc(LOG, "Failed writing url content to %s", target_fn) -def yaml_dumps(obj): - formatted = yaml.dump(obj, - line_break="\n", - indent=4, - explicit_start=True, - explicit_end=True, - default_flow_style=False) - return formatted +def yaml_dumps(obj, explicit_start=True, explicit_end=True): + return yaml.safe_dump(obj, + line_break="\n", + indent=4, + explicit_start=explicit_start, + explicit_end=explicit_end, + default_flow_style=False, + allow_unicode=True) def ensure_dir(path, mode=None): @@ -1297,7 +1301,7 @@ def unmounter(umount): yield umount finally: if umount: - umount_cmd = ["umount", '-l', umount] + umount_cmd = ["umount", umount] subp(umount_cmd) @@ -1346,37 +1350,70 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True): Mount the device, call method 'callback' passing the directory in which it was 
mounted, then unmount. Return whatever 'callback' returned. If data != None, also pass data to callback. + + mtype is a filesystem type. it may be a list, string (a single fsname) + or a list of fsnames. """ + + if isinstance(mtype, str): + mtypes = [mtype] + elif isinstance(mtype, (list, tuple)): + mtypes = list(mtype) + elif mtype is None: + mtypes = None + + # clean up 'mtype' input a bit based on platform. + platsys = platform.system().lower() + if platsys == "linux": + if mtypes is None: + mtypes = ["auto"] + elif platsys.endswith("bsd"): + if mtypes is None: + mtypes = ['ufs', 'cd9660', 'vfat'] + for index, mtype in enumerate(mtypes): + if mtype == "iso9660": + mtypes[index] = "cd9660" + else: + # we cannot do a smart "auto", so just call 'mount' once with no -t + mtypes = [''] + mounted = mounts() with tempdir() as tmpd: umount = False if device in mounted: mountpoint = mounted[device]['mountpoint'] else: - try: - mountcmd = ['mount'] - mountopts = [] - if rw: - mountopts.append('rw') - else: - mountopts.append('ro') - if sync: - # This seems like the safe approach to do - # (ie where this is on by default) - mountopts.append("sync") - if mountopts: - mountcmd.extend(["-o", ",".join(mountopts)]) - if mtype: - mountcmd.extend(['-t', mtype]) - mountcmd.append(device) - mountcmd.append(tmpd) - subp(mountcmd) - umount = tmpd # This forces it to be unmounted (when set) - mountpoint = tmpd - except (IOError, OSError) as exc: - raise MountFailedError(("Failed mounting %s " - "to %s due to: %s") % + for mtype in mtypes: + mountpoint = None + try: + mountcmd = ['mount'] + mountopts = [] + if rw: + mountopts.append('rw') + else: + mountopts.append('ro') + if sync: + # This seems like the safe approach to do + # (ie where this is on by default) + mountopts.append("sync") + if mountopts: + mountcmd.extend(["-o", ",".join(mountopts)]) + if mtype: + mountcmd.extend(['-t', mtype]) + mountcmd.append(device) + mountcmd.append(tmpd) + subp(mountcmd) + umount = tmpd # This forces it to be unmounted (when set) + mountpoint = tmpd + break + except (IOError, OSError) as exc: + LOG.debug("Failed mount of '%s' as '%s': %s", + device, mtype, exc) + pass + if not mountpoint: + raise MountFailedError("Failed mounting %s to %s due to: %s" % (device, tmpd, exc)) + # Be nice and ensure it ends with a slash if not mountpoint.endswith("/"): mountpoint += "/" @@ -1924,3 +1961,53 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep): raise ValueError("Missing required files: %s", ','.join(missing)) return ret + + +def read_meminfo(meminfo="/proc/meminfo", raw=False): + # read a /proc/meminfo style file and return + # a dict with 'total', 'free', and 'available' + mpliers = {'kB': 2**10, 'mB': 2 ** 20, 'B': 1, 'gB': 2 ** 30} + kmap = {'MemTotal:': 'total', 'MemFree:': 'free', + 'MemAvailable:': 'available'} + ret = {} + for line in load_file(meminfo).splitlines(): + try: + key, value, unit = line.split() + except ValueError: + key, value = line.split() + unit = 'B' + if raw: + ret[key] = int(value) * mpliers[unit] + elif key in kmap: + ret[kmap[key]] = int(value) * mpliers[unit] + + return ret + + +def human2bytes(size): + """Convert human string or integer to size in bytes + 10M => 10485760 + .5G => 536870912 + """ + size_in = size + if size.endswith("B"): + size = size[:-1] + + mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40} + + num = size + mplier = 'B' + for m in mpliers: + if size.endswith(m): + mplier = m + num = size[0:-len(m)] + + try: + num = float(num) + 
except ValueError: + raise ValueError("'%s' is not valid input." % size_in) + + if num < 0: + raise ValueError("'%s': cannot be negative" % size_in) + + return int(num * mpliers[mplier]) diff --git a/cloudinit/version.py b/cloudinit/version.py index edb651a9..3d1d1d23 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -20,7 +20,7 @@ from distutils import version as vr def version(): - return vr.StrictVersion("0.7.6") + return vr.StrictVersion("0.7.7") def version_string():
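
For reference, a few worked examples of the human2bytes() conversion added
above (string inputs only; an optional trailing 'B' is stripped, and a bare
number is taken as bytes)::

    print(human2bytes("10M"))   # 10485760
    print(human2bytes(".5G"))   # 536870912
    print(human2bytes("2KB"))   # 2048
    print(human2bytes("1024"))  # 1024
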