42 files changed, 1634 insertions, 181 deletions
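The headline changes in this set are cloud-config merging via jsonp, the new SmartOS datasource, and the util.log_time helper. As a hedged sketch of what the new '#cloud-config-jsonp' part type (wired up in cloudinit/handlers/cloud_config.py below) makes possible, using the jsonpatch library this diff adds to Requires; the dict and patch contents here are invented for illustration::

    # Python 2, matching the codebase; assumes the 'jsonpatch' module from
    # the python-json-patch package (the new dependency in this diff).
    import jsonpatch

    # The handler's accumulated cloud-config so far (its self.cloud_buf).
    cloud_buf = {'hostname': 'default', 'runcmd': ['echo one']}

    # Body of a '#cloud-config-jsonp' part after the header is stripped,
    # parsed as an RFC 6902 JSON Patch.
    payload = ('[{"op": "replace", "path": "/hostname", "value": "myhost"},'
               ' {"op": "add", "path": "/runcmd/-", "value": "echo two"}]')

    patch = jsonpatch.JsonPatch.from_string(payload)
    print patch.apply(cloud_buf, in_place=False)
    # -> {'hostname': 'myhost', 'runcmd': ['echo one', 'echo two']}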
@@ -9,6 +9,12 @@ caused a problem on rhel5 if missing. - support individual MIME segments to be gzip compressed (LP: #1203203) - always finalize handlers even if processing failed (LP: #1203368) + - support merging into cloud-config via jsonp. (LP: #1200476) + - add datasource 'SmartOS' for Joyent Cloud. Adds a dependency on serial. + - add 'log_time' helper to util for timing how long things take, + which also reads from uptime; uptime is useful as the clock may change + during boot due to ntp. + - prefer growpart resizer to 'parted resizepart' (LP: #1212492) 0.7.2: - add a debian watch file - add 'sudo' entry to ubuntu's default user (LP: #1080717) @@ -10,6 +10,10 @@ PrettyTable # datasource is removed, this is no longer needed oauth +# This one is currently used only by the SmartOS datasource. If that +# datasource is removed, this is no longer needed +pyserial + # This is only needed for places where we need to support configs in a manner # that the built-in config parser is not sufficient (i.e. # when we need to preserve comments, or do not have a top-level @@ -27,3 +31,6 @@ requests # Boto for ec2 boto + +# For patching pieces of cloud-config together +jsonpatch diff --git a/bin/cloud-init b/bin/cloud-init index c5a5b949..b4f9fd07 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -502,7 +502,9 @@ def main(): signal_handler.attach_handlers() (name, functor) = args.action - return functor(name, args) + + return util.log_time(logfunc=LOG.debug, msg="cloud-init mode '%s'" % name, + get_uptime=True, func=functor, args=(name, args)) if __name__ == '__main__': diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 3ce3b351..5a407016 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -27,7 +27,8 @@ from cloudinit import util distros = ['ubuntu', 'debian'] PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n" -PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy" +APT_CONFIG_FN = "/etc/apt/apt.conf.d/94cloud-init-config" +APT_PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy" # A temporary shell program to get a given gpg key # from a given keyserver @@ -67,18 +68,10 @@ def handle(name, cfg, cloud, log, _args): "security": "security.ubuntu.com/ubuntu"}) rename_apt_lists(old_mirrors, mirrors) - # Set up any apt proxy - proxy = cfg.get("apt_proxy", None) - proxy_filename = PROXY_FN - if proxy: - try: - # See man 'apt.conf' - contents = PROXY_TPL % (proxy) - util.write_file(proxy_filename, contents) - except Exception as e: - util.logexc(log, "Failed to write proxy to %s", proxy_filename) - elif os.path.isfile(proxy_filename): - util.del_file(proxy_filename) + try: + apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN) + except Exception as e: + log.warn("failed to apply proxy or apt config info: %s", e) # Process 'apt_sources' if 'apt_sources' in cfg: @@ -256,3 +249,22 @@ def find_apt_mirror_info(cloud, cfg): mirror_info.update({'primary': mirror}) return mirror_info + + +def apply_apt_config(cfg, proxy_fname, config_fname): + # Set up any apt proxy + cfgs = (('apt_proxy', 'Acquire::HTTP::Proxy "%s";'), + ('apt_http_proxy', 'Acquire::HTTP::Proxy "%s";'), + ('apt_ftp_proxy', 'Acquire::FTP::Proxy "%s";'), + ('apt_https_proxy', 'Acquire::HTTPS::Proxy "%s";')) + + proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)] + if len(proxies): + util.write_file(proxy_fname, '\n'.join(proxies) + '\n') + elif os.path.isfile(proxy_fname): + util.del_file(proxy_fname) + + if cfg.get('apt_config', None): + 
util.write_file(config_fname, cfg.get('apt_config')) + elif os.path.isfile(config_fname): + util.del_file(config_fname) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 4f8c8f80..2d54aabf 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -96,7 +96,7 @@ class ResizeParted(object): def resize(self, diskdev, partnum, partdev): before = get_size(partdev) try: - util.subp(["parted", "resizepart", diskdev, partnum]) + util.subp(["parted", diskdev, "resizepart", partnum]) except util.ProcessExecutionError as e: raise ResizeFailedException(e) @@ -264,11 +264,14 @@ def handle(_name, cfg, _cloud, log, _args): raise e return - resized = resize_devices(resizer, devices) + resized = util.log_time(logfunc=log.debug, msg="resize_devices", + func=resize_devices, args=(resizer, devices)) for (entry, action, msg) in resized: if action == RESIZE.CHANGED: log.info("'%s' resized: %s" % (entry, msg)) else: log.debug("'%s' %s: %s" % (entry, action, msg)) -RESIZERS = (('parted', ResizeParted), ('growpart', ResizeGrowPart)) +# LP: 1212444 FIXME re-order and favor ResizeParted +#RESIZERS = (('growpart', ResizeGrowPart),) +RESIZERS = (('growpart', ResizeGrowPart), ('parted', ResizeParted)) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index b4ee16b2..56040fdd 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -21,7 +21,6 @@ import errno import os import stat -import time from cloudinit.settings import PER_ALWAYS from cloudinit import util @@ -120,9 +119,12 @@ def handle(name, cfg, _cloud, log, args): if resize_root == NOBLOCK: # Fork to a child that will run # the resize command - util.fork_cb(do_resize, resize_cmd, log) + util.fork_cb( + lambda: util.log_time(logfunc=log.debug, msg="backgrounded Resizing", + func=do_resize, args=(resize_cmd, log))) else: - do_resize(resize_cmd, log) + util.log_time(logfunc=log.debug, msg="Resizing", + func=do_resize, args=(resize_cmd, log)) action = 'Resized' if resize_root == NOBLOCK: @@ -132,13 +134,10 @@ def handle(name, cfg, _cloud, log, args): def do_resize(resize_cmd, log): - start = time.time() try: util.subp(resize_cmd) except util.ProcessExecutionError: util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd) raise - tot_time = time.time() - start - log.debug("Resizing took %.3f seconds", tot_time) # TODO(harlowja): Should we add a fsck check after this to make # sure we didn't corrupt anything? 
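Both modules above now route their work through the new util.log_time helper, whose definition appears later in this diff (cloudinit/util.py). A minimal sketch of the calling convention; the wrapped function and message are invented for illustration::

    from cloudinit import log as logging
    from cloudinit import util

    LOG = logging.getLogger(__name__)

    def do_work(path, mode):
        # stand-in for resize_devices()/do_resize() above
        return "%s:%s" % (path, mode)

    # Runs do_work('/tmp/x', 'rw'), logs "doing work took N.NNN seconds"
    # through LOG.debug, and passes do_work's return value through.
    # get_uptime=True also reports the delta read from /proc/uptime, which
    # is robust against ntp stepping the wall clock during boot.
    ret = util.log_time(logfunc=LOG.debug, msg="doing work",
                        get_uptime=True, func=do_work,
                        args=('/tmp/x', 'rw'))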
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 249e1b19..74e95797 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -47,9 +47,11 @@ LOG = logging.getLogger(__name__) class Distro(object): __metaclass__ = abc.ABCMeta + hosts_fn = "/etc/hosts" ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users" hostname_conf_fn = "/etc/hostname" + tz_zone_dir = "/usr/share/zoneinfo" def __init__(self, name, cfg, paths): self._paths = paths @@ -66,6 +68,13 @@ class Distro(object): # to write this blob out in a distro format raise NotImplementedError() + def _find_tz_file(self, tz): + tz_file = os.path.join(self.tz_zone_dir, str(tz)) + if not os.path.isfile(tz_file): + raise IOError(("Invalid timezone %s," + " no file found at %s") % (tz, tz_file)) + return tz_file + def get_option(self, opt_name, default=None): return self._cfg.get(opt_name, default) diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 0811eefd..8fe49cbe 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -44,7 +44,6 @@ class Distro(distros.Distro): network_conf_fn = "/etc/network/interfaces" tz_conf_fn = "/etc/timezone" tz_local_fn = "/etc/localtime" - tz_zone_dir = "/usr/share/zoneinfo" def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -130,12 +129,7 @@ class Distro(distros.Distro): return "127.0.1.1" def set_timezone(self, tz): - # TODO(harlowja): move this code into - # the parent distro... - tz_file = os.path.join(self.tz_zone_dir, str(tz)) - if not os.path.isfile(tz_file): - raise RuntimeError(("Invalid timezone %s," - " no file found at %s") % (tz, tz_file)) + tz_file = self._find_tz_file(tz) # Note: "" provides trailing newline during join tz_lines = [ util.make_header(), diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index a022ca60..30195384 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -20,8 +20,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -import os - from cloudinit import distros from cloudinit import helpers from cloudinit import log as logging @@ -51,7 +49,6 @@ class Distro(distros.Distro): network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s' resolve_conf_fn = "/etc/resolv.conf" tz_local_fn = "/etc/localtime" - tz_zone_dir = "/usr/share/zoneinfo" def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -164,12 +161,7 @@ class Distro(distros.Distro): return distros.Distro._bring_up_interfaces(self, device_names) def set_timezone(self, tz): - # TODO(harlowja): move this code into - # the parent distro... - tz_file = os.path.join(self.tz_zone_dir, str(tz)) - if not os.path.isfile(tz_file): - raise RuntimeError(("Invalid timezone %s," - " no file found at %s") % (tz, tz_file)) + tz_file = self._find_tz_file(tz) if self._dist_uses_systemd(): # Currently, timedatectl complains if invoked during startup # so for compatibility, create the link manually. diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py index 904e931a..f2ac4efc 100644 --- a/cloudinit/distros/sles.py +++ b/cloudinit/distros/sles.py @@ -18,8 +18,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
-import os - from cloudinit import distros from cloudinit.distros.parsers.hostname import HostnameConf @@ -42,7 +40,6 @@ class Distro(distros.Distro): network_script_tpl = '/etc/sysconfig/network/ifcfg-%s' resolve_conf_fn = '/etc/resolv.conf' tz_local_fn = '/etc/localtime' - tz_zone_dir = '/usr/share/zoneinfo' def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -151,12 +148,7 @@ class Distro(distros.Distro): return distros.Distro._bring_up_interfaces(self, device_names) def set_timezone(self, tz): - # TODO(harlowja): move this code into - # the parent distro... - tz_file = os.path.join(self.tz_zone_dir, str(tz)) - if not os.path.isfile(tz_file): - raise RuntimeError(("Invalid timezone %s," - " no file found at %s") % (tz, tz_file)) + tz_file = self._find_tz_file(tz) # Adjust the sysconfig clock zone setting clock_cfg = { 'TIMEZONE': str(tz), diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 1d450061..2ddc75f4 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -62,6 +62,7 @@ INCLUSION_TYPES_MAP = { '#part-handler': 'text/part-handler', '#cloud-boothook': 'text/cloud-boothook', '#cloud-config-archive': 'text/cloud-config-archive', + '#cloud-config-jsonp': 'text/cloud-config-jsonp', } # Sorted longest first diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 730672d7..34a73115 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -20,6 +20,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. +import jsonpatch + from cloudinit import handlers from cloudinit import log as logging from cloudinit import mergers @@ -50,6 +52,13 @@ MERGE_HEADER = 'Merge-Type' # This gets loaded into yaml with final result {'a': 22} DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()') CLOUD_PREFIX = "#cloud-config" +JSONP_PREFIX = "#cloud-config-jsonp" + +# The file header -> content types this module will handle. +CC_TYPES = { + JSONP_PREFIX: handlers.type_from_starts_with(JSONP_PREFIX), + CLOUD_PREFIX: handlers.type_from_starts_with(CLOUD_PREFIX), +} class CloudConfigPartHandler(handlers.Handler): @@ -60,9 +69,7 @@ class CloudConfigPartHandler(handlers.Handler): self.file_names = [] def list_types(self): - return [ - handlers.type_from_starts_with(CLOUD_PREFIX), - ] + return list(CC_TYPES.values()) def _write_cloud_config(self): if not self.cloud_fn: @@ -108,13 +115,21 @@ class CloudConfigPartHandler(handlers.Handler): all_mergers = DEF_MERGERS return (payload_yaml, all_mergers) + def _merge_patch(self, payload): + # JSON doesn't handle comments in this manner, so ensure that + # if we started with this 'type' that we remove it before + # attempting to load it as json (which the jsonpatch library will + # attempt to do). + payload = payload.lstrip() + payload = util.strip_prefix_suffix(payload, prefix=JSONP_PREFIX) + patch = jsonpatch.JsonPatch.from_string(payload) + LOG.debug("Merging by applying json patch %s", patch) + self.cloud_buf = patch.apply(self.cloud_buf, in_place=False) + def _merge_part(self, payload, headers): (payload_yaml, my_mergers) = self._extract_mergers(payload, headers) LOG.debug("Merging by applying %s", my_mergers) merger = mergers.construct(my_mergers) - if self.cloud_buf is None: - # First time through, merge with an empty dict... 
- self.cloud_buf = {} self.cloud_buf = merger.merge(self.cloud_buf, payload_yaml) def _reset(self): @@ -131,7 +146,13 @@ class CloudConfigPartHandler(handlers.Handler): self._reset() return try: - self._merge_part(payload, headers) + # First time through, merge with an empty dict... + if self.cloud_buf is None or not self.file_names: + self.cloud_buf = {} + if ctype == CC_TYPES[JSONP_PREFIX]: + self._merge_patch(payload) + else: + self._merge_part(payload, headers) # Ensure filename is ok to store for i in ("\n", "\r", "\t"): filename = filename.replace(i, " ") diff --git a/cloudinit/settings.py b/cloudinit/settings.py index dc371cd2..9f6badae 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -37,6 +37,7 @@ CFG_BUILTIN = { 'MAAS', 'Ec2', 'CloudStack', + 'SmartOS', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 0a5caebe..66d7728b 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -17,6 +17,7 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. import base64 +import crypt import os import os.path import time @@ -31,9 +32,21 @@ LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} AGENT_START = ['service', 'walinuxagent', 'start'] -BUILTIN_DS_CONFIG = {'datasource': {DS_NAME: { - 'agent_command': AGENT_START, - 'data_dir': "/var/lib/waagent"}}} +BOUNCE_COMMAND = ['sh', '-xc', + "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"] + +BUILTIN_DS_CONFIG = { + 'agent_command': AGENT_START, + 'data_dir': "/var/lib/waagent", + 'set_hostname': True, + 'hostname_bounce': { + 'interface': 'eth0', + 'policy': True, + 'command': BOUNCE_COMMAND, + 'hostname_command': 'hostname', + } +} +DS_CFG_PATH = ['datasource', DS_NAME] class DataSourceAzureNet(sources.DataSource): @@ -42,19 +55,19 @@ class DataSourceAzureNet(sources.DataSource): self.seed_dir = os.path.join(paths.seed_dir, 'azure') self.cfg = {} self.seed = None + self.ds_cfg = util.mergemanydict([ + util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), + BUILTIN_DS_CONFIG]) def __str__(self): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) def get_data(self): - ddir_cfgpath = ['datasource', DS_NAME, 'data_dir'] # azure removes/ejects the cdrom containing the ovf-env.xml # file on reboot. 
So, in order to successfully reboot we + need to look in the datadir and consider that valid - ddir = util.get_cfg_by_path(self.sys_cfg, ddir_cfgpath) - if ddir is None: - ddir = util.get_cfg_by_path(BUILTIN_DS_CONFIG, ddir_cfgpath) + ddir = self.ds_cfg['data_dir'] candidates = [self.seed_dir] candidates.extend(list_possible_azure_ds_devs()) @@ -91,44 +104,46 @@ class DataSourceAzureNet(sources.DataSource): return False if found == ddir: - LOG.debug("using cached datasource in %s", ddir) - - fields = [('cmd', ['datasource', DS_NAME, 'agent_command']), - ('datadir', ddir_cfgpath)] - mycfg = {} - for cfg in (self.cfg, self.sys_cfg, BUILTIN_DS_CONFIG): - for name, path in fields: - if name in mycfg: - continue - value = util.get_cfg_by_path(cfg, keyp=path) - if value is not None: - mycfg[name] = value + LOG.debug("using files cached in %s", ddir) + + # now update ds_cfg to reflect the contents passed in via config + usercfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) + self.ds_cfg = util.mergemanydict([usercfg, self.ds_cfg]) + mycfg = self.ds_cfg # walinux agent writes files world readable, but expects # the directory to be protected. - write_files(mycfg['datadir'], files, dirmode=0700) + write_files(mycfg['data_dir'], files, dirmode=0700) + + # handle the hostname 'publishing' + try: + handle_set_hostname(mycfg.get('set_hostname'), + self.metadata.get('local-hostname'), + mycfg['hostname_bounce']) + except Exception as e: + LOG.warn("Failed publishing hostname: %s" % e) + util.logexc(LOG, "handling set_hostname failed") try: - invoke_agent(mycfg['cmd']) + invoke_agent(mycfg['agent_command']) except util.ProcessExecutionError: # claim the datasource even if the command failed - util.logexc(LOG, "agent command '%s' failed.", mycfg['cmd']) + util.logexc(LOG, "agent command '%s' failed.", + mycfg['agent_command']) - shcfgxml = os.path.join(mycfg['datadir'], "SharedConfig.xml") + shcfgxml = os.path.join(mycfg['data_dir'], "SharedConfig.xml") wait_for = [shcfgxml] fp_files = [] for pk in self.cfg.get('_pubkeys', []): bname = pk['fingerprint'] + ".crt" - fp_files += [os.path.join(mycfg['datadir'], bname)] + fp_files += [os.path.join(mycfg['data_dir'], bname)] - start = time.time() - missing = wait_for_files(wait_for + fp_files) + missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", + func=wait_for_files, + args=(wait_for + fp_files,)) if len(missing): LOG.warn("Did not find files, but going on: %s", missing) - else: - LOG.debug("waited %.3f seconds for %d files to appear", - time.time() - start, len(wait_for)) if shcfgxml in missing: LOG.warn("SharedConfig.xml missing, using static instance-id") @@ -148,6 +163,56 @@ class DataSourceAzureNet(sources.DataSource): return self.cfg +def handle_set_hostname(enabled, hostname, cfg): + if not util.is_true(enabled): + return + + if not hostname: + LOG.warn("set_hostname was true but no local-hostname") + return + + apply_hostname_bounce(hostname=hostname, policy=cfg['policy'], + interface=cfg['interface'], + command=cfg['command'], + hostname_command=cfg['hostname_command']) + + +def apply_hostname_bounce(hostname, policy, interface, command, + hostname_command="hostname"): + # set the hostname to 'hostname' if it is not already set to that. 
+ # then, if policy is not off, bounce the interface using command + prev_hostname = util.subp(hostname_command, capture=True)[0].strip() + + util.subp([hostname_command, hostname]) + + msg = ("phostname=%s hostname=%s policy=%s interface=%s" % + (prev_hostname, hostname, policy, interface)) + + if util.is_false(policy): + LOG.debug("pubhname: policy false, skipping [%s]", msg) + return + + if prev_hostname == hostname and policy != "force": + LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg) + return + + env = os.environ.copy() + env['interface'] = interface + env['hostname'] = hostname + env['old_hostname'] = prev_hostname + + if command == "builtin": + command = BOUNCE_COMMAND + + LOG.debug("pubhname: publishing hostname [%s]", msg) + shell = not isinstance(command, (list, tuple)) + # capture=False, see comments in bug 1202758 and bug 1206164. + util.log_time(logfunc=LOG.debug, msg="publishing hostname", + get_uptime=True, func=util.subp, + kwargs={'args': command, 'shell': shell, 'capture': False, + 'env': env}) + + def crtfile_to_pubkey(fname): pipeline = ('openssl x509 -noout -pubkey < "$0" |' 'ssh-keygen -i -m PKCS8 -f /dev/stdin') @@ -319,15 +384,21 @@ def read_azure_ovf(contents): name = child.localName.lower() simple = False + value = "" if (len(child.childNodes) == 1 and child.childNodes[0].nodeType == dom.TEXT_NODE): simple = True value = child.childNodes[0].wholeText + attrs = {k: v for k, v in child.attributes.items()} + # we accept either UserData or CustomData. If both are present # then behavior is undefined. if (name == "userdata" or name == "customdata"): - ud = base64.b64decode(''.join(value.split())) + if attrs.get('encoding') in (None, "base64"): + ud = base64.b64decode(''.join(value.split())) + else: + ud = value elif name == "username": username = value elif name == "userpassword": @@ -335,7 +406,11 @@ def read_azure_ovf(contents): elif name == "hostname": md['local-hostname'] = value elif name == "dscfg": - cfg['datasource'] = {DS_NAME: util.load_yaml(value, default={})} + if attrs.get('encoding') in (None, "base64"): + dscfg = base64.b64decode(''.join(value.split())) + else: + dscfg = value + cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})} elif name == "ssh": cfg['_pubkeys'] = load_azure_ovf_pubkeys(child) elif name == "disablesshpasswordauthentication": @@ -350,7 +425,7 @@ def read_azure_ovf(contents): if username: defuser['name'] = username if password: - defuser['password'] = password + defuser['passwd'] = encrypt_pass(password) defuser['lock_passwd'] = False if defuser: @@ -362,6 +437,10 @@ def read_azure_ovf(contents): return (md, ud, cfg) +def encrypt_pass(password, salt_id="$6$"): + return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) + + def list_possible_azure_ds_devs(): # return a sorted list of devices that might have a azure datasource devlist = [] diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py new file mode 100644 index 00000000..d348d20b --- /dev/null +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -0,0 +1,244 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2013 Canonical Ltd. +# +# Author: Ben Howard <ben.howard@canonical.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +# +# Datasource for provisioning on SmartOS. This works on Joyent +# and public/private Clouds using SmartOS. +# +# SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests. +# The meta-data is transmitted via key/value pairs made by +# requests on the console. For example, to get the hostname, you +# would send "GET hostname" on /dev/ttyS1. +# + + +import base64 +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util +import os +import os.path +import serial + +DEF_TTY_LOC = '/dev/ttyS1' +DEF_TTY_TIMEOUT = 60 +LOG = logging.getLogger(__name__) + +SMARTOS_ATTRIB_MAP = { + # Cloud-init Key : (SmartOS Key, Strip line endings) + 'local-hostname': ('hostname', True), + 'public-keys': ('root_authorized_keys', True), + 'user-script': ('user-script', False), + 'user-data': ('user-data', False), + 'iptables_disable': ('iptables_disable', True), + 'motd_sys_info': ('motd_sys_info', True), +} + +# These are values which will never be base64 encoded. +# They come from the cloud platform, not the user +SMARTOS_NO_BASE64 = ['root_authorized_keys', 'motd_sys_info', + 'iptables_disable'] + + +class DataSourceSmartOS(sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.seed_dir = os.path.join(paths.seed_dir, 'sdc') + self.is_smartdc = None + + self.seed = self.ds_cfg.get("serial_device", DEF_TTY_LOC) + self.seed_timeout = self.ds_cfg.get("serial_timeout", DEF_TTY_TIMEOUT) + self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode', + SMARTOS_NO_BASE64) + self.b64_keys = self.ds_cfg.get('base64_keys', []) + self.b64_all = self.ds_cfg.get('base64_all', False) + + def __str__(self): + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) + + def get_data(self): + md = {} + ud = "" + + if not os.path.exists(self.seed): + LOG.debug("Host does not appear to be on SmartOS") + return False + + dmi_info = dmi_data() + if dmi_info is False: + LOG.debug("No dmidata utility found") + return False + + system_uuid, system_type = dmi_info + if 'smartdc' not in system_type.lower(): + LOG.debug("Host is not on SmartOS. 
system_type=%s", system_type) + return False + self.is_smartdc = True + md['instance-id'] = system_uuid + + b64_keys = self.query('base64_keys', strip=True, b64=False) + if b64_keys is not None: + self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] + + b64_all = self.query('base64_all', strip=True, b64=False) + if b64_all is not None: + self.b64_all = util.is_true(b64_all) + + for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems(): + smartos_noun, strip = attribute + md[ci_noun] = self.query(smartos_noun, strip=strip) + + if not md['local-hostname']: + md['local-hostname'] = system_uuid + + ud = None + if md['user-data']: + ud = md['user-data'] + elif md['user-script']: + ud = md['user-script'] + + self.metadata = md + self.userdata_raw = ud + return True + + def get_instance_id(self): + return self.metadata['instance-id'] + + def query(self, noun, strip=False, default=None, b64=None): + if b64 is None: + if noun in self.smartos_no_base64: + b64 = False + elif self.b64_all or noun in self.b64_keys: + b64 = True + + return query_data(noun=noun, strip=strip, seed_device=self.seed, + seed_timeout=self.seed_timeout, default=default, + b64=b64) + + +def get_serial(seed_device, seed_timeout): + """This is replaced in unit testing, allowing us to replace + serial.Serial with a mocked class. + + The timeout value of 60 seconds should never be hit. The value + is taken from SmartOS own provisioning tools. Since we are reading + each line individually up until the single ".", the transfer is + usually very fast (i.e. microseconds) to get the response. + """ + if not seed_device: + raise AttributeError("seed_device value is not set") + + ser = serial.Serial(seed_device, timeout=seed_timeout) + if not ser.isOpen(): + raise SystemError("Unable to open %s" % seed_device) + + return ser + + +def query_data(noun, seed_device, seed_timeout, strip=False, default=None, + b64=None): + """Makes a request to via the serial console via "GET <NOUN>" + + In the response, the first line is the status, while subsequent lines + are is the value. A blank line with a "." is used to indicate end of + response. + + If the response is expected to be base64 encoded, then set b64encoded + to true. Unfortantely, there is no way to know if something is 100% + encoded, so this method relies on being told if the data is base64 or + not. 
+ """ + + if not noun: + return False + + ser = get_serial(seed_device, seed_timeout) + ser.write("GET %s\n" % noun.rstrip()) + status = str(ser.readline()).rstrip() + response = [] + eom_found = False + + if 'SUCCESS' not in status: + ser.close() + return default + + while not eom_found: + m = ser.readline() + if m.rstrip() == ".": + eom_found = True + else: + response.append(m) + + ser.close() + + if b64 is None: + b64 = query_data('b64-%s' % noun, seed_device=seed_device, + seed_timeout=seed_timeout, b64=False, + default=False, strip=True) + b64 = util.is_true(b64) + + resp = None + if b64 or strip: + resp = "".join(response).rstrip() + else: + resp = "".join(response) + + if b64: + try: + return base64.b64decode(resp) + except TypeError: + LOG.warn("Failed base64 decoding key '%s'", noun) + return resp + + return resp + + +def dmi_data(): + sys_uuid, sys_type = None, None + dmidecode_path = util.which('dmidecode') + if not dmidecode_path: + return False + + sys_uuid_cmd = [dmidecode_path, "-s", "system-uuid"] + try: + LOG.debug("Getting hostname from dmidecode") + (sys_uuid, _err) = util.subp(sys_uuid_cmd) + except Exception as e: + util.logexc(LOG, "Failed to get system UUID", e) + + sys_type_cmd = [dmidecode_path, "-s", "system-product-name"] + try: + LOG.debug("Determining hypervisor product name via dmidecode") + (sys_type, _err) = util.subp(sys_type_cmd) + except Exception as e: + util.logexc(LOG, "Failed to get system UUID", e) + + return sys_uuid.lower(), sys_type + + +# Used to match classes to dependencies +datasources = [ + (DataSourceSmartOS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/util.py b/cloudinit/util.py index 47d71ef4..5032cc47 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1751,3 +1751,61 @@ def get_mount_info(path, log=LOG): mountinfo_path = '/proc/%s/mountinfo' % os.getpid() lines = load_file(mountinfo_path).splitlines() return parse_mount_info(path, lines, log) + + +def which(program): + # Return path of program for execution if found in path + def is_exe(fpath): + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + + _fpath, _ = os.path.split(program) + if _fpath: + if is_exe(program): + return program + else: + for path in os.environ["PATH"].split(os.pathsep): + path = path.strip('"') + exe_file = os.path.join(path, program) + if is_exe(exe_file): + return exe_file + + return None + + +def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False): + if args is None: + args = [] + if kwargs is None: + kwargs = {} + + start = time.time() + + ustart = None + if get_uptime: + try: + ustart = float(uptime()) + except ValueError: + pass + + try: + ret = func(*args, **kwargs) + finally: + delta = time.time() - start + udelta = None + if ustart is not None: + try: + udelta = float(uptime()) - ustart + except ValueError: + pass + + tmsg = " took %0.3f seconds" % delta + if get_uptime: + if isinstance(udelta, (float)): + tmsg += " (%0.2f)" % udelta + else: + tmsg += " (N/A)" + try: + logfunc(msg + tmsg) + except: + pass + return ret diff --git a/doc/examples/cloud-config-TODO.txt b/doc/examples/cloud-config-TODO.txt deleted file mode 100644 index c7ed54ab..00000000 --- a/doc/examples/cloud-config-TODO.txt +++ /dev/null @@ -1,20 +0,0 @@ -# Add apt configuration files -# Add an apt.conf.d/ file with the relevant content -# -# See apt.conf man 
page for more information. -# -# Defaults: -# + filename: 00-boot-conf -# -apt_conf: - - # Creates an apt proxy configuration in /etc/apt/apt.conf.d/01-proxy - - filename: "01-proxy" - content: | - Acquire::http::Proxy "http://proxy.example.org:3142/ubuntu"; - - # Add the following line to /etc/apt/apt.conf.d/00-boot-conf - # (run debconf at a critical priority) - - content: | - DPkg::Pre-Install-Pkgs:: "/usr/sbin/dpkg-preconfigure --apt -p critical|| true"; - diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index fbabcad9..65a3cdf5 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -45,4 +45,23 @@ datasource: Azure: agent_command: [service, walinuxagent, start] - + set_hostname: True + hostname_bounce: + interface: eth0 + policy: on # [can be 'on', 'off' or 'force'] + + SmartOS: + # The SmartOS datasource works over a serial console interacting with + # a server on the other end. By default, the second serial console is the + # device. SmartOS also uses a serial timeout of 60 seconds. + serial_device: /dev/ttyS1 + serial_timeout: 60 + + # a list of keys that will not be base64 decoded even if base64_all is true + no_base64_decode: ['root_authorized_keys', 'motd_sys_info', + 'iptables_disable'] + # a plaintext, comma delimited list of keys whose values are b64 encoded + base64_keys: [] + # a boolean indicating that all keys not in 'no_base64_decode' are encoded + base64_all: False diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index 24b4b36c..bcfd7917 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -53,6 +53,9 @@ apt_mirror_search: apt_mirror_search_dns: False # apt_proxy (configure Acquire::HTTP::Proxy) +# 'apt_http_proxy' is an alias for 'apt_proxy'. +# Also available are 'apt_ftp_proxy' and 'apt_https_proxy'. +# These affect Acquire::FTP::Proxy and Acquire::HTTPS::Proxy respectively. apt_proxy: http://my.apt.proxy:3128 # apt_pipelining (configure Acquire::http::Pipeline-Depth) diff --git a/doc/sources/azure/README.rst b/doc/sources/azure/README.rst new file mode 100644 index 00000000..8239d1fa --- /dev/null +++ b/doc/sources/azure/README.rst @@ -0,0 +1,134 @@ +================ +Azure Datasource +================ + +This datasource finds metadata and user-data from the Azure cloud platform. + +Azure Platform +-------------- +The Azure cloud platform provides initial data to an instance via an attached +CD formatted in UDF. That CD contains an 'ovf-env.xml' file that provides some +information. Additional information is obtained via interaction with the +"endpoint". The IP address of the endpoint is advertised to the instance +inside of dhcp option 245. On Ubuntu, that can be seen in +/var/lib/dhcp/dhclient.eth0.leases as a colon delimited hex value (example: +``option unknown-245 64:41:60:82;`` is 100.65.96.130) + +walinuxagent +------------ +In order to operate correctly, cloud-init needs walinuxagent to provide much +of the interaction with Azure. In addition to the "provisioning" code, +walinuxagent runs as a long running daemon that handles, among other things, +the following: +- generate an x509 certificate and send it to the endpoint + +waagent.conf config +~~~~~~~~~~~~~~~~~~~ +In order to use waagent.conf with cloud-init, the following settings are recommended. Other values can be changed or set to the defaults. 
:: + + # disabling provisioning turns off all 'Provisioning.*' functions + Provisioning.Enabled=n + # this is currently not handled by cloud-init, so let walinuxagent do it. + ResourceDisk.Format=y + ResourceDisk.MountPoint=/mnt + + +Userdata +-------- +Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init +expects that user-data will be provided as a base64 encoded value inside the +text child of an element named ``UserData`` or ``CustomData`` which is a direct +child of the ``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``). +If both ``UserData`` and ``CustomData`` are provided, the behavior is undefined +as to which will be selected. + +In the example below, the user-data provided is 'this is my userdata', and the +datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``. +That agent command will take effect as if it were specified in the system config. + +Example: + +.. code:: + + <wa:ProvisioningSection> + <wa:Version>1.0</wa:Version> + <LinuxProvisioningConfigurationSet + xmlns="http://schemas.microsoft.com/windowsazure" + xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> + <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType> + <HostName>myHost</HostName> + <UserName>myuser</UserName> + <UserPassword/> + <CustomData>dGhpcyBpcyBteSB1c2VyZGF0YQ==</CustomData> + <dscfg>eyJhZ2VudF9jb21tYW5kIjogWyJzdGFydCIsICJ3YWxpbnV4YWdlbnQiXX0=</dscfg> + <DisableSshPasswordAuthentication>true</DisableSshPasswordAuthentication> + <SSH> + <PublicKeys> + <PublicKey> + <Fingerprint>6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7</Fingerprint> + <Path>this-value-unused</Path> + </PublicKey> + </PublicKeys> + </SSH> + </LinuxProvisioningConfigurationSet> + </wa:ProvisioningSection> + +Configuration +------------- +Configuration for the datasource can be read from the system config or set +via the `dscfg` entry in the `LinuxProvisioningConfigurationSet`. Content in +the dscfg node is expected to be base64 encoded yaml content, and it will be +merged into the 'datasource: Azure' entry. + +The '``hostname_bounce: command``' entry can be either the literal string +'builtin' or a command to execute. The command will be invoked after the +hostname is set, and will have the 'interface' in its environment. If +``set_hostname`` is not true, then ``hostname_bounce`` will be ignored. + +An example might be: + command: ["sh", "-c", "killall dhclient; dhclient $interface"] + +.. code:: + + datasource: + Azure: + agent_command: [service, walinuxagent, start] + set_hostname: True + hostname_bounce: + # the name of the interface to bounce + interface: eth0 + # policy can be 'on', 'off' or 'force' + policy: on + # the command used to bounce the interface + command: "builtin" + hostname_command: "hostname" + +hostname +-------- +When the user launches an instance, they provide a hostname for that instance. +The hostname is provided to the instance in the ovf-env.xml file as +``HostName``. + +Whatever value the instance provides in its dhcp request will resolve in the +domain returned in the 'search' request. + +The interesting issue is that a generic image will already have a hostname +configured. The Ubuntu cloud images have 'ubuntu' as the hostname of the +system, and the initial dhcp request on eth0 is not guaranteed to occur after +the datasource code has been run. So, on first boot, that initial value will +be sent in the dhcp request and *that* value will resolve. 
+ +In order to make the ``HostName`` provided in the ovf-env.xml resolve, a +dhcp request must be made with the new value. Walinuxagent (in its current +version) handles this by polling the state of hostname and bouncing ('``ifdown +eth0; ifup eth0``') the network interface if it sees that a change has been +made. + +cloud-init handles this by setting the hostname in the DataSource's 'get_data' +method via '``hostname $HostName``', and then bouncing the interface. This +behavior can be configured or disabled in the datasource config. See +'Configuration' above. diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst new file mode 100644 index 00000000..fd4e496d --- /dev/null +++ b/doc/sources/smartos/README.rst @@ -0,0 +1,72 @@ +================== +SmartOS Datasource +================== + +This datasource finds metadata and user-data from the SmartOS virtualization +platform (i.e. Joyent). + +SmartOS Platform +---------------- +The SmartOS virtualization platform provides meta-data to the instance via the +second serial console. On Linux, this is /dev/ttyS1. The data is provided via +a simple protocol: a client queries for a key, and the console responds with a +status line and then, if "SUCCESS", the value, terminated by a single ".\n". + +New versions of the SmartOS tooling will include support for base64 encoded data. + +Userdata +-------- + +In SmartOS parlance, user-data is actually meta-data. This userdata can be +provided as key-value pairs. + +Cloud-init supports reading the traditional meta-data fields supported by the +SmartOS tools. These are: + * root_authorized_keys + * hostname + * enable_motd_sys_info + * iptables_disable + +Note: At this time iptables_disable and enable_motd_sys_info are read but + are not actioned. + +user-script +----------- + +SmartOS traditionally supports sending over a user-script for execution at the +rc.local level. Cloud-init supports running user-scripts as if they were +cloud-init user-data. In this sense, anything with a shell interpreter +directive will run. + +user-data and user-script +------------------------- + +In the event that a user defines the meta-data key of "user-data" it will +always supersede any user-script data. This is for consistency. + +base64 +------ + +The following are exempt from base64 encoding, owing to the fact that they +are provided by SmartOS: + * root_authorized_keys + * enable_motd_sys_info + * iptables_disable + +This list can be changed through the system config variable 'no_base64_decode'. + +This means that user-script and user-data as well as other values can be +base64 encoded. Since Cloud-init can only guess as to whether or not something +is truly base64 encoded, the following meta-data keys are hints as to whether +or not to base64 decode something: + * base64_all: Except for excluded keys, attempt to base64 decode + the values. If the value fails to decode properly, it will be + returned as text. + * base64_keys: A comma delimited list of which keys are base64 encoded. + * b64-<key>: + for any key, if there exists an entry in the metadata for 'b64-<key>', + then 'b64-<key>' is expected to be a plaintext boolean indicating whether + or not its value is encoded. + * no_base64_decode: This is a configuration setting + (i.e. /etc/cloud/cloud.cfg.d) that sets which values should not be + base64 decoded. 
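To make the serial protocol described above concrete, here is a hedged sketch of a single query, mirroring the query_data() logic from DataSourceSmartOS.py in this diff; the device path is the documented default and the key is an example::

    # Python 2 sketch using pyserial (the new dependency in this diff).
    import serial

    ser = serial.Serial('/dev/ttyS1', timeout=60)
    ser.write("GET hostname\n")
    status = ser.readline().rstrip()  # "SUCCESS" or e.g. "NOTFOUND hostname"
    value = []
    if status.startswith('SUCCESS'):
        while True:
            line = ser.readline()
            if line.rstrip() == ".":  # a lone "." terminates the response
                break
            value.append(line)
    ser.close()
    print "".join(value).rstrip()     # e.g. "test-host"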
diff --git a/packages/bddeb b/packages/bddeb index 00bc717e..30559870 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -32,8 +32,10 @@ PKG_MP = { 'boto': 'python-boto', 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', + 'jsonpatch': 'python-json-patch', 'oauth': 'python-oauth', 'prettytable': 'python-prettytable', + 'pyserial': 'python-serial', 'pyyaml': 'python-yaml', 'requests': 'python-requests', } @@ -94,12 +96,20 @@ def main(): default=False, action='store_true') + parser.add_argument("--init-system", dest="init_system", + help=("build deb with INIT_SYSTEM=xxx" + " (default: %(default)s)"), + default=os.environ.get("INIT_SYSTEM", "upstart")) + + for ent in DEBUILD_ARGS: parser.add_argument(ent, dest="debuild_args", action='append_const', const=ent, help=("pass through '%s' to debuild" % ent)) args = parser.parse_args() + os.environ['INIT_SYSTEM'] = args.init_system + capture = True if args.verbose: capture = False diff --git a/packages/brpm b/packages/brpm index 14faea4f..8c90a0ab 100755 --- a/packages/brpm +++ b/packages/brpm @@ -39,8 +39,10 @@ PKG_MP = { 'boto': 'python-boto', 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', + 'jsonpatch': 'python-jsonpatch', 'oauth': 'python-oauth', 'prettytable': 'python-prettytable', + 'pyserial': 'pyserial', 'pyyaml': 'PyYAML', 'requests': 'python-requests', }, @@ -49,8 +51,10 @@ 'boto': 'python-boto', 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', + 'jsonpatch': 'python-jsonpatch', 'oauth': 'python-oauth', 'prettytable': 'python-prettytable', + 'pyserial': 'python-pyserial', 'pyyaml': 'python-yaml', 'requests': 'python-requests', } diff --git a/setup.py b/setup.py @@ -37,8 +37,8 @@ def is_f(p): INITSYS_FILES = { - 'sysvinit': [f for f in glob('sysvinit/*') if is_f(f)], - 'sysvinit_deb': [f for f in glob('sysvinit/*') if is_f(f)], + 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)], + 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)], 'systemd': [f for f in glob('systemd/*') if is_f(f)], 'upstart': [f for f in glob('upstart/*') if is_f(f)], } diff --git a/sysvinit/debian/cloud-config b/sysvinit/debian/cloud-config new file mode 100644 index 00000000..53322748 --- /dev/null +++ b/sysvinit/debian/cloud-config @@ -0,0 +1,64 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: cloud-config +# Required-Start: cloud-init cloud-init-local +# Required-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Cloud init modules --mode config +# Description: Cloud configuration initialization ### END INIT INFO + +# Authors: Julien Danjou <acid@debian.org> +# Juerg Haefliger <juerg.haefliger@hp.com> +# Thomas Goirand <zigo@debian.org> + +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="Cloud service" +NAME=cloud-init +DAEMON=/usr/bin/$NAME +DAEMON_ARGS="modules --mode config" +SCRIPTNAME=/etc/init.d/$NAME + +# Exit if the package is not installed +[ -x "$DAEMON" ] || exit 0 + +# Read configuration variable file if it is present +[ -r /etc/default/$NAME ] && . /etc/default/$NAME + +# Define LSB log_* functions. +# Depend on lsb-base (>= 3.2-14) to ensure that this file is present +# and status_of_proc is working. +. /lib/lsb/init-functions + +if init_is_upstart; then + case "$1" in + stop) + exit 0 + ;; + *) + exit 1 + ;; + esac +fi + +case "$1" in +start) + log_daemon_msg "Starting $DESC" "$NAME" + $DAEMON ${DAEMON_ARGS} + case "$?" 
in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +stop|restart|force-reload) + echo "Error: argument '$1' not supported" >&2 + exit 3 +;; +*) + echo "Usage: $SCRIPTNAME {start}" >&2 + exit 3 +;; +esac + +: diff --git a/sysvinit/debian/cloud-final b/sysvinit/debian/cloud-final new file mode 100644 index 00000000..55afc8b0 --- /dev/null +++ b/sysvinit/debian/cloud-final @@ -0,0 +1,66 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: cloud-final +# Required-Start: $all cloud-config +# Required-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Cloud init modules final jobs +# Description: This runs the cloud configuration initialization "final" jobs +# and can be seen as the traditional "rc.local" time for the cloud. +# It runs after all cloud-config jobs are run +### END INIT INFO + +# Authors: Julien Danjou <acid@debian.org> +# Juerg Haefliger <juerg.haefliger@hp.com> +# Thomas Goirand <zigo@debian.org> + +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="Cloud service" +NAME=cloud-init +DAEMON=/usr/bin/$NAME +DAEMON_ARGS="modules --mode final" +SCRIPTNAME=/etc/init.d/$NAME + +# Exit if the package is not installed +[ -x "$DAEMON" ] || exit 0 + +# Read configuration variable file if it is present +[ -r /etc/default/$NAME ] && . /etc/default/$NAME + +# Define LSB log_* functions. +# Depend on lsb-base (>= 3.2-14) to ensure that this file is present +# and status_of_proc is working. +. /lib/lsb/init-functions + +if init_is_upstart; then + case "$1" in + stop) + exit 0 + ;; + *) + exit 1 + ;; + esac +fi + +case "$1" in +start) + log_daemon_msg "Starting $DESC" "$NAME" + $DAEMON ${DAEMON_ARGS} + case "$?" in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +stop|restart|force-reload) + echo "Error: argument '$1' not supported" >&2 + exit 3 +;; +*) + echo "Usage: $SCRIPTNAME {start}" >&2 + exit 3 +;; +esac + +: diff --git a/sysvinit/debian/cloud-init b/sysvinit/debian/cloud-init new file mode 100755 index 00000000..48fa0423 --- /dev/null +++ b/sysvinit/debian/cloud-init @@ -0,0 +1,64 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: cloud-init +# Required-Start: $local_fs $remote_fs $syslog $network cloud-init-local +# Required-Stop: $remote_fs +# X-Start-Before: sshd +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Cloud init +# Description: Cloud configuration initialization +### END INIT INFO + +# Authors: Julien Danjou <acid@debian.org> +# Thomas Goirand <zigo@debian.org> + +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="Cloud service" +NAME=cloud-init +DAEMON=/usr/bin/$NAME +DAEMON_ARGS="init" +SCRIPTNAME=/etc/init.d/$NAME + +# Exit if the package is not installed +[ -x "$DAEMON" ] || exit 0 + +# Read configuration variable file if it is present +[ -r /etc/default/$NAME ] && . /etc/default/$NAME + +# Define LSB log_* functions. +# Depend on lsb-base (>= 3.2-14) to ensure that this file is present +# and status_of_proc is working. +. /lib/lsb/init-functions + +if init_is_upstart; then + case "$1" in + stop) + exit 0 + ;; + *) + exit 1 + ;; + esac +fi + +case "$1" in + start) + log_daemon_msg "Starting $DESC" "$NAME" + $DAEMON ${DAEMON_ARGS} + case "$?" 
in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac + ;; + stop|restart|force-reload) + echo "Error: argument '$1' not supported" >&2 + exit 3 + ;; + *) + echo "Usage: $SCRIPTNAME {start}" >&2 + exit 3 + ;; +esac + +: diff --git a/sysvinit/debian/cloud-init-local b/sysvinit/debian/cloud-init-local new file mode 100644 index 00000000..802ee8e9 --- /dev/null +++ b/sysvinit/debian/cloud-init-local @@ -0,0 +1,63 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: cloud-init-local +# Required-Start: $local_fs $remote_fs +# Required-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Cloud init local +# Description: Cloud configuration initialization +### END INIT INFO + +# Authors: Julien Danjou <acid@debian.org> +# Juerg Haefliger <juerg.haefliger@hp.com> + +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="Cloud service" +NAME=cloud-init +DAEMON=/usr/bin/$NAME +DAEMON_ARGS="init --local" +SCRIPTNAME=/etc/init.d/$NAME + +# Exit if the package is not installed +[ -x "$DAEMON" ] || exit 0 + +# Read configuration variable file if it is present +[ -r /etc/default/$NAME ] && . /etc/default/$NAME + +# Define LSB log_* functions. +# Depend on lsb-base (>= 3.2-14) to ensure that this file is present +# and status_of_proc is working. +. /lib/lsb/init-functions + +if init_is_upstart; then + case "$1" in + stop) + exit 0 + ;; + *) + exit 1 + ;; + esac +fi + +case "$1" in +start) + log_daemon_msg "Starting $DESC" "$NAME" + $DAEMON ${DAEMON_ARGS} + case "$?" in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +stop|restart|force-reload) + echo "Error: argument '$1' not supported" >&2 + exit 3 +;; +*) + echo "Usage: $SCRIPTNAME {start}" >&2 + exit 3 +;; +esac + +: diff --git a/sysvinit/cloud-config b/sysvinit/redhat/cloud-config index ad8ed831..ad8ed831 100755 --- a/sysvinit/cloud-config +++ b/sysvinit/redhat/cloud-config diff --git a/sysvinit/cloud-final b/sysvinit/redhat/cloud-final index aeae8903..aeae8903 100755 --- a/sysvinit/cloud-final +++ b/sysvinit/redhat/cloud-final diff --git a/sysvinit/cloud-init b/sysvinit/redhat/cloud-init index c1c92ad0..c1c92ad0 100755 --- a/sysvinit/cloud-init +++ b/sysvinit/redhat/cloud-init diff --git a/sysvinit/cloud-init-local b/sysvinit/redhat/cloud-init-local index b53e0db2..b53e0db2 100755 --- a/sysvinit/cloud-init-local +++ b/sysvinit/redhat/cloud-init-local diff --git a/tests/data/mountinfo_precise_ext4.txt b/tests/data/mountinfo_precise_ext4.txt new file mode 100644 index 00000000..a7a1db67 --- /dev/null +++ b/tests/data/mountinfo_precise_ext4.txt @@ -0,0 +1,24 @@ +15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=16422216k,nr_inodes=4105554,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=6572812k,mode=755 +20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs cgroup rw,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +25 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +26 19 0:19 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 19 0:21 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none 
rw,size=102400k,mode=755 +24 21 0:18 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset +29 21 0:22 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +30 21 0:23 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +31 21 0:24 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +32 21 0:25 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices +33 21 0:26 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +34 21 0:27 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +35 21 0:28 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +36 20 9:0 / /boot rw,relatime - ext4 /dev/md0 rw,data=ordered +37 16 0:29 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +39 28 0:30 / /run/user/foobar/gvfs rw,nosuid,nodev,relatime - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 diff --git a/tests/data/mountinfo_raring_btrfs.txt b/tests/data/mountinfo_raring_btrfs.txt new file mode 100644 index 00000000..c5795636 --- /dev/null +++ b/tests/data/mountinfo_raring_btrfs.txt @@ -0,0 +1,13 @@ +15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=865556k,nr_inodes=216389,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=348196k,mode=755 +20 1 0:16 /@ / rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache +21 15 0:19 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +22 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +23 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +24 19 0:20 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +25 19 0:21 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +26 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +27 20 0:16 /@home /home rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 2e8583f9..1ca6a79d 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -2,6 +2,7 @@ from cloudinit import helpers from cloudinit.sources import DataSourceAzure from tests.unittests.helpers import populate_dir +import crypt import base64 from mocker import MockerTestCase import os @@ -26,8 +27,15 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType> """ - for key, val in data.items(): - content += "<%s>%s</%s>\n" % (key, val, key) + for key, dval in data.items(): + if isinstance(dval, dict): + val = dval.get('text') + attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v in dval.items() + if k != 'text']) + else: + val = dval + attrs = "" + content += "<%s%s>%s</%s>\n" % (key, attrs, val, key) if userdata: content += "<UserData>%s</UserData>\n" % (base64.b64encode(userdata)) @@ -103,6 +111,9 @@ class TestAzureDataSource(MockerTestCase): data['iid_from_shared_cfg'] = path return 'i-my-azure-id' + def _apply_hostname_bounce(**kwargs): + data['apply_hostname_bounce'] = kwargs + if data.get('ovfcontent') is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': data['ovfcontent']}) @@ 
-118,7 +129,9 @@ class TestAzureDataSource(MockerTestCase): (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), (mod, 'iid_from_shared_config', - _iid_from_shared_config), ]) + _iid_from_shared_config), + (mod, 'apply_hostname_bounce', + _apply_hostname_bounce), ]) dsrc = mod.DataSourceAzureNet( data.get('sys_cfg', {}), distro=None, paths=self.paths) @@ -139,10 +152,24 @@ class TestAzureDataSource(MockerTestCase): self.assertEqual(0700, data['datadir_mode']) self.assertEqual(dsrc.metadata['instance-id'], 'i-my-azure-id') + def test_user_cfg_set_agent_command_plain(self): + # set dscfg via plaintext + cfg = {'agent_command': "my_command"} + odata = {'HostName': "myhost", 'UserName': "myuser", + 'dscfg': {'text': yaml.dump(cfg), 'encoding': 'plain'}} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(data['agent_invoked'], cfg['agent_command']) + def test_user_cfg_set_agent_command(self): + # set dscfg via base64 encoded yaml cfg = {'agent_command': "my_command"} odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': yaml.dump(cfg)} + 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)), + 'encoding': 'base64'}} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) @@ -181,11 +208,15 @@ class TestAzureDataSource(MockerTestCase): self.assertTrue('default_user' in dsrc.cfg['system_info']) defuser = dsrc.cfg['system_info']['default_user'] - # default user shoudl be updated for password and username - # and should not be locked. + # default user should be updated with username and should not be locked. self.assertEqual(defuser['name'], odata['UserName']) - self.assertEqual(defuser['password'], odata['UserPassword']) self.assertFalse(defuser['lock_passwd']) + # passwd is a crypt formatted string $id$salt$encrypted + # encrypting the plaintext with the salt (everything up to the final + # '$') should equal everything after the '$' + pos = defuser['passwd'].rfind("$") + 1 + self.assertEqual(defuser['passwd'], + crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos])) def test_userdata_found(self): mydata = "FOOBAR" @@ -218,6 +249,47 @@ class TestAzureDataSource(MockerTestCase): for mypk in mypklist: self.assertIn(mypk, dsrc.cfg['_pubkeys']) + def test_disabled_bounce(self): + pass + + def test_apply_bounce_call_1(self): + # hostname needs to get through to apply_hostname_bounce + odata = {'HostName': 'my-random-hostname'} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + + self._get_ds(data).get_data() + self.assertIn('hostname', data['apply_hostname_bounce']) + self.assertEqual(data['apply_hostname_bounce']['hostname'], + odata['HostName']) + + def test_apply_bounce_call_configurable(self): + # hostname_bounce should be configurable in datasource cfg + cfg = {'hostname_bounce': {'interface': 'eth1', 'policy': 'off', + 'command': 'my-bounce-command', + 'hostname_command': 'my-hostname-command'}} + odata = {'HostName': "xhost", + 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)), + 'encoding': 'base64'}} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + self._get_ds(data).get_data() + + for k in cfg['hostname_bounce']: + self.assertIn(k, data['apply_hostname_bounce']) + + for k, v in cfg['hostname_bounce'].items(): + self.assertEqual(data['apply_hostname_bounce'][k], v) + + def test_set_hostname_disabled(self): + # config specifying set_hostname off should not bounce + cfg = {'set_hostname': False} + odata = {'HostName': "xhost", 
'dscfg': {'text': base64.b64encode(yaml.dump(cfg)), + 'encoding': 'base64'}} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + self._get_ds(data).get_data() + + self.assertEqual(data.get('apply_hostname_bounce', "N/A"), "N/A") + class TestReadAzureOvf(MockerTestCase): def test_invalid_xml_raises_non_azure_ds(self): diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py new file mode 100644 index 00000000..f53715b0 --- /dev/null +++ b/tests/unittests/test_datasource/test_smartos.py @@ -0,0 +1,271 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2013 Canonical Ltd. +# +# Author: Ben Howard <ben.howard@canonical.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program.  If not, see <http://www.gnu.org/licenses/>. +# +# +# This is a testcase for the SmartOS datasource. It replicates a serial +# console and acts like the SmartOS console does in order to validate +# return responses. +# + +import base64 +from cloudinit import helpers +from cloudinit.sources import DataSourceSmartOS + +from mocker import MockerTestCase +import uuid + +MOCK_RETURNS = { + 'hostname': 'test-host', + 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', + 'disable_iptables_flag': None, + 'enable_motd_sys_info': None, + 'test-var1': 'some data', + 'user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), +} + +DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc') + + +class MockSerial(object): + """Fake a serial terminal for testing the code that + interfaces with the serial console.""" + + port = None + + def __init__(self, mockdata): + self.last = None + self.new = True + self.count = 0 + self.mocked_out = [] + self.mockdata = mockdata + + def open(self): + return True + + def close(self): + return True + + def isOpen(self): + return True + + def write(self, line): + line = line.replace('GET ', '') + self.last = line.rstrip() + + def readline(self): + if self.new: + self.new = False + if self.last in self.mockdata: + return 'SUCCESS\n' + else: + return 'NOTFOUND %s\n' % self.last + + if self.last in self.mockdata: + if not self.mocked_out: + self.mocked_out = [x for x in self._format_out()] + + if len(self.mocked_out) > self.count: + self.count += 1 + return self.mocked_out[self.count - 1] + + def _format_out(self): + if self.last in self.mockdata: + _mret = self.mockdata[self.last] + try: + for l in _mret.splitlines(): + yield "%s\n" % l.rstrip() + except: + yield "%s\n" % _mret.rstrip() + + yield '.'
+ yield '\n' + + +class TestSmartOSDataSource(MockerTestCase): + def setUp(self): + # makeDir comes from MockerTestCase + self.tmp = self.makeDir() + + # patch cloud_dir, so our 'seed_dir' is guaranteed empty + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + + self.unapply = [] + super(TestSmartOSDataSource, self).setUp() + + def tearDown(self): + apply_patches([i for i in reversed(self.unapply)]) + super(TestSmartOSDataSource, self).tearDown() + + def apply_patches(self, patches): + ret = apply_patches(patches) + self.unapply += ret + + def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None): + mod = DataSourceSmartOS + + if mockdata is None: + mockdata = MOCK_RETURNS + + if dmi_data is None: + dmi_data = DMI_DATA_RETURN + + def _get_serial(*_): + return MockSerial(mockdata) + + def _dmi_data(): + return dmi_data + + if sys_cfg is None: + sys_cfg = {} + + if ds_cfg is not None: + sys_cfg['datasource'] = sys_cfg.get('datasource', {}) + sys_cfg['datasource']['SmartOS'] = ds_cfg + + self.apply_patches([(mod, 'get_serial', _get_serial)]) + self.apply_patches([(mod, 'dmi_data', _dmi_data)]) + dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None, + paths=self.paths) + return dsrc + + def test_seed(self): + # default seed should be /dev/ttyS1 + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals('/dev/ttyS1', dsrc.seed) + + def test_issmartdc(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue(dsrc.is_smartdc) + + def test_no_base64(self): + ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} + dsrc = self._get_ds(ds_cfg=ds_cfg) + ret = dsrc.get_data() + self.assertTrue(ret) + + def test_uuid(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(DMI_DATA_RETURN[0], dsrc.metadata['instance-id']) + + def test_root_keys(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['root_authorized_keys'], + dsrc.metadata['public-keys']) + + def test_hostname_b64(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['hostname'], + dsrc.metadata['local-hostname']) + + def test_hostname(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['hostname'], + dsrc.metadata['local-hostname']) + + def test_base64_all(self): + # metadata provided base64_all of true + my_returns = MOCK_RETURNS.copy() + my_returns['base64_all'] = "true" + for k in ('hostname', 'user-data'): + my_returns[k] = base64.b64encode(my_returns[k]) + + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['hostname'], + dsrc.metadata['local-hostname']) + self.assertEquals(MOCK_RETURNS['user-data'], + dsrc.userdata_raw) + self.assertEquals(MOCK_RETURNS['root_authorized_keys'], + dsrc.metadata['public-keys']) + self.assertEquals(MOCK_RETURNS['disable_iptables_flag'], + dsrc.metadata['iptables_disable']) + self.assertEquals(MOCK_RETURNS['enable_motd_sys_info'], + dsrc.metadata['motd_sys_info']) + + def test_b64_userdata(self): + my_returns = MOCK_RETURNS.copy() + my_returns['b64-user-data'] = "true" + my_returns['b64-hostname'] = "true" + for k in ('hostname', 'user-data'): + my_returns[k] = base64.b64encode(my_returns[k]) + + dsrc = self._get_ds(mockdata=my_returns) + ret = 
dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['hostname'], + dsrc.metadata['local-hostname']) + self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw) + self.assertEquals(MOCK_RETURNS['root_authorized_keys'], + dsrc.metadata['public-keys']) + + def test_b64_keys(self): + my_returns = MOCK_RETURNS.copy() + my_returns['base64_keys'] = 'hostname,ignored' + for k in ('hostname',): + my_returns[k] = base64.b64encode(my_returns[k]) + + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['hostname'], + dsrc.metadata['local-hostname']) + self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw) + + def test_userdata(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw) + + def test_disable_iptables_flag(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['disable_iptables_flag'], + dsrc.metadata['iptables_disable']) + + def test_motd_sys_info(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['enable_motd_sys_info'], + dsrc.metadata['motd_sys_info']) + + +def apply_patches(patches): + ret = [] + for (ref, name, replace) in patches: + if replace is None: + continue + orig = getattr(ref, name) + setattr(ref, name, replace) + ret.append((ref, name, orig)) + return ret diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py new file mode 100644 index 00000000..203dd2aa --- /dev/null +++ b/tests/unittests/test_handler/test_handler_apt_configure.py @@ -0,0 +1,103 @@ +from mocker import MockerTestCase + +from cloudinit import util + +from cloudinit.config import cc_apt_configure + +import os +import re + + +class TestAptProxyConfig(MockerTestCase): + def setUp(self): + super(TestAptProxyConfig, self).setUp() + self.tmp = self.makeDir() + self.pfile = os.path.join(self.tmp, "proxy.cfg") + self.cfile = os.path.join(self.tmp, "config.cfg") + + def _search_apt_config(self, contents, ptype, value): + return re.search( + r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value), + contents, flags=re.IGNORECASE) + + def test_apt_proxy_written(self): + cfg = {'apt_proxy': 'myproxy'} + cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) + + self.assertTrue(os.path.isfile(self.pfile)) + self.assertFalse(os.path.isfile(self.cfile)) + + contents = str(util.read_file_or_url(self.pfile)) + self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) + + def test_apt_http_proxy_written(self): + cfg = {'apt_http_proxy': 'myproxy'} + cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) + + self.assertTrue(os.path.isfile(self.pfile)) + self.assertFalse(os.path.isfile(self.cfile)) + + contents = str(util.read_file_or_url(self.pfile)) + self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) + + def test_apt_all_proxy_written(self): + cfg = {'apt_http_proxy': 'myproxy_http_proxy', + 'apt_https_proxy': 'myproxy_https_proxy', + 'apt_ftp_proxy': 'myproxy_ftp_proxy'} + + values = {'http': cfg['apt_http_proxy'], + 'https': cfg['apt_https_proxy'], + 'ftp': cfg['apt_ftp_proxy'], + } + + cc_apt_configure.apply_apt_config(cfg,
self.pfile, self.cfile) + + self.assertTrue(os.path.isfile(self.pfile)) + self.assertFalse(os.path.isfile(self.cfile)) + + contents = str(util.read_file_or_url(self.pfile)) + + for ptype, pval in values.iteritems(): + self.assertTrue(self._search_apt_config(contents, ptype, pval)) + + def test_proxy_deleted(self): + util.write_file(self.cfile, "content doesnt matter") + cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile) + self.assertFalse(os.path.isfile(self.pfile)) + self.assertFalse(os.path.isfile(self.cfile)) + + def test_proxy_replaced(self): + util.write_file(self.cfile, "content doesnt matter") + cc_apt_configure.apply_apt_config({'apt_proxy': "foo"}, + self.pfile, self.cfile) + self.assertTrue(os.path.isfile(self.pfile)) + contents = str(util.read_file_or_url(self.pfile)) + self.assertTrue(self._search_apt_config(contents, "http", "foo")) + + def test_config_written(self): + payload = 'this is my apt config' + cfg = {'apt_config': payload} + + cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) + + self.assertTrue(os.path.isfile(self.cfile)) + self.assertFalse(os.path.isfile(self.pfile)) + + self.assertEqual(str(util.read_file_or_url(self.cfile)), payload) + + def test_config_replaced(self): + util.write_file(self.pfile, "content doesnt matter") + cc_apt_configure.apply_apt_config({'apt_config': "foo"}, + self.pfile, self.cfile) + self.assertTrue(os.path.isfile(self.cfile)) + self.assertEqual(str(util.read_file_or_url(self.cfile)), "foo") + + def test_config_deleted(self): + # if no 'apt_config' is provided, delete any previously written file + util.write_file(self.pfile, "content doesnt matter") + cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile) + self.assertFalse(os.path.isfile(self.pfile)) + self.assertFalse(os.path.isfile(self.cfile)) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index b1b872b0..c0497e08 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -9,6 +9,7 @@ import errno import logging import os import re +import unittest # growpart: # mode: auto # off, on, auto, 'growpart', 'parted' @@ -121,6 +122,7 @@ class TestConfig(MockerTestCase): # Order must be correct self.mocker.order() + @unittest.skip("until LP: #1212444 fixed") def test_no_resizers_auto_is_fine(self): subp = self.mocker.replace(util.subp, passthrough=False) subp(['parted', '--help'], env={'LANG': 'C'}) @@ -142,6 +144,7 @@ class TestConfig(MockerTestCase): self.assertRaises(ValueError, self.handle, self.name, config, self.cloud_init, self.log, self.args) + @unittest.skip("until LP: #1212444 fixed") def test_mode_auto_prefers_parted(self): subp = self.mocker.replace(util.subp, passthrough=False) subp(['parted', '--help'], env={'LANG': 'C'}) diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py index 0ebb0484..b227616c 100644 --- a/tests/unittests/test_userdata.py +++ b/tests/unittests/test_userdata.py @@ -2,10 +2,13 @@ import StringIO +import gzip import logging import os from email.mime.base import MIMEBase +from email.mime.multipart import MIMEMultipart +from email.mime.application import MIMEApplication from cloudinit import handlers from cloudinit import helpers as c_helpers @@ -50,6 +53,64 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase): self._log.addHandler(self._log_handler) return log_file + def test_simple_jsonp(self): + blob = ''' 
+#cloud-config-jsonp +[ + { "op": "add", "path": "/baz", "value": "qux" }, + { "op": "add", "path": "/bar", "value": "qux2" } +] +''' + + ci = stages.Init() + ci.datasource = FakeDataSource(blob) + new_root = self.makeDir() + self.patchUtils(new_root) + self.patchOS(new_root) + ci.fetch() + ci.consume_userdata() + cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) + cc = util.load_yaml(cc_contents) + self.assertEquals(2, len(cc)) + self.assertEquals('qux', cc['baz']) + self.assertEquals('qux2', cc['bar']) + + def test_mixed_cloud_config(self): + blob_cc = ''' +#cloud-config +a: b +c: d +''' + message_cc = MIMEBase("text", "cloud-config") + message_cc.set_payload(blob_cc) + + blob_jp = ''' +#cloud-config-jsonp +[ + { "op": "replace", "path": "/a", "value": "c" }, + { "op": "remove", "path": "/c" } +] +''' + + message_jp = MIMEBase('text', "cloud-config-jsonp") + message_jp.set_payload(blob_jp) + + message = MIMEMultipart() + message.attach(message_cc) + message.attach(message_jp) + + ci = stages.Init() + ci.datasource = FakeDataSource(str(message)) + new_root = self.makeDir() + self.patchUtils(new_root) + self.patchOS(new_root) + ci.fetch() + ci.consume_userdata() + cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) + cc = util.load_yaml(cc_contents) + self.assertEquals(1, len(cc)) + self.assertEquals('c', cc['a']) + def test_merging_cloud_config(self): blob = ''' #cloud-config @@ -118,7 +179,7 @@ p: 1 ci.datasource = FakeDataSource(data) mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) + passthrough=False) mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) self.mocker.replay() @@ -129,6 +190,46 @@ p: 1 "Unhandled non-multipart (text/x-not-multipart) userdata:", log_file.getvalue()) + def test_mime_gzip_compressed(self): + """Tests that individual message gzip encoding works.""" + + def gzip_part(text): + contents = StringIO.StringIO() + f = gzip.GzipFile(fileobj=contents, mode='w') + f.write(str(text)) + f.flush() + f.close() + return MIMEApplication(contents.getvalue(), 'gzip') + + base_content1 = ''' +#cloud-config +a: 2 +''' + + base_content2 = ''' +#cloud-config +b: 3 +c: 4 +''' + + message = MIMEMultipart('test') + message.attach(gzip_part(base_content1)) + message.attach(gzip_part(base_content2)) + ci = stages.Init() + ci.datasource = FakeDataSource(str(message)) + new_root = self.makeDir() + self.patchUtils(new_root) + self.patchOS(new_root) + ci.fetch() + ci.consume_userdata() + contents = util.load_file(ci.paths.get_ipath("cloud_config")) + contents = util.load_yaml(contents) + self.assertTrue(isinstance(contents, dict)) + self.assertEquals(3, len(contents)) + self.assertEquals(2, contents['a']) + self.assertEquals(3, contents['b']) + self.assertEquals(4, contents['c']) + def test_mime_text_plain(self): """Mime message of type text/plain is ignored but shows warning.""" ci = stages.Init() @@ -137,7 +238,7 @@ p: 1 ci.datasource = FakeDataSource(message.as_string()) mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) + passthrough=False) mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) self.mocker.replay() @@ -156,7 +257,7 @@ p: 1 outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) + passthrough=False) mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) mock_write(outpath, script, 0700) self.mocker.replay() @@ -176,7 +277,7 @@ p: 1 outpath = 
os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) + passthrough=False) mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) mock_write(outpath, script, 0700) self.mocker.replay() diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 5853cb0f..87415cb5 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -6,6 +6,7 @@ import yaml from mocker import MockerTestCase from unittest import TestCase +from tests.unittests import helpers from cloudinit import importer from cloudinit import util @@ -250,50 +251,10 @@ class TestLoadYaml(TestCase): myobj) -class TestMountinfoParsing(TestCase): - precise_ext4_mountinfo = \ -"""15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=16422216k,nr_inodes=4105554,mode=755 -18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=6572812k,mode=755 -20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered -21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs cgroup rw,mode=755 -22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw -23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw -25 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw -26 19 0:19 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k -27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw -28 19 0:21 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 -24 21 0:18 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset -29 21 0:22 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu -30 21 0:23 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct -31 21 0:24 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory -32 21 0:25 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices -33 21 0:26 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer -34 21 0:27 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio -35 21 0:28 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event -36 20 9:0 / /boot rw,relatime - ext4 /dev/md0 rw,data=ordered -37 16 0:29 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw -39 28 0:30 / /run/user/foobar/gvfs rw,nosuid,nodev,relatime - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000""" - - raring_btrfs_mountinfo = \ -"""15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=865556k,nr_inodes=216389,mode=755 -18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=348196k,mode=755 -20 1 0:16 /@ / rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache -21 15 0:19 / /sys/fs/fuse/connections rw,relatime - fusectl none rw -22 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw -23 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw -24 19 0:20 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k -25 19 0:21 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw -26 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - 
tmpfs none rw,size=102400k,mode=755 -27 20 0:16 /@home /home rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache""" - +class TestMountinfoParsing(helpers.ResourceUsingTestCase): def test_invalid_mountinfo(self): - line = "20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered" + line = ("20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root " + "rw,errors=remount-ro,data=ordered") elements = line.split() for i in range(len(elements) + 1): lines = [' '.join(elements[0:i])] @@ -304,7 +265,7 @@ class TestMountinfoParsing(TestCase): self.assertEqual(expected, util.parse_mount_info('/', lines)) def test_precise_ext4_root(self): - lines = TestMountinfoParsing.precise_ext4_mountinfo.splitlines() + lines = self.readResource('mountinfo_precise_ext4.txt').splitlines() expected = ('/dev/mapper/vg0-root', 'ext4', '/') self.assertEqual(expected, util.parse_mount_info('/', lines)) @@ -326,7 +288,7 @@ class TestMountinfoParsing(TestCase): self.assertEqual(expected, util.parse_mount_info('/run/lock', lines)) def test_raring_btrfs_root(self): - lines = TestMountinfoParsing.raring_btrfs_mountinfo.splitlines() + lines = self.readResource('mountinfo_raring_btrfs.txt').splitlines() expected = ('/dev/vda1', 'btrfs', '/') self.assertEqual(expected, util.parse_mount_info('/', lines)) diff --git a/tools/read-dependencies b/tools/read-dependencies index cadb09a8..87db5d83 100755 --- a/tools/read-dependencies +++ b/tools/read-dependencies @@ -12,20 +12,20 @@ find_root() { [ $? -eq 0 -a -f "${topd}/setup.py" ] || return ROOT_DIR="$topd" } +fail() { echo "$0:" "$@" 1>&2; exit 1; } if ! find_root; then - echo "Unable to locate 'setup.py' file that should" \ - "exist in the cloud-init root directory." 1>&2 - exit 1; + fail "Unable to locate 'setup.py' file that should" \ + "exist in the cloud-init root directory." fi REQUIRES="$ROOT_DIR/Requires" if [ ! -e "$REQUIRES" ]; then - echo "Unable to find 'Requires' file located at $REQUIRES" - exit 1 + fail "Unable to find 'Requires' file located at '$REQUIRES'" fi -# Filter out comments and empty liens -DEPS=$(grep -Pv "^\s*#" "$REQUIRES" | grep -Pv '^\s*$') +# Filter out comments and empty lines +DEPS=$(sed -n -e 's,#.*,,' -e '/./p' "$REQUIRES") || + fail "failed to read deps from '${REQUIRES}'" echo "$DEPS" | sort -d -f diff --git a/tools/read-version b/tools/read-version index c76b24a9..c37228f8 100755 --- a/tools/read-version +++ b/tools/read-version @@ -12,20 +12,20 @@ find_root() { [ $? -eq 0 -a -f "${topd}/setup.py" ] || return ROOT_DIR="$topd" } +fail() { echo "$0:" "$@" 1>&2; exit 1; } if ! find_root; then - echo "Unable to locate 'setup.py' file that should" \ - "exist in the cloud-init root directory." 1>&2 - exit 1; + fail "Unable to locate 'setup.py' file that should" \ + "exist in the cloud-init root directory." fi CHNG_LOG="$ROOT_DIR/ChangeLog" -if [ ! -e "$CHNG_LOG" ] -then - echo "Unable to find 'ChangeLog' file located at $CHNG_LOG" - exit 1 +if [ ! -e "$CHNG_LOG" ]; then + fail "Unable to find 'ChangeLog' file located at '$CHNG_LOG'" fi -VERSION=$(grep -P "\d+.\d+.\d+:" "$CHNG_LOG" | cut -f1 -d ":" | head -n 1) +VERSION=$(sed -n '/^[0-9]\+[.][0-9]\+[.][0-9]\+:/ {s/://; p; :a;n; ba}' \ + "$CHNG_LOG") || + fail "failed to get version from '$CHNG_LOG'" echo "$VERSION"
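Two of the behaviors exercised by the tests above are worth spelling out. The new '#cloud-config-jsonp' part type in test_userdata.py applies RFC 6902 (JSON Patch) operations to the merged cloud-config dictionary. A minimal sketch of that merge semantics, assuming only the jsonpatch library; this snippet is illustrative and not code from the patch:

    import jsonpatch

    merged = {'a': 'b', 'c': 'd'}  # cloud-config accumulated so far
    ops = [{"op": "replace", "path": "/a", "value": "c"},
           {"op": "remove", "path": "/c"}]
    # apply_patch returns a new document; test_mixed_cloud_config
    # above expects the result {'a': 'c'}
    print jsonpatch.apply_patch(merged, ops)

Likewise, the reworked Azure password assertion in test_azure.py relies on crypt(3) hashes carrying their own salt: everything up to and including the final '$' of an $id$salt$encrypted string is the salt specification, so re-crypting the plaintext with that prefix must reproduce the full stored string. A self-contained sketch with made-up values ('$6$' selects SHA-512 on glibc; other libcs may not support it):

    import crypt

    hashed = crypt.crypt('mypassword', '$6$somesalt$')  # hypothetical salt
    pos = hashed.rfind('$') + 1
    # crypt ignores everything after the salt prefix, so this round-trips
    assert hashed == crypt.crypt('mypassword', hashed[0:pos])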