-rw-r--r--  ChangeLog | 18
-rwxr-xr-x  bin/cloud-init | 28
-rw-r--r--  cloudinit/cloud.py | 11
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py | 4
-rw-r--r--  cloudinit/config/cc_apt_update_upgrade.py | 136
-rw-r--r--  cloudinit/config/cc_bootcmd.py | 2
-rw-r--r--  cloudinit/config/cc_emit_upstart.py | 48
-rw-r--r--  cloudinit/config/cc_final_message.py | 6
-rw-r--r--  cloudinit/config/cc_landscape.py | 4
-rw-r--r--  cloudinit/config/cc_mounts.py | 2
-rw-r--r--  cloudinit/config/cc_puppet.py | 5
-rw-r--r--  cloudinit/config/cc_resizefs.py | 8
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py | 4
-rw-r--r--  cloudinit/config/cc_set_passwords.py | 16
-rw-r--r--  cloudinit/config/cc_ssh.py | 23
-rw-r--r--  cloudinit/config/cc_ssh_authkey_fingerprints.py | 96
-rw-r--r--  cloudinit/config/cc_ssh_import_id.py | 68
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py | 2
-rw-r--r--  cloudinit/config/cc_update_hostname.py | 2
-rw-r--r--  cloudinit/config/cc_users_groups.py | 78
-rw-r--r--  cloudinit/config/cc_write_files.py | 4
-rw-r--r--  cloudinit/distros/__init__.py | 285
-rw-r--r--  cloudinit/distros/debian.py | 27
-rw-r--r--  cloudinit/distros/fedora.py | 3
-rw-r--r--  cloudinit/distros/rhel.py | 4
-rw-r--r--  cloudinit/distros/ubuntu.py | 8
-rw-r--r--  cloudinit/filters/__init__.py | 21
-rw-r--r--  cloudinit/filters/launch_index.py | 75
-rw-r--r--  cloudinit/handlers/__init__.py | 2
-rw-r--r--  cloudinit/handlers/shell_script.py | 2
-rw-r--r--  cloudinit/log.py | 2
-rw-r--r--  cloudinit/settings.py | 5
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py | 299
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py | 6
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 326
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 48
-rw-r--r--  cloudinit/sources/DataSourceNone.py | 61
-rw-r--r--  cloudinit/sources/__init__.py | 44
-rw-r--r--  cloudinit/ssh_util.py | 94
-rw-r--r--  cloudinit/stages.py | 6
-rw-r--r--  cloudinit/user_data.py | 86
-rw-r--r--  cloudinit/util.py | 41
-rw-r--r--  config/cloud.cfg | 26
-rw-r--r--  doc/examples/cloud-config-archive-launch-index.txt | 30
-rw-r--r--  doc/examples/cloud-config-launch-index.txt | 23
-rw-r--r--  doc/examples/cloud-config-user-groups.txt | 94
-rw-r--r--  doc/examples/cloud-config.txt | 3
-rw-r--r--  doc/sources/altcloud/README | 65
-rw-r--r--  doc/sources/configdrive/README (renamed from doc/configdrive/README) | 0
-rw-r--r--  doc/sources/kernel-cmdline.txt (renamed from doc/kernel-cmdline.txt) | 0
-rw-r--r--  doc/sources/nocloud/README (renamed from doc/nocloud/README) | 0
-rw-r--r--  doc/sources/ovf/README (renamed from doc/ovf/README) | 0
-rw-r--r--  doc/sources/ovf/example/ovf-env.xml (renamed from doc/ovf/example/ovf-env.xml) | 0
-rw-r--r--  doc/sources/ovf/example/ubuntu-server.ovf (renamed from doc/ovf/example/ubuntu-server.ovf) | 0
-rwxr-xr-x  doc/sources/ovf/make-iso (renamed from doc/ovf/make-iso) | 0
-rw-r--r--  doc/sources/ovf/ovf-env.xml.tmpl (renamed from doc/ovf/ovf-env.xml.tmpl) | 0
-rw-r--r--  doc/sources/ovf/ovfdemo.pem (renamed from doc/ovf/ovfdemo.pem) | 0
-rw-r--r--  doc/sources/ovf/user-data (renamed from doc/ovf/user-data) | 0
-rw-r--r--  systemd/cloud-config.service | 6
-rw-r--r--  systemd/cloud-final.service | 6
-rw-r--r--  systemd/cloud-init-local.service | 6
-rw-r--r--  systemd/cloud-init.service | 6
-rw-r--r--  templates/sources.list.tmpl | 12
-rw-r--r--  tests/data/filter_cloud_multipart.yaml | 30
-rw-r--r--  tests/data/filter_cloud_multipart_1.email | 11
-rw-r--r--  tests/data/filter_cloud_multipart_2.email | 39
-rw-r--r--  tests/data/filter_cloud_multipart_header.email | 11
-rw-r--r--  tests/unittests/helpers.py | 42
-rw-r--r--  tests/unittests/test__init__.py | 14
-rw-r--r--  tests/unittests/test_builtin_handlers.py | 4
-rw-r--r--  tests/unittests/test_datasource/test_altcloud.py | 445
-rw-r--r--  tests/unittests/test_datasource/test_configdrive.py | 177
-rw-r--r--  tests/unittests/test_datasource/test_maas.py | 22
-rw-r--r--  tests/unittests/test_distros/test_generic.py | 121
-rw-r--r--  tests/unittests/test_filters/test_launch_index.py | 134
-rw-r--r--  tests/unittests/test_handler/test_handler_ca_certs.py | 18
-rw-r--r--  tests/unittests/test_userdata.py | 13
-rw-r--r--  tests/unittests/test_util.py | 4
-rwxr-xr-x  tools/hacking.py | 4
-rwxr-xr-x  tools/mock-meta.py | 24
-rw-r--r--  upstart/cloud-init-container.conf | 51
-rw-r--r--  upstart/cloud-init-nonet.conf | 2
82 files changed, 3041 insertions(+), 412 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 8ee50d2a..aca34d6e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,22 @@
0.7.0:
+ - do not 'start networking' in cloud-init-nonet, but add
+ cloud-init-container job that runs only if in container and emits
+ net-device-added (LP: #1031065)
+ - search only top level dns for 'instance-data' in DataSourceEc2 (LP: #1040200)
+ - add support for config-drive-v2 (LP: #1037567)
+ - support creating users, including the default user.
+ [Ben Howard] (LP: #1028503)
+ - add apt_reboot_if_required to reboot if an upgrade or package installation
+ forced the need for one (LP: #1038108)
+ - allow distro mirror selection to include availability-zone (LP: #1037727)
+ - allow arch specific mirror selection (select ports.ubuntu.com on arm)
+   (LP: #1028501)
+ - allow specification of security mirrors (LP: #1006963)
+ - add the 'None' datasource (LP: #906669), which will allow jobs
+ to run even if there is no "real" datasource found.
+ - write ssh authorized keys to console, ssh_authkey_fingerprints
+ config module [Joshua Harlow] (LP: #1010582)
+ - Added RHEVm and vSphere support via the new AltCloud datasource [Joseph VLcek]
- add write-files module (LP: #1012854)
- Add setuptools + cheetah to debian package build dependencies (LP: #1022101)
- Adjust the sysvinit local script to provide 'cloud-init-local' and have
diff --git a/bin/cloud-init b/bin/cloud-init
index 3ecc3dad..1f017475 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -100,29 +100,6 @@ def welcome_format(action):
return templater.render_string(WELCOME_MSG_TPL, tpl_params)
-def emit_cloud_config_ready(cfg, cfgpath):
- # emit the cloud config ready event
- # this can be used by upstart jobs for 'start on cloud-config'. There is a
- # builtin value for 'cc_ready_cmd' if that is not overidden by config, then
- # we check to make sure that /sbin/initctl is availble. This makes it so
- # that non-ubuntu distro will just no-op here with no explicit config.
- cmd = None
- if 'cc_ready_cmd' in cfg:
- cmd = cfg['cc_ready_cmd']
- if isinstance(cmd, str):
- cmd = ['sh', '-c', cmd]
- elif os.path.isfile("/sbin/initctl"):
- cmd = ['initctl', 'emit', 'cloud-config',
- 'CLOUD_CFG=%s' % cfgpath]
-
- if not cmd:
- return
- try:
- util.subp(cmd)
- except:
- LOG.warn("emission of cloud-config event failed")
-
-
def extract_fns(args):
# Files are already opened so let's just pass them along,
# since opening would have failed earlier if it couldn't have been done
@@ -361,11 +338,6 @@ def main_modules(action_name, args):
# now that logging is setup and stdout redirected, send welcome
welcome(name, msg=w_msg)
- # send the cloud-config event.
- if name == "config":
- emit_cloud_config_ready(mods.cfg,
- init.paths.get_ipath_cur("cloud_config"))
-
# Stage 5
return run_module_section(mods, name, name)
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 22d9167e..95e0cfb2 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -70,21 +70,22 @@ class Cloud(object):
return fn
# The rest of these are just useful proxies
- def get_userdata(self):
- return self.datasource.get_userdata()
+ def get_userdata(self, apply_filter=True):
+ return self.datasource.get_userdata(apply_filter)
def get_instance_id(self):
return self.datasource.get_instance_id()
+ @property
+ def launch_index(self):
+ return self.datasource.launch_index
+
def get_public_ssh_keys(self):
return self.datasource.get_public_ssh_keys()
def get_locale(self):
return self.datasource.get_locale()
- def get_local_mirror(self):
- return self.datasource.get_local_mirror()
-
def get_hostname(self, fqdn=False):
return self.datasource.get_hostname(fqdn=fqdn)
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 3426099e..02056ee0 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -16,8 +16,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit import util
from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
frequency = PER_INSTANCE
@@ -50,7 +50,7 @@ def handle(_name, cfg, cloud, log, _args):
def write_apt_snippet(cloud, setting, log, f_name):
- """ Writes f_name with apt pipeline depth 'setting' """
+ """Writes f_name with apt pipeline depth 'setting'."""
file_contents = APT_PIPE_TPL % (setting)
diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py
index 1bffa47d..356bb98d 100644
--- a/cloudinit/config/cc_apt_update_upgrade.py
+++ b/cloudinit/config/cc_apt_update_upgrade.py
@@ -20,6 +20,7 @@
import glob
import os
+import time
from cloudinit import templater
from cloudinit import util
@@ -50,20 +51,25 @@ def handle(name, cfg, cloud, log, _args):
upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False)
release = get_release()
- mirror = find_apt_mirror(cloud, cfg)
- if not mirror:
+ mirrors = find_apt_mirror_info(cloud, cfg)
+ if not mirrors or "primary" not in mirrors:
log.debug(("Skipping module named %s,"
" no package 'mirror' located"), name)
return
- log.debug("Selected mirror at: %s" % mirror)
+ # backwards compatibility
+ mirror = mirrors["primary"]
+ mirrors["mirror"] = mirror
+
+ log.debug("mirror info: %s" % mirrors)
if not util.get_cfg_option_bool(cfg,
'apt_preserve_sources_list', False):
- generate_sources_list(release, mirror, cloud, log)
- old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror',
- "archive.ubuntu.com/ubuntu")
- rename_apt_lists(old_mir, mirror)
+ generate_sources_list(release, mirrors, cloud, log)
+ old_mirrors = cfg.get('apt_old_mirrors',
+ {"primary": "archive.ubuntu.com/ubuntu",
+ "security": "security.ubuntu.com/ubuntu"})
+ rename_apt_lists(old_mirrors, mirrors)
# Set up any apt proxy
proxy = cfg.get("apt_proxy", None)
@@ -81,8 +87,10 @@ def handle(name, cfg, cloud, log, _args):
# Process 'apt_sources'
if 'apt_sources' in cfg:
- errors = add_sources(cloud, cfg['apt_sources'],
- {'MIRROR': mirror, 'RELEASE': release})
+ params = mirrors
+ params['RELEASE'] = release
+ params['MIRROR'] = mirror
+ errors = add_sources(cloud, cfg['apt_sources'], params)
for e in errors:
log.warn("Source Error: %s", ':'.join(e))
@@ -118,6 +126,20 @@ def handle(name, cfg, cloud, log, _args):
util.logexc(log, "Failed to install packages: %s ", pkglist)
errors.append(e)
+ # kernel and openssl (possibly some other packages)
+ # write a file /var/run/reboot-required after upgrading.
+ # if that file exists and configured, then just stop right now and reboot
+    # TODO(smoser): handle this less violently
+ reboot_file = "/var/run/reboot-required"
+ if ((upgrade or pkglist) and cfg.get("apt_reboot_if_required", False) and
+ os.path.isfile(reboot_file)):
+ log.warn("rebooting after upgrade or install per %s" % reboot_file)
+ time.sleep(1) # give the warning time to get out
+ util.subp(["/sbin/reboot"])
+ time.sleep(60)
+ log.warn("requested reboot did not happen!")
+ errors.append(Exception("requested reboot did not happen!"))
+
if len(errors):
log.warn("%s failed with exceptions, re-raising the last one",
len(errors))
@@ -146,15 +168,18 @@ def mirror2lists_fileprefix(mirror):
return string
-def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"):
- oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror))
- nprefix = os.path.join(lists_d, mirror2lists_fileprefix(new_mirror))
- if oprefix == nprefix:
- return
- olen = len(oprefix)
- for filename in glob.glob("%s_*" % oprefix):
- # TODO use the cloud.paths.join...
- util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
+def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"):
+ for (name, omirror) in old_mirrors.iteritems():
+ nmirror = new_mirrors.get(name)
+ if not nmirror:
+ continue
+ oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror))
+ nprefix = os.path.join(lists_d, mirror2lists_fileprefix(nmirror))
+ if oprefix == nprefix:
+ continue
+ olen = len(oprefix)
+ for filename in glob.glob("%s_*" % oprefix):
+ util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
def get_release():
@@ -162,14 +187,17 @@ def get_release():
return stdout.strip()
-def generate_sources_list(codename, mirror, cloud, log):
+def generate_sources_list(codename, mirrors, cloud, log):
template_fn = cloud.get_template_filename('sources.list')
- if template_fn:
- params = {'mirror': mirror, 'codename': codename}
- out_fn = cloud.paths.join(False, '/etc/apt/sources.list')
- templater.render_to_file(template_fn, out_fn, params)
- else:
+ if not template_fn:
log.warn("No template found, not rendering /etc/apt/sources.list")
+ return
+
+ params = {'codename': codename}
+ for k in mirrors:
+ params[k] = mirrors[k]
+ out_fn = cloud.paths.join(False, '/etc/apt/sources.list')
+ templater.render_to_file(template_fn, out_fn, params)
def add_sources(cloud, srclist, template_params=None):
@@ -231,43 +259,47 @@ def add_sources(cloud, srclist, template_params=None):
return errorlist
-def find_apt_mirror(cloud, cfg):
- """ find an apt_mirror given the cloud and cfg provided """
+def find_apt_mirror_info(cloud, cfg):
+ """find an apt_mirror given the cloud and cfg provided."""
mirror = None
- cfg_mirror = cfg.get("apt_mirror", None)
- if cfg_mirror:
- mirror = cfg["apt_mirror"]
- elif "apt_mirror_search" in cfg:
- mirror = util.search_for_mirror(cfg['apt_mirror_search'])
- else:
- mirror = cloud.get_local_mirror()
+    # This is the less preferred way of specifying a mirror; preferred
+    # would be to use the distro's search or package_mirror.
+ mirror = cfg.get("apt_mirror", None)
- mydom = ""
+ search = cfg.get("apt_mirror_search", None)
+ if not mirror and search:
+ mirror = util.search_for_mirror(search)
+ if (not mirror and
+ util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
+ mydom = ""
doms = []
- if not mirror:
- # if we have a fqdn, then search its domain portion first
- (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- mydom = ".".join(fqdn.split(".")[1:])
- if mydom:
- doms.append(".%s" % mydom)
+ # if we have a fqdn, then search its domain portion first
+ (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ mydom = ".".join(fqdn.split(".")[1:])
+ if mydom:
+ doms.append(".%s" % mydom)
+
+ doms.extend((".localdomain", "",))
- if (not mirror and
- util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
- doms.extend((".localdomain", "",))
+ mirror_list = []
+ distro = cloud.distro.name
+ mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
+ for post in doms:
+ mirror_list.append(mirrorfmt % (post))
- mirror_list = []
- distro = cloud.distro.name
- mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
- for post in doms:
- mirror_list.append(mirrorfmt % (post))
+ mirror = util.search_for_mirror(mirror_list)
- mirror = util.search_for_mirror(mirror_list)
+ mirror_info = cloud.datasource.get_package_mirror_info()
- if not mirror:
- mirror = cloud.distro.get_package_mirror()
+ # this is a bit strange.
+ # if mirror is set, then one of the legacy options above set it
+ # but they do not cover security. so we need to get that from
+ # get_package_mirror_info
+ if mirror:
+ mirror_info.update({'primary': mirror})
- return mirror
+ return mirror_info
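
For clarity, here is a minimal sketch (not part of the patch) of the mirrors dict that find_apt_mirror_info() now returns and that rename_apt_lists() consumes; the URLs are illustrative defaults, not values the code hardcodes:

    # Illustrative only: shape of the mirror info used above.
    mirrors = {
        'primary': 'http://archive.ubuntu.com/ubuntu',    # assumed URL
        'security': 'http://security.ubuntu.com/ubuntu',  # assumed URL
    }
    # handle() aliases 'primary' as 'mirror' for backwards compatibility.
    mirrors['mirror'] = mirrors['primary']

    old_mirrors = {'primary': 'archive.ubuntu.com/ubuntu',
                   'security': 'security.ubuntu.com/ubuntu'}
    # rename_apt_lists() pairs old and new mirrors by name:
    for name, omirror in old_mirrors.iteritems():
        nmirror = mirrors.get(name)
        if nmirror:
            print('%s: %s -> %s' % (name, omirror, nmirror))
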
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index bae1ea54..896cb4d0 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -20,8 +20,8 @@
import os
-from cloudinit import util
from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
frequency = PER_ALWAYS
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
new file mode 100644
index 00000000..6d376184
--- /dev/null
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -0,0 +1,48 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
+
+frequency = PER_ALWAYS
+
+distros = ['ubuntu', 'debian']
+
+
+def handle(name, _cfg, cloud, log, args):
+ event_names = args
+ if not event_names:
+ # Default to the 'cloud-config'
+ # event for backwards compat.
+ event_names = ['cloud-config']
+ if not os.path.isfile("/sbin/initctl"):
+ log.debug(("Skipping module named %s,"
+ " no /sbin/initctl located"), name)
+ return
+ cfgpath = cloud.paths.get_ipath_cur("cloud_config")
+ for n in event_names:
+ cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
+ try:
+ util.subp(cmd)
+ except Exception as e:
+ # TODO(harlowja), use log exception from utils??
+ log.warn("Emission of upstart event %s failed due to: %s", n, e)
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index aff03c4e..6b864fda 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -28,7 +28,7 @@ frequency = PER_ALWAYS
# Cheetah-formatted default message
FINAL_MESSAGE_DEF = ("Cloud-init v. ${version} finished at ${timestamp}."
- " Up ${uptime} seconds.")
+ " Datasource ${datasource}. Up ${uptime} seconds")
def handle(_name, cfg, cloud, log, args):
@@ -51,6 +51,7 @@ def handle(_name, cfg, cloud, log, args):
'uptime': uptime,
'timestamp': ts,
'version': cver,
+ 'datasource': str(cloud.datasource),
}
util.multi_log("%s\n" % (templater.render_string(msg_in, subs)),
console=False, stderr=True)
@@ -63,3 +64,6 @@ def handle(_name, cfg, cloud, log, args):
util.write_file(boot_fin_fn, contents)
except:
util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
+
+ if cloud.datasource.is_disconnected:
+ log.warn("Used fallback datasource")
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 906a6ff7..7cfb8296 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -31,6 +31,7 @@ from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
+LS_DEFAULT_FILE = "/etc/default/landscape-client"
distros = ['ubuntu']
@@ -78,6 +79,9 @@ def handle(_name, cfg, cloud, log, _args):
util.write_file(lsc_client_fn, contents.getvalue())
log.debug("Wrote landscape config file to %s", lsc_client_fn)
+ if ls_cloudcfg:
+ util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
+
def merge_together(objs):
"""
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index d3dcf7af..14c965bb 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -92,7 +92,7 @@ def handle(_name, cfg, cloud, log, _args):
# in case the user did not quote a field (likely fs-freq, fs_passno)
# but do not convert None to 'None' (LP: #898365)
for j in range(len(cfgmnt[i])):
- if j is None:
+ if cfgmnt[i][j] is None:
continue
else:
cfgmnt[i][j] = str(cfgmnt[i][j])
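
A small standalone sketch of why the old check never fired: j is a list index and can never be None, so the guard has to inspect the element itself:

    cfgmnt = [['ephemeral0', '/mnt', 'auto', 'defaults,nobootwait', 0, None]]
    for i in range(len(cfgmnt)):
        for j in range(len(cfgmnt[i])):
            if cfgmnt[i][j] is None:  # the fixed check; 'j is None' never held
                continue
            cfgmnt[i][j] = str(cfgmnt[i][j])
    print(cfgmnt)  # None survives untouched; other fields are stringified
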
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 467c1496..74ee18e1 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -48,7 +48,8 @@ def handle(name, cfg, cloud, log, _args):
# Create object for reading puppet.conf values
puppet_config = helpers.DefaultingConfigParser()
# Read puppet.conf values from original file in order to be able to
- # mix the rest up. First clean them up (TODO is this really needed??)
+ # mix the rest up. First clean them up
+ # (TODO(harlowja) is this really needed??)
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
cleaned_contents = '\n'.join(cleaned_lines)
puppet_config.readfp(StringIO(cleaned_contents),
@@ -80,7 +81,7 @@ def handle(name, cfg, cloud, log, _args):
for (o, v) in cfg.iteritems():
if o == 'certname':
# Expand %f as the fqdn
- # TODO should this use the cloud fqdn??
+ # TODO(harlowja) should this use the cloud fqdn??
v = v.replace("%f", socket.getfqdn())
# Expand %i as the instance id
v = v.replace("%i", cloud.get_instance_id())
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 256a194f..e7f27944 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -22,8 +22,8 @@ import os
import stat
import time
-from cloudinit import util
from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
frequency = PER_ALWAYS
@@ -72,12 +72,12 @@ def handle(name, cfg, cloud, log, args):
log.debug("Skipping module named %s, resizing disabled", name)
return
- # TODO is the directory ok to be used??
+ # TODO(harlowja) is the directory ok to be used??
resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
resize_root_d = cloud.paths.join(False, resize_root_d)
util.ensure_dir(resize_root_d)
- # TODO: allow what is to be resized to be configurable??
+ # TODO(harlowja): allow what is to be resized to be configurable??
resize_what = cloud.paths.join(False, "/")
with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.",
dir=resize_root_d, delete=True) as tfh:
@@ -136,5 +136,5 @@ def do_resize(resize_cmd, log):
raise
tot_time = time.time() - start
log.debug("Resizing took %.3f seconds", tot_time)
- # TODO: Should we add a fsck check after this to make
+ # TODO(harlowja): Should we add a fsck check after this to make
# sure we didn't corrupt anything?
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 45d41b3f..4bf18516 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -37,9 +37,9 @@
import os
+from cloudinit.settings import PER_INSTANCE
from cloudinit import url_helper as uhelp
from cloudinit import util
-from cloudinit.settings import PER_INSTANCE
from urlparse import parse_qs
@@ -72,7 +72,7 @@ def handle(name, _cfg, cloud, log, _args):
captured_excps = []
# These will eventually be then ran by the cc_scripts_user
- # TODO: maybe this should just be a new user data handler??
+ # TODO(harlowja): maybe this should just be a new user data handler??
# Instead of a late module that acts like a user data handler?
scripts_d = cloud.get_ipath_cur('scripts')
urls = mdict[MY_HOOKNAME]
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index ab266741..a017e6b6 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -50,8 +50,20 @@ def handle(_name, cfg, cloud, log, args):
expire = util.get_cfg_option_bool(chfg, 'expire', expire)
if not plist and password:
- user = util.get_cfg_option_str(cfg, "user", "ubuntu")
- plist = "%s:%s" % (user, password)
+ user = cloud.distro.get_default_user()
+
+ if 'users' in cfg:
+
+ user_zero = cfg['users'][0]
+
+ if isinstance(user_zero, dict) and 'name' in user_zero:
+ user = user_zero['name']
+
+ if user:
+ plist = "%s:%s" % (user, password)
+
+ else:
+ log.warn("No default or defined user to change password for.")
errors = []
if plist:
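
A sketch of the new user selection, with a stand-in value for cloud.distro.get_default_user() (assumed, for illustration only):

    cfg = {'password': 'secret', 'users': [{'name': 'foobar'}]}
    user = 'ubuntu'  # stand-in for cloud.distro.get_default_user()
    if 'users' in cfg:
        user_zero = cfg['users'][0]
        if isinstance(user_zero, dict) and 'name' in user_zero:
            user = user_zero['name']
    if user:
        plist = '%s:%s' % (user, cfg['password'])  # -> 'foobar:secret'
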
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 4019ae90..0ded62ba 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -18,11 +18,11 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import os
import glob
+import os
-from cloudinit import util
from cloudinit import ssh_util
+from cloudinit import util
DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding,"
"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" "
@@ -76,7 +76,7 @@ def handle(_name, cfg, cloud, log, _args):
pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0])
cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
try:
- # TODO: Is this guard needed?
+ # TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
util.subp(cmd, capture=False)
log.debug("Generated a key for %s from %s", pair[0], pair[1])
@@ -94,7 +94,7 @@ def handle(_name, cfg, cloud, log, _args):
if not os.path.exists(keyfile):
cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
try:
- # TODO: Is this guard needed?
+ # TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
util.subp(cmd, capture=False)
except:
@@ -102,7 +102,16 @@ def handle(_name, cfg, cloud, log, _args):
" %s to file %s"), keytype, keyfile)
try:
- user = util.get_cfg_option_str(cfg, 'user')
+ # TODO(utlemming): consolidate this stanza that occurs in:
+ # cc_ssh_import_id, cc_set_passwords, maybe cc_users_groups.py
+ user = cloud.distro.get_default_user()
+
+ if 'users' in cfg:
+ user_zero = cfg['users'][0]
+
+            if isinstance(user_zero, dict) and 'name' in user_zero:
+                user = user_zero['name']
+            elif user_zero != "default":
+                user = user_zero
+
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
DISABLE_ROOT_OPTS)
@@ -124,7 +133,9 @@ def apply_credentials(keys, user, paths, disable_root, disable_root_opts):
if user:
ssh_util.setup_user_keys(keys, user, '', paths)
- if disable_root and user:
+ if disable_root:
+ if not user:
+ user = "NONE"
key_prefix = disable_root_opts.replace('$USER', user)
else:
key_prefix = ''
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
new file mode 100644
index 00000000..23f5755a
--- /dev/null
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -0,0 +1,96 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import base64
+import hashlib
+
+from prettytable import PrettyTable
+
+from cloudinit import ssh_util
+from cloudinit import util
+
+
+def _split_hash(bin_hash):
+ split_up = []
+ for i in xrange(0, len(bin_hash), 2):
+ split_up.append(bin_hash[i:i + 2])
+ return split_up
+
+
+def _gen_fingerprint(b64_text, hash_meth='md5'):
+ if not b64_text:
+ return ''
+ # TBD(harlowja): Maybe we should feed this into 'ssh -lf'?
+ try:
+ hasher = hashlib.new(hash_meth)
+ hasher.update(base64.b64decode(b64_text))
+ return ":".join(_split_hash(hasher.hexdigest()))
+ except TypeError:
+ # Raised when b64 not really b64...
+ return '?'
+
+
+def _is_printable_key(entry):
+ if any([entry.keytype, entry.base64, entry.comment, entry.options]):
+ if (entry.keytype and
+ entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']):
+ return True
+ return False
+
+
+def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
+ prefix='ci-info: '):
+ if not key_entries:
+ message = ("%sno authorized ssh keys fingerprints found for user %s."
+ % (prefix, user))
+ util.multi_log(message)
+ return
+ tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
+ 'Comment']
+ tbl = PrettyTable(tbl_fields)
+ for entry in key_entries:
+ if _is_printable_key(entry):
+ row = []
+ row.append(entry.keytype or '-')
+ row.append(_gen_fingerprint(entry.base64, hash_meth) or '-')
+ row.append(entry.options or '-')
+ row.append(entry.comment or '-')
+ tbl.add_row(row)
+ authtbl_s = tbl.get_string()
+ authtbl_lines = authtbl_s.splitlines()
+ max_len = len(max(authtbl_lines, key=len))
+ lines = [
+ util.center("Authorized keys from %s for user %s" %
+ (key_fn, user), "+", max_len),
+ ]
+ lines.extend(authtbl_lines)
+ for line in lines:
+ util.multi_log(text="%s%s\n" % (prefix, line),
+ stderr=False, console=True)
+
+
+def handle(name, cfg, cloud, log, _args):
+ if 'no_ssh_fingerprints' in cfg:
+ log.debug(("Skipping module named %s, "
+ "logging of ssh fingerprints disabled"), name)
+
+ user_name = util.get_cfg_option_str(cfg, "user", "ubuntu")
+ hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
+ extract = ssh_util.extract_authorized_keys
+ (auth_key_fn, auth_key_entries) = extract(user_name, cloud.paths)
+ _pprint_key_entries(user_name, auth_key_fn, auth_key_entries, hash_meth)
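
The fingerprinting itself is plain hashlib over the decoded key material. A self-contained equivalent of _gen_fingerprint/_split_hash above (same scheme; the key string is a hypothetical base64 fragment):

    import base64
    import hashlib

    def fingerprint(b64_text, hash_meth='md5'):
        hasher = hashlib.new(hash_meth)
        hasher.update(base64.b64decode(b64_text))
        digest = hasher.hexdigest()
        # Pair up hex digits and join with colons, e.g. 'ab:cd:...'
        return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))

    # fingerprint('AAAAB3NzaC1yc2EAAAADAQAB') -> 'xx:xx:...:xx'-style string
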
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index c58b28ec..08fb63c6 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -19,35 +19,83 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit import util
+import pwd
# The ssh-import-id only seems to exist on ubuntu (for now)
# https://launchpad.net/ssh-import-id
distros = ['ubuntu']
-def handle(name, cfg, _cloud, log, args):
+def handle(_name, cfg, cloud, log, args):
+
+ # import for "user: XXXXX"
if len(args) != 0:
user = args[0]
ids = []
if len(args) > 1:
ids = args[1:]
- else:
- user = util.get_cfg_option_str(cfg, "user", "ubuntu")
- ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
- if len(ids) == 0:
- log.debug("Skipping module named %s, no ids found to import", name)
+ import_ssh_ids(ids, user, log)
return
- if not user:
- log.debug("Skipping module named %s, no user found to import", name)
+ # import for cloudinit created users
+ elist = []
+ for user_cfg in cfg['users']:
+ user = None
+ import_ids = []
+
+ if isinstance(user_cfg, str) and user_cfg == "default":
+ user = cloud.distro.get_default_user()
+ if not user:
+ continue
+
+ import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
+
+ elif isinstance(user_cfg, dict):
+ user = None
+ import_ids = []
+
+ try:
+ user = user_cfg['name']
+ import_ids = user_cfg['ssh_import_id']
+
+ if import_ids and isinstance(import_ids, str):
+ import_ids = str(import_ids).split(',')
+
+ except:
+ log.debug("user %s is not configured for ssh_import" % user)
+ continue
+
+ if not len(import_ids):
+ continue
+
+ try:
+ import_ssh_ids(import_ids, user, log)
+ except Exception as exc:
+ util.logexc(log, "ssh-import-id failed for: %s %s" %
+ (user, import_ids), exc)
+ elist.append(exc)
+
+ if len(elist):
+ raise elist[0]
+
+
+def import_ssh_ids(ids, user, log):
+
+ if not (user and ids):
+ log.debug("empty user(%s) or ids(%s). not importing", user, ids)
return
+    # pwd.getpwnam raises KeyError if the user does not exist.
+    pwd.getpwnam(user)
+
cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
log.debug("Importing ssh ids for user %s.", user)
try:
util.subp(cmd, capture=False)
- except util.ProcessExecutionError as e:
+ except util.ProcessExecutionError as exc:
util.logexc(log, "Failed to run command to import %s ssh ids", user)
- raise e
+ raise exc
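
For reference, a sketch of the two accepted per-user forms and the resulting command (user and ids are hypothetical):

    # 'ssh_import_id' may be a list or a comma-separated string:
    user_cfg = {'name': 'foobar', 'ssh_import_id': 'lp_id1,lp_id2'}
    import_ids = user_cfg['ssh_import_id']
    if isinstance(import_ids, str):
        import_ids = import_ids.split(',')
    cmd = ['sudo', '-Hu', user_cfg['name'], 'ssh-import-id'] + import_ids
    # -> sudo -Hu foobar ssh-import-id lp_id1 lp_id2
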
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index 38108da7..4d75000f 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -18,8 +18,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit import util
from cloudinit import templater
+from cloudinit import util
from cloudinit.settings import PER_ALWAYS
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index b84a1a06..1d6679ea 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -20,8 +20,8 @@
import os
-from cloudinit import util
from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
frequency = PER_ALWAYS
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
new file mode 100644
index 00000000..418f3330
--- /dev/null
+++ b/cloudinit/config/cc_users_groups.py
@@ -0,0 +1,78 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+
+
+def handle(name, cfg, cloud, log, _args):
+ user_zero = None
+
+ if 'groups' in cfg:
+ for group in cfg['groups']:
+ if isinstance(group, dict):
+                for group_name, members in group.iteritems():
+                    if isinstance(members, list):
+                        cloud.distro.create_group(group_name, members)
+                    elif isinstance(members, str):
+                        cloud.distro.create_group(group_name,
+                                                  members.split(','))
+            else:
+                cloud.distro.create_group(group, [])
+
+ if 'users' in cfg:
+ user_zero = None
+
+ for user_config in cfg['users']:
+
+ # Handle the default user creation
+ if 'default' in user_config:
+ log.info("Creating default user")
+
+ # Create the default user if so defined
+ try:
+ cloud.distro.add_default_user()
+
+ if not user_zero:
+ user_zero = cloud.distro.get_default_user()
+
+                except NotImplementedError:
+                    # Nothing was created, so there is no default
+                    # user to track as user_zero.
+                    log.warn("Distro has not implemented default user "
+                             "creation. No default user will be created")
+
+ elif isinstance(user_config, dict) and 'name' in user_config:
+
+ name = user_config['name']
+ if not user_zero:
+ user_zero = name
+
+ # Make options friendly for distro.create_user
+ new_opts = {}
+ if isinstance(user_config, dict):
+ for opt in user_config:
+ new_opts[opt.replace('-', '_')] = user_config[opt]
+
+ cloud.distro.create_user(**new_opts)
+
+ else:
+ # create user with no configuration
+ cloud.distro.create_user(user_config)
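
A sketch of the option normalization: dashes in a cloud-config user entry become underscores so the keys line up with create_user() keyword arguments (entry contents are hypothetical):

    user_config = {
        'name': 'foobar',
        'gecos': 'Foo B. Bar',
        'lock-passwd': True,
        'ssh-import-id': 'foobar',
    }
    new_opts = {}
    for opt in user_config:
        new_opts[opt.replace('-', '_')] = user_config[opt]
    # new_opts -> {'name': ..., 'gecos': ..., 'lock_passwd': True,
    #              'ssh_import_id': 'foobar'}
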
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 1bfa4c25..a73d6f4e 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -19,8 +19,8 @@
import base64
import os
-from cloudinit import util
from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
frequency = PER_INSTANCE
@@ -46,7 +46,7 @@ def canonicalize_extraction(encoding_type, log):
return ['application/x-gzip']
if encoding_type in ['gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64']:
return ['application/base64', 'application/x-gzip']
- # Yaml already encodes binary data as base64 if it is given to the
+ # Yaml already encodes binary data as base64 if it is given to the
# yaml file as binary, so those will be automatically decoded for you.
# But the above b64 is just for people that are more 'comfortable'
# specifying it manually (which might be a possibility)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index da4d0180..3e9d934d 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -7,6 +7,7 @@
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Ben Howard <ben.howard@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -23,12 +24,17 @@
from StringIO import StringIO
import abc
+import grp
+import os
+import pwd
+import re
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import ssh_util
from cloudinit import util
-# TODO: Make this via config??
+# TODO(harlowja): Make this via config??
IFACE_ACTIONS = {
'up': ['ifup', '--all'],
'down': ['ifdown', '--all'],
@@ -40,12 +46,42 @@ LOG = logging.getLogger(__name__)
class Distro(object):
__metaclass__ = abc.ABCMeta
+ default_user = None
+ default_user_groups = None
def __init__(self, name, cfg, paths):
self._paths = paths
self._cfg = cfg
self.name = name
+ def add_default_user(self):
+ # Adds the distro user using the rules:
+ # - Password is same as username but is locked
+ # - nopasswd sudo access
+
+ user = self.get_default_user()
+ groups = self.get_default_user_groups()
+
+ if not user:
+ raise NotImplementedError("No Default user")
+
+ user_dict = {
+ 'name': user,
+ 'plain_text_passwd': user,
+ 'home': "/home/%s" % user,
+ 'shell': "/bin/bash",
+ 'lock_passwd': True,
+ 'gecos': "%s%s" % (user[0:1].upper(), user[1:]),
+ 'sudo': "ALL=(ALL) NOPASSWD:ALL",
+ }
+
+ if groups:
+ user_dict['groups'] = groups
+
+ self.create_user(**user_dict)
+
+ LOG.info("Added default '%s' user with passwordless sudo", user)
+
@abc.abstractmethod
def install_packages(self, pkglist):
raise NotImplementedError()
@@ -75,8 +111,26 @@ class Distro(object):
def update_package_sources(self):
raise NotImplementedError()
- def get_package_mirror(self):
- return self.get_option('package_mirror')
+ def get_primary_arch(self):
+        arch = os.uname()[4]
+ if arch in ("i386", "i486", "i586", "i686"):
+ return "i386"
+ return arch
+
+ def _get_arch_package_mirror_info(self, arch=None):
+ mirror_info = self.get_option("package_mirrors", None)
+        if arch is None:
+ arch = self.get_primary_arch()
+ return _get_arch_package_mirror_info(mirror_info, arch)
+
+ def get_package_mirror_info(self, arch=None,
+ availability_zone=None):
+ # this resolves the package_mirrors config option
+ # down to a single dict of {mirror_name: mirror_url}
+ arch_info = self._get_arch_package_mirror_info(arch)
+
+ return _get_package_mirror_info(availability_zone=availability_zone,
+ mirror_info=arch_info)
def apply_network(self, settings, bring_up=True):
# Write it out
@@ -150,6 +204,231 @@ class Distro(object):
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
+ def isuser(self, name):
+ try:
+ if pwd.getpwnam(name):
+ return True
+ except KeyError:
+ return False
+
+ def get_default_user(self):
+ return self.default_user
+
+ def get_default_user_groups(self):
+ return self.default_user_groups
+
+ def create_user(self, name, **kwargs):
+ """
+        Creates users for the system using the GNU passwd tools. This
+        will work on a GNU system. This should be overridden on
+        distros where useradd is not desirable or not available.
+ """
+
+ adduser_cmd = ['useradd', name]
+ x_adduser_cmd = ['useradd', name]
+
+ # Since we are creating users, we want to carefully validate the
+ # inputs. If something goes wrong, we can end up with a system
+        # that nobody can log in to.
+ adduser_opts = {
+ "gecos": '--comment',
+ "homedir": '--home',
+ "primary_group": '--gid',
+ "groups": '--groups',
+ "passwd": '--password',
+ "shell": '--shell',
+ "expiredate": '--expiredate',
+ "inactive": '--inactive',
+ "selinux_user": '--selinux-user',
+ }
+
+ adduser_opts_flags = {
+ "no_user_group": '--no-user-group',
+ "system": '--system',
+ "no_log_init": '--no-log-init',
+ "no_create_home": "-M",
+ }
+
+ # Now check the value and create the command
+ for option in kwargs:
+ value = kwargs[option]
+ if option in adduser_opts and value \
+ and isinstance(value, str):
+ adduser_cmd.extend([adduser_opts[option], value])
+
+ # Redact the password field from the logs
+ if option != "password":
+ x_adduser_cmd.extend([adduser_opts[option], value])
+ else:
+ x_adduser_cmd.extend([adduser_opts[option], 'REDACTED'])
+
+ elif option in adduser_opts_flags and value:
+ adduser_cmd.append(adduser_opts_flags[option])
+ x_adduser_cmd.append(adduser_opts_flags[option])
+
+ # Default to creating home directory unless otherwise directed
+ # Also, we do not create home directories for system users.
+ if "no_create_home" not in kwargs and "system" not in kwargs:
+ adduser_cmd.append('-m')
+
+ # Create the user
+ if self.isuser(name):
+ LOG.warn("User %s already exists, skipping." % name)
+ else:
+ LOG.debug("Creating name %s" % name)
+ try:
+ util.subp(adduser_cmd, logstring=x_adduser_cmd)
+ except Exception as e:
+ util.logexc(LOG, "Failed to create user %s due to error.", e)
+ raise e
+
+ # Set password if plain-text password provided
+ if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
+ self.set_passwd(name, kwargs['plain_text_passwd'])
+
+        # Lock the account by default unless lock_passwd is explicitly
+        # set to False; system accounts are never locked here.
+        if kwargs.get('lock_passwd', True) and 'system' not in kwargs:
+            try:
+                util.subp(['passwd', '--lock', name])
+            except Exception as e:
+                util.logexc(LOG, ("Failed to disable password logins"
+                                  " for user %s"), name)
+                raise e
+
+ # Configure sudo access
+ if 'sudo' in kwargs:
+ self.write_sudo_rules(name, kwargs['sudo'])
+
+ # Import SSH keys
+ if 'ssh_authorized_keys' in kwargs:
+ keys = set(kwargs['ssh_authorized_keys']) or []
+ ssh_util.setup_user_keys(keys, name, None, self._paths)
+
+ return True
+
+ def set_passwd(self, user, passwd, hashed=False):
+ pass_string = '%s:%s' % (user, passwd)
+ cmd = ['chpasswd']
+
+ if hashed:
+ cmd.append('--encrypted')
+
+ try:
+ util.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
+ except Exception as e:
+ util.logexc(LOG, "Failed to set password for %s" % user)
+ raise e
+
+ return True
+
+ def write_sudo_rules(self,
+ user,
+ rules,
+ sudo_file="/etc/sudoers.d/90-cloud-init-users",
+ ):
+
+ content_header = "# user rules for %s" % user
+ content = "%s\n%s %s\n\n" % (content_header, user, rules)
+
+ if isinstance(rules, list):
+ content = "%s\n" % content_header
+ for rule in rules:
+ content += "%s %s\n" % (user, rule)
+ content += "\n"
+
+ if not os.path.exists(sudo_file):
+ util.write_file(sudo_file, content, 0644)
+
+ else:
+ try:
+ with open(sudo_file, 'a') as f:
+ f.write(content)
+ except IOError as e:
+ util.logexc(LOG, "Failed to write %s" % sudo_file, e)
+ raise e
+
+ def isgroup(self, name):
+ try:
+ if grp.getgrnam(name):
+ return True
+ except:
+ return False
+
+ def create_group(self, name, members):
+ group_add_cmd = ['groupadd', name]
+
+        # Check if the group exists, and add it if it doesn't
+ if self.isgroup(name):
+ LOG.warn("Skipping creation of existing group '%s'" % name)
+ else:
+ try:
+ util.subp(group_add_cmd)
+ LOG.info("Created new group %s" % name)
+ except Exception as e:
+ util.logexc("Failed to create group %s" % name, e)
+
+ # Add members to the group, if so defined
+ if len(members) > 0:
+ for member in members:
+ if not self.isuser(member):
+ LOG.warn("Unable to add group member '%s' to group '%s'"
+ "; user does not exist." % (member, name))
+ continue
+
+ util.subp(['usermod', '-a', '-G', name, member])
+ LOG.info("Added user '%s' to group '%s'" % (member, name))
+
+
+def _get_package_mirror_info(mirror_info, availability_zone=None,
+ mirror_filter=util.search_for_mirror):
+    # given an arch-specific 'mirror_info' entry (from package_mirrors)
+ # search through the 'search' entries, and fallback appropriately
+ # return a dict with only {name: mirror} entries.
+
+ ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" %
+ "north|northeast|east|southeast|south|southwest|west|northwest")
+
+ subst = {}
+ if availability_zone:
+ subst['availability_zone'] = availability_zone
+
+ if availability_zone and re.match(ec2_az_re, availability_zone):
+ subst['ec2_region'] = "%s" % availability_zone[0:-1]
+
+ results = {}
+ for (name, mirror) in mirror_info.get('failsafe', {}).iteritems():
+ results[name] = mirror
+
+ for (name, searchlist) in mirror_info.get('search', {}).iteritems():
+ mirrors = []
+ for tmpl in searchlist:
+ try:
+ mirrors.append(tmpl % subst)
+ except KeyError:
+ pass
+
+ found = mirror_filter(mirrors)
+ if found:
+ results[name] = found
+
+ LOG.debug("filtered distro mirror info: %s" % results)
+
+ return results
+
+
+def _get_arch_package_mirror_info(package_mirrors, arch):
+ # pull out the specific arch from a 'package_mirrors' config option
+ default = None
+ for item in package_mirrors:
+ arches = item.get("arches")
+ if arch in arches:
+ return item
+ if "default" in arches:
+ default = item
+ return default
+
def fetch(name):
locs = importer.find_module(name,
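
To make the substitution concrete, a sketch of how _get_package_mirror_info() expands a search template for an EC2 availability zone (the template URL is an assumed example, not taken from this patch):

    import re

    ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" %
                 ("north|northeast|east|southeast|south|southwest|west|"
                  "northwest"))

    az = 'us-east-1b'  # assumed availability zone
    subst = {'availability_zone': az}
    if re.match(ec2_az_re, az):
        subst['ec2_region'] = az[0:-1]  # strip the zone letter

    tmpl = 'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/'
    print(tmpl % subst)  # http://us-east-1.ec2.archive.ubuntu.com/ubuntu/
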
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 3247d7ce..5b4aa9f8 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -46,11 +46,8 @@ class Distro(distros.Distro):
out_fn = self._paths.join(False, '/etc/default/locale')
util.subp(['locale-gen', locale], capture=False)
util.subp(['update-locale', locale], capture=False)
- contents = [
- "# Created by cloud-init",
- 'LANG="%s"' % (locale),
- ]
- util.write_file(out_fn, "\n".join(contents))
+ lines = ["# Created by cloud-init", 'LANG="%s"' % (locale), ""]
+ util.write_file(out_fn, "\n".join(lines))
def install_packages(self, pkglist):
self.update_package_sources()
@@ -69,11 +66,8 @@ class Distro(distros.Distro):
util.subp(['hostname', hostname])
def _write_hostname(self, hostname, out_fn):
- lines = []
- lines.append("# Created by cloud-init")
- lines.append(str(hostname))
- contents = "\n".join(lines)
- util.write_file(out_fn, contents, 0644)
+ # "" gives trailing newline.
+ util.write_file(out_fn, "%s\n" % str(hostname), 0644)
def update_hostname(self, hostname, prev_fn):
hostname_prev = self._read_hostname(prev_fn)
@@ -123,13 +117,10 @@ class Distro(distros.Distro):
if not os.path.isfile(tz_file):
raise RuntimeError(("Invalid timezone %s,"
" no file found at %s") % (tz, tz_file))
- tz_lines = [
- "# Created by cloud-init",
- str(tz),
- ]
- tz_contents = "\n".join(tz_lines)
+ # "" provides trailing newline during join
+ tz_lines = ["# Created by cloud-init", str(tz), ""]
tz_fn = self._paths.join(False, "/etc/timezone")
- util.write_file(tz_fn, tz_contents)
+ util.write_file(tz_fn, "\n".join(tz_lines))
util.copy(tz_file, self._paths.join(False, "/etc/localtime"))
def package_command(self, command, args=None):
@@ -147,3 +138,7 @@ class Distro(distros.Distro):
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
["update"], freq=PER_INSTANCE)
+
+ def get_primary_arch(self):
+ (arch, _err) = util.subp(['dpkg', '--print-architecture'])
+ return str(arch).strip()
diff --git a/cloudinit/distros/fedora.py b/cloudinit/distros/fedora.py
index c777845d..9f76a116 100644
--- a/cloudinit/distros/fedora.py
+++ b/cloudinit/distros/fedora.py
@@ -28,4 +28,5 @@ LOG = logging.getLogger(__name__)
class Distro(rhel.Distro):
- pass
+ distro_name = 'fedora'
+ default_user = 'ec2-user'
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 700a98a4..b77f1b70 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -81,7 +81,7 @@ class Distro(distros.Distro):
util.write_file(resolve_rw_fn, "\n".join(contents), 0644)
def _write_network(self, settings):
- # TODO fix this... since this is the ubuntu format
+ # TODO(harlowja) fix this... since this is the ubuntu format
entries = translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
@@ -278,7 +278,7 @@ class QuotingConfigObj(ConfigObj):
# This is a util function to translate a ubuntu /etc/network/interfaces 'blob'
# to a rhel equiv. that can then be written to /etc/sysconfig/network-scripts/
-# TODO remove when we have python-netcf active...
+# TODO(harlowja) remove when we have python-netcf active...
def translate_network(settings):
# Get the standard cmd, args from the ubuntu format
entries = []
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index 77c2aff4..22f8c2c5 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -7,6 +7,7 @@
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Ben Howard <ben.howard@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -21,11 +22,14 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit.distros import debian
-
from cloudinit import log as logging
LOG = logging.getLogger(__name__)
class Distro(debian.Distro):
- pass
+
+ distro_name = 'ubuntu'
+ default_user = 'ubuntu'
+ default_user_groups = ("adm,audio,cdrom,dialout,floppy,video,"
+ "plugdev,dip,netdev,sudo")
diff --git a/cloudinit/filters/__init__.py b/cloudinit/filters/__init__.py
new file mode 100644
index 00000000..da124641
--- /dev/null
+++ b/cloudinit/filters/__init__.py
@@ -0,0 +1,21 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py
new file mode 100644
index 00000000..5bebd318
--- /dev/null
+++ b/cloudinit/filters/launch_index.py
@@ -0,0 +1,75 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import copy
+
+from cloudinit import log as logging
+from cloudinit import user_data as ud
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class Filter(object):
+ def __init__(self, wanted_idx, allow_none=True):
+ self.wanted_idx = wanted_idx
+ self.allow_none = allow_none
+
+ def _select(self, message):
+ msg_idx = message.get('Launch-Index', None)
+ if self.allow_none and msg_idx is None:
+ return True
+ msg_idx = util.safe_int(msg_idx)
+ if msg_idx != self.wanted_idx:
+ return False
+ return True
+
+ def _do_filter(self, message):
+ # Don't use walk() here since we want to do the reforming of the
+ # messages ourselves and not flatten the message listings...
+ if not self._select(message):
+ return None
+ if message.is_multipart():
+ # Recreate it and its child messages
+ prev_msgs = message.get_payload(decode=False)
+ new_msgs = []
+ discarded = 0
+ for m in prev_msgs:
+ m = self._do_filter(m)
+ if m is not None:
+ new_msgs.append(m)
+ else:
+ discarded += 1
+ LOG.debug(("Discarding %s multipart messages "
+ "which do not match launch index %s"),
+ discarded, self.wanted_idx)
+ new_message = copy.copy(message)
+ new_message.set_payload(new_msgs)
+ new_message[ud.ATTACHMENT_FIELD] = str(len(new_msgs))
+ return new_message
+ else:
+ return copy.copy(message)
+
+ def apply(self, root_message):
+ if self.wanted_idx is None:
+ return root_message
+ return self._do_filter(root_message)
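
A usage sketch for the filter above, building a toy multipart message with the stdlib email package (assumes the Filter class and the cloudinit modules it imports are available):

    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    root = MIMEMultipart()
    for idx in (0, 1):
        part = MIMEText('payload for launch index %s' % idx)
        part['Launch-Index'] = str(idx)
        root.attach(part)

    # The root itself has no Launch-Index, so allow_none keeps it;
    # only the child with index 1 survives the filter.
    filtered = Filter(wanted_idx=1).apply(root)
    print(len(filtered.get_payload()))  # 1
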
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 6d1502f4..99caed1f 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -133,7 +133,7 @@ def walker_handle_handler(pdata, _ctype, _filename, payload):
modfname = os.path.join(pdata['handlerdir'], "%s" % (modname))
if not modfname.endswith(".py"):
modfname = "%s.py" % (modfname)
- # TODO: Check if path exists??
+ # TODO(harlowja): Check if path exists??
util.write_file(modfname, payload, 0600)
handlers = pdata['handlers']
try:
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index a9d8e544..6c5c11ca 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -43,7 +43,7 @@ class ShellScriptPartHandler(handlers.Handler):
def _handle_part(self, _data, ctype, filename, payload, _frequency):
if ctype in handlers.CONTENT_SIGNALS:
- # TODO: maybe delete existing things here
+ # TODO(harlowja): maybe delete existing things here
return
filename = util.clean_filename(filename)
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 819c85b6..2333e5ee 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -21,8 +21,8 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
-import logging.handlers
import logging.config
+import logging.handlers
import collections
import os
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 2083cf60..8cc9e3b4 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -31,10 +31,13 @@ CFG_BUILTIN = {
'datasource_list': [
'NoCloud',
'ConfigDrive',
+ 'AltCloud',
'OVF',
'MAAS',
'Ec2',
- 'CloudStack'
+ 'CloudStack',
+ # At the end to act as a 'catch' when none of the above work...
+ 'None',
],
'def_log_file': '/var/log/cloud-init.log',
'log_cfgs': [],
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
new file mode 100644
index 00000000..69c376a5
--- /dev/null
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -0,0 +1,299 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joe VLcek <JVLcek@RedHat.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''
+This file contains code used to gather the user data passed to an
+instance on RHEVm and vSphere.
+'''
+
+import errno
+import os
+import os.path
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit.util import ProcessExecutionError
+
+LOG = logging.getLogger(__name__)
+
+# Needed file paths
+CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
+
+# Shell command lists
+CMD_DMI_SYSTEM = ['/usr/sbin/dmidecode', '--string', 'system-product-name']
+CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
+CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5']
+
+META_DATA_NOT_SUPPORTED = {
+ 'block-device-mapping': {},
+ 'instance-id': 455,
+ 'local-hostname': 'localhost',
+ 'placement': {},
+ }
+
+
+def read_user_data_callback(mount_dir):
+ '''
+ Description:
+ This callback will be applied by util.mount_cb() on the mounted
+ file.
+
+    The Deltacloud file name contains 'deltacloud'. Those not using
+    Deltacloud but instead instrumenting the injection could drop
+    'deltacloud' from the file name.
+
+ Input:
+ mount_dir - Mount directory
+
+ Returns:
+ User Data
+
+ '''
+
+ deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
+ user_data_file = mount_dir + '/user-data.txt'
+
+ # First try deltacloud_user_data_file. On failure try user_data_file.
+ try:
+ with open(deltacloud_user_data_file, 'r') as user_data_f:
+ user_data = user_data_f.read().strip()
+ except:
+ try:
+ with open(user_data_file, 'r') as user_data_f:
+ user_data = user_data_f.read().strip()
+ except:
+ util.logexc(LOG, ('Failed accessing user data file.'))
+ return None
+
+ return user_data
+
+
+class DataSourceAltCloud(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.seed = None
+ self.supported_seed_starts = ("/", "file://")
+
+ def __str__(self):
+ mstr = "%s [seed=%s]" % (util.obj_name(self), self.seed)
+ return mstr
+
+ def get_cloud_type(self):
+ '''
+ Description:
+ Get the type for the cloud back end this instance is running on
+ by examining the string returned by:
+ dmidecode --string system-product-name
+
+ On RHEV-M dmidecode returns: RHEV Hypervisor
+ On VMware/vSphere dmidecode returns: VMware Virtual Platform
+
+ Input:
+ None
+
+ Returns:
+ One of the following strings:
+ 'RHEV', 'VSPHERE' or 'UNKNOWN'
+
+ '''
+
+ cmd = CMD_DMI_SYSTEM
+ try:
+ (cmd_out, _err) = util.subp(cmd)
+ except ProcessExecutionError, _err:
+ LOG.debug(('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message))
+ return 'UNKNOWN'
+ except OSError, _err:
+ LOG.debug(('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message))
+ return 'UNKNOWN'
+
+ if cmd_out.upper().startswith('RHEV'):
+ return 'RHEV'
+
+ if cmd_out.upper().startswith('VMWARE'):
+ return 'VSPHERE'
+
+ return 'UNKNOWN'
+
+ def get_data(self):
+ '''
+ Description:
+ User Data is passed to the launching instance which
+ is used to perform instance configuration.
+
+ Cloud providers expose the user data differently.
+ It is necessary to determine which cloud provider
+ the current instance is running on to determine
+ how to access the user data. Images built with
+ ImageFactory will contain a CLOUD_INFO_FILE which
+ contains a string identifying the cloud provider.
+
+ Images not built with ImageFactory will try to
+ determine what the cloud provider is based on system
+ information.
+ '''
+
+ LOG.debug('Invoked get_data()')
+
+ if os.path.exists(CLOUD_INFO_FILE):
+ try:
+ cloud_info = open(CLOUD_INFO_FILE)
+ cloud_type = cloud_info.read().strip().upper()
+ cloud_info.close()
+ except:
+ util.logexc(LOG, 'Unable to access cloud info file.')
+ return False
+ else:
+ cloud_type = self.get_cloud_type()
+
+ LOG.debug('cloud_type: ' + str(cloud_type))
+
+ if 'RHEV' in cloud_type:
+ if self.user_data_rhevm():
+ return True
+ elif 'VSPHERE' in cloud_type:
+ if self.user_data_vsphere():
+ return True
+ else:
+ # There was no recognized alternate cloud type,
+ # indicating this handler should not be used.
+ return False
+
+ # No user data found
+ util.logexc(LOG, ('Failed accessing user data.'))
+ return False
+
+ def user_data_rhevm(self):
+ '''
+ RHEVM specific userdata read
+
+ If on RHEV-M the user data will be contained on the
+ floppy device in file <user_data_file>
+ To access it:
+ modprobe floppy
+
+ Leverage util.mount_cb to:
+ mkdir <tmp mount dir>
+ mount /dev/fd0 <tmp mount dir>
+ The call back passed to util.mount_cb will do:
+ read <tmp mount dir>/<user_data_file>
+ '''
+
+ return_str = None
+
+ # modprobe floppy
+ try:
+ cmd = CMD_PROBE_FLOPPY
+ (cmd_out, _err) = util.subp(cmd)
+ LOG.debug(('Command: %s\nOutput: %s') % (' '.join(cmd), cmd_out))
+ except ProcessExecutionError, _err:
+ util.logexc(LOG, (('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message)))
+ return False
+ except OSError, _err:
+ util.logexc(LOG, (('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message)))
+ return False
+
+ floppy_dev = '/dev/fd0'
+
+ # udevadm settle for floppy device
+ try:
+ cmd = CMD_UDEVADM_SETTLE
+ cmd.append('--exit-if-exists=' + floppy_dev)
+ (cmd_out, _err) = util.subp(cmd)
+ LOG.debug(('Command: %s\nOutput: %s') % (' '.join(cmd), cmd_out))
+ except ProcessExecutionError, _err:
+ util.logexc(LOG, (('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message)))
+ return False
+ except OSError, _err:
+ util.logexc(LOG, (('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message)))
+ return False
+
+ try:
+ return_str = util.mount_cb(floppy_dev, read_user_data_callback)
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ except util.MountFailedError:
+ util.logexc(LOG, ("Failed to mount %s"
+ " when looking for user data"), floppy_dev)
+
+ self.userdata_raw = return_str
+ self.metadata = META_DATA_NOT_SUPPORTED
+
+ if return_str:
+ return True
+ else:
+ return False
+
+ def user_data_vsphere(self):
+ '''
+ vSphere specific userdata read
+
+ If on vSphere the user data will be contained on the
+ cdrom device in file <user_data_file>
+ To access it:
+ Leverage util.mount_cb to:
+ mkdir <tmp mount dir>
+ mount <cdrom device> <tmp mount dir>
+ The call back passed to util.mount_cb will do:
+ read <tmp mount dir>/<user_data_file>
+ '''
+
+ return_str = None
+ cdrom_list = util.find_devs_with('LABEL=CDROM')
+ for cdrom_dev in cdrom_list:
+ try:
+ return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
+ if return_str:
+ break
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ except util.MountFailedError:
+ util.logexc(LOG, ("Failed to mount %s"
+ " when looking for user data"), cdrom_dev)
+
+ self.userdata_raw = return_str
+ self.metadata = META_DATA_NOT_SUPPORTED
+
+ if return_str:
+ return True
+ else:
+ return False
+
+# Used to match classes to dependencies
+# Source DataSourceAltCloud does not really depend on networking.
+# In the future, 'dsmode'-like behavior can be added to offer the user
+# the ability to run before networking.
+datasources = [
+ (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
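As an aside, the detection performed by get_cloud_type() above can be sketched standalone; a minimal version for illustration only, using subprocess directly rather than util.subp:

    import subprocess

    def guess_cloud_type():
        # Classify the platform from dmidecode's system-product-name,
        # mirroring the RHEV/VMware prefix checks above.
        cmd = ['dmidecode', '--string', 'system-product-name']
        try:
            out = subprocess.check_output(cmd)
        except (OSError, subprocess.CalledProcessError):
            return 'UNKNOWN'
        out = out.upper()
        if out.startswith('RHEV'):
            return 'RHEV'
        if out.startswith('VMWARE'):
            return 'VSPHERE'
        return 'UNKNOWN'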
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 751bef4f..f7ffa7cb 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -49,8 +49,7 @@ class DataSourceCloudStack(sources.DataSource):
self.metadata_address = "http://%s/" % (gw_addr)
def get_default_gateway(self):
- """ Returns the default gateway ip address in the dotted format
- """
+ """Returns the default gateway ip address in the dotted format."""
lines = util.load_file("/proc/net/route").splitlines()
for line in lines:
items = line.split("\t")
@@ -132,7 +131,8 @@ class DataSourceCloudStack(sources.DataSource):
def get_instance_id(self):
return self.metadata['instance-id']
- def get_availability_zone(self):
+ @property
+ def availability_zone(self):
return self.metadata['availability-zone']
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 320dd1d1..b8154367 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -30,88 +30,119 @@ LOG = logging.getLogger(__name__)
# Various defaults/constants...
DEFAULT_IID = "iid-dsconfigdrive"
DEFAULT_MODE = 'pass'
-CFG_DRIVE_FILES = [
+CFG_DRIVE_FILES_V1 = [
"etc/network/interfaces",
"root/.ssh/authorized_keys",
"meta.js",
]
DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
- "dsmode": DEFAULT_MODE,
}
-CFG_DRIVE_DEV_ENV = 'CLOUD_INIT_CONFIG_DRIVE_DEVICE'
+VALID_DSMODES = ("local", "net", "pass", "disabled")
class DataSourceConfigDrive(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed = None
- self.cfg = {}
+ self.source = None
self.dsmode = 'local'
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
+ self.version = None
def __str__(self):
- mstr = "%s [%s]" % (util.obj_name(self), self.dsmode)
- mstr += "[seed=%s]" % (self.seed)
+ mstr = "%s [%s,ver=%s]" % (util.obj_name(self), self.dsmode,
+ self.version)
+ mstr += "[source=%s]" % (self.source)
return mstr
def get_data(self):
found = None
md = {}
- ud = ""
+ results = {}
if os.path.isdir(self.seed_dir):
try:
- (md, ud) = read_config_drive_dir(self.seed_dir)
+ results = read_config_drive_dir(self.seed_dir)
found = self.seed_dir
except NonConfigDriveDir:
util.logexc(LOG, "Failed reading config drive from %s",
self.seed_dir)
if not found:
- dev = find_cfg_drive_device()
- if dev:
+ devlist = find_candidate_devs()
+ for dev in devlist:
try:
- (md, ud) = util.mount_cb(dev, read_config_drive_dir)
+ results = util.mount_cb(dev, read_config_drive_dir)
found = dev
+ break
except (NonConfigDriveDir, util.MountFailedError):
pass
+ except BrokenConfigDriveDir:
+ util.logexc(LOG, "broken config drive: %s", dev)
if not found:
return False
- if 'dsconfig' in md:
- self.cfg = md['dscfg']
-
+ md = results['metadata']
md = util.mergedict(md, DEFAULT_METADATA)
- # Update interfaces and ifup only on the local datasource
- # this way the DataSourceConfigDriveNet doesn't do it also.
- if 'network-interfaces' in md and self.dsmode == "local":
+ user_dsmode = results.get('dsmode', None)
+ if user_dsmode not in VALID_DSMODES + (None,):
+ LOG.warn("user specified invalid mode: %s" % user_dsmode)
+ user_dsmode = None
+
+ dsmode = get_ds_mode(cfgdrv_ver=results['cfgdrive_ver'],
+ ds_cfg=self.ds_cfg.get('dsmode'),
+ user=user_dsmode)
+
+ if dsmode == "disabled":
+ # most likely user specified
+ return False
+
+ # TODO(smoser): fix this, its dirty.
+ # we want to do some things (writing files and network config)
+ # only on first boot, and even then, we want to do so in the
+ # local datasource (so they happen earlier) even if the configured
+ # dsmode is 'net' or 'pass'. To do this, we check the previous
+ # instance-id
+ prev_iid = get_previous_iid(self.paths)
+ cur_iid = md['instance-id']
+
+ if ('network_config' in results and self.dsmode == "local" and
+ prev_iid != cur_iid):
LOG.debug("Updating network interfaces from config drive (%s)",
- md['dsmode'])
- self.distro.apply_network(md['network-interfaces'])
+ dsmode)
+ self.distro.apply_network(results['network_config'])
- self.seed = found
- self.metadata = md
- self.userdata_raw = ud
+ # file writing occurs in local mode (to be as early as possible)
+ if self.dsmode == "local" and prev_iid != cur_iid and results['files']:
+ LOG.debug("writing injected files")
+ try:
+ write_files(results['files'])
+ except:
+ util.logexc(LOG, "Failed writing files")
+
+ # dsmode != self.dsmode here if:
+ # * dsmode = "pass", pass means it should only copy files and then
+ # pass to another datasource
+ # * dsmode = "net" and self.dsmode = "local"
+ # so that user boothooks would be applied with network, the
+ # local datasource just gets out of the way, and lets the net claim
+ if dsmode != self.dsmode:
+ LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
+ return False
- if md['dsmode'] == self.dsmode:
- return True
+ self.source = found
+ self.metadata = md
+ self.userdata_raw = results.get('userdata')
+ self.version = results['cfgdrive_ver']
- LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
- return False
+ return True
def get_public_ssh_keys(self):
if not 'public-keys' in self.metadata:
return []
return self.metadata['public-keys']
- # The data sources' config_obj is a cloud-config formated
- # object that came to it from ways other than cloud-config
- # because cloud-config content would be handled elsewhere
- def get_config_obj(self):
- return self.cfg
-
class DataSourceConfigDriveNet(DataSourceConfigDrive):
def __init__(self, sys_cfg, distro, paths):
@@ -123,48 +154,146 @@ class NonConfigDriveDir(Exception):
pass
-def find_cfg_drive_device():
- """ Get the config drive device. Return a string like '/dev/vdb'
- or None (if there is no non-root device attached). This does not
- check the contents, only reports that if there *were* a config_drive
- attached, it would be this device.
- Note: per config_drive documentation, this is
- "associated as the last available disk on the instance"
- """
+class BrokenConfigDriveDir(Exception):
+ pass
- # This seems to be for debugging??
- if CFG_DRIVE_DEV_ENV in os.environ:
- return os.environ[CFG_DRIVE_DEV_ENV]
- # We are looking for a raw block device (sda, not sda1) with a vfat
- # filesystem on it....
- letters = "abcdefghijklmnopqrstuvwxyz"
- devs = util.find_devs_with("TYPE=vfat")
+def find_candidate_devs():
+ """Return a list of devices that may contain the config drive.
- # Filter out anything not ending in a letter (ignore partitions)
- devs = [f for f in devs if f[-1] in letters]
+ The returned list is sorted by search order: the first item
+ should be searched first (highest priority).
+
+ config drive v1:
+ Per documentation, this is "associated as the last available disk on the
+ instance", and should be VFAT.
+ Currently, we do not restrict search list to "last available disk"
+
+ config drive v2:
+ Disk should be:
+ * either vfat or iso9660 formatted
+ * labeled with 'config-2'
+ """
- # Sort them in reverse so "last" device is first
- devs.sort(reverse=True)
+ by_fstype = (util.find_devs_with("TYPE=vfat") +
+ util.find_devs_with("TYPE=iso9660"))
+ by_label = util.find_devs_with("LABEL=config-2")
- if devs:
- return devs[0]
+ # give preference to "last available disk" (vdb over vda)
+ # note, this is not a perfect rendition of that.
+ by_fstype.sort(reverse=True)
+ by_label.sort(reverse=True)
- return None
+ # combine list of items by putting by-label items first
+ # followed by fstype items, but with dupes removed
+ combined = (by_label + [d for d in by_fstype if d not in by_label])
+
+ # We are looking for block device (sda, not sda1), ignore partitions
+ combined = [d for d in combined if d[-1] not in "0123456789"]
+
+ return combined
def read_config_drive_dir(source_dir):
+ last_e = NonConfigDriveDir("Not found")
+ for finder in (read_config_drive_dir_v2, read_config_drive_dir_v1):
+ try:
+ data = finder(source_dir)
+ return data
+ except NonConfigDriveDir as exc:
+ last_e = exc
+ raise last_e
+
+
+def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
+
+ if (not os.path.isdir(os.path.join(source_dir, "openstack", version)) and
+ os.path.isdir(os.path.join(source_dir, "openstack", "latest"))):
+ LOG.warn("version '%s' not available, attempting to use 'latest'" %
+ version)
+ version = "latest"
+
+ datafiles = (
+ ('metadata',
+ "openstack/%s/meta_data.json" % version, True, json.loads),
+ ('userdata', "openstack/%s/user_data" % version, False, None),
+ ('ec2-metadata', "ec2/latest/metadata.json", False, json.loads),
+ )
+
+ results = {'userdata': None}
+ for (name, path, required, process) in datafiles:
+ fpath = os.path.join(source_dir, path)
+ data = None
+ found = False
+ if os.path.isfile(fpath):
+ try:
+ with open(fpath) as fp:
+ data = fp.read()
+ except Exception as exc:
+ raise BrokenConfigDriveDir("failed to read: %s" % fpath)
+ found = True
+ elif required:
+ raise NonConfigDriveDir("missing mandatory %s" % fpath)
+
+ if found and process:
+ try:
+ data = process(data)
+ except Exception as exc:
+ raise BrokenConfigDriveDir("failed to process: %s" % fpath)
+
+ if found:
+ results[name] = data
+
+ # instance-id is 'uuid' for openstack. just copy it to instance-id.
+ if 'instance-id' not in results['metadata']:
+ try:
+ results['metadata']['instance-id'] = results['metadata']['uuid']
+ except KeyError:
+ raise BrokenConfigDriveDir("No uuid entry in metadata")
+
+ def read_content_path(item):
+ # do not use os.path.join here, as content_path starts with /
+ cpath = os.path.sep.join((source_dir, "openstack",
+ "./%s" % item['content_path']))
+ with open(cpath) as fp:
+ return(fp.read())
+
+ files = {}
+ try:
+ for item in results['metadata'].get('files', {}):
+ files[item['path']] = read_content_path(item)
+
+ # the 'network_config' item in metadata is a content pointer
+ # to the network config that should be applied.
+ # in folsom, it is just a '/etc/network/interfaces' file.
+ item = results['metadata'].get("network_config", None)
+ if item:
+ results['network_config'] = read_content_path(item)
+ except Exception as exc:
+ raise BrokenConfigDriveDir("failed to read file %s: %s" % (item, exc))
+
+ # to openstack, user can specify meta ('nova boot --meta=key=value') and
+ # those will appear under metadata['meta'].
+ # if they specify 'dsmode' they're indicating the mode that they intend
+ # for this datasource to operate in.
+ try:
+ results['dsmode'] = results['metadata']['meta']['dsmode']
+ except KeyError:
+ pass
+
+ results['files'] = files
+ results['cfgdrive_ver'] = 2
+ return results
+
+
+def read_config_drive_dir_v1(source_dir):
"""
- read_config_drive_dir(source_dir):
- read source_dir, and return a tuple with metadata dict and user-data
- string populated. If not a valid dir, raise a NonConfigDriveDir
+ read source_dir and return a dict with metadata, user-data,
+ files and version (1). If not a valid dir, raise a NonConfigDriveDir
"""
- # TODO: fix this for other operating systems...
- # Ie: this is where https://fedorahosted.org/netcf/ or similar should
- # be hooked in... (or could be)
found = {}
- for af in CFG_DRIVE_FILES:
+ for af in CFG_DRIVE_FILES_V1:
fn = os.path.join(source_dir, af)
if os.path.isfile(fn):
found[af] = fn
@@ -173,11 +302,10 @@ def read_config_drive_dir(source_dir):
raise NonConfigDriveDir("%s: %s" % (source_dir, "no files found"))
md = {}
- ud = ""
keydata = ""
if "etc/network/interfaces" in found:
fn = found["etc/network/interfaces"]
- md['network-interfaces'] = util.load_file(fn)
+ md['network_config'] = util.load_file(fn)
if "root/.ssh/authorized_keys" in found:
fn = found["root/.ssh/authorized_keys"]
@@ -197,21 +325,77 @@ def read_config_drive_dir(source_dir):
(source_dir, "invalid json in meta.js", e))
md['meta_js'] = content
- # Key data override??
+ # keydata in meta_js is preferred over "injected"
keydata = meta_js.get('public-keys', keydata)
if keydata:
lines = keydata.splitlines()
md['public-keys'] = [l for l in lines
if len(l) and not l.startswith("#")]
- for copy in ('dsmode', 'instance-id', 'dscfg'):
- if copy in meta_js:
- md[copy] = meta_js[copy]
+ # config-drive-v1 has no way for openstack to provide the instance-id
+ # so we copy that into metadata from the user input
+ if 'instance-id' in meta_js:
+ md['instance-id'] = meta_js['instance-id']
+
+ results = {'cfgdrive_ver': 1, 'metadata': md}
+
+ # allow the user to specify 'dsmode' in a meta tag
+ if 'dsmode' in meta_js:
+ results['dsmode'] = meta_js['dsmode']
+
+ # config-drive-v1 has no way of specifying user-data, so the user has
+ # to cheat and stuff it in a meta tag also.
+ results['userdata'] = meta_js.get('user-data')
- if 'user-data' in meta_js:
- ud = meta_js['user-data']
+ # this implementation does not support files
+ # (other than network/interfaces and authorized_keys)
+ results['files'] = []
- return (md, ud)
+ return results
+
+
+def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
+ """Determine what mode should be used.
+ valid values are 'pass', 'disabled', 'local', 'net'
+ """
+ # user passed data trumps everything
+ if user is not None:
+ return user
+
+ if ds_cfg is not None:
+ return ds_cfg
+
+ # At config-drive version 1, the default behavior was 'pass'. That
+ # meant not to use it as the primary data source, but to expect an ec2
+ # metadata source. For version 2, we default to 'net', which means
+ # DataSourceConfigDriveNet would be used.
+ #
+ # This could change in the future. If there were definitive metadata
+ # that indicated the presence of an openstack metadata service, then
+ # we could change to 'pass' by default as well. The motivation for that
+ # would be 'cloud-init query', as the web service could be more dynamic.
+ if cfgdrv_ver == 1:
+ return "pass"
+ return "net"
+
+
+def get_previous_iid(paths):
+ # interestingly, for this purpose the "previous" instance-id is the current
+ # instance-id. cloud-init hasn't moved them over yet as this datasource
+ # hasn't declared itself found.
+ fname = os.path.join(paths.get_cpath('data'), 'instance-id')
+ try:
+ with open(fname) as fp:
+ return fp.read()
+ except IOError:
+ return None
+
+
+def write_files(files):
+ for (name, content) in files.iteritems():
+ if name[0] != os.sep:
+ name = os.sep + name
+ util.write_file(name, content, mode=0660)
# Used to match classes to dependencies
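The precedence implemented by get_ds_mode() above reduces to a few lines; a standalone sketch with example outcomes:

    def pick_dsmode(cfgdrv_ver, ds_cfg=None, user=None):
        # User-supplied dsmode wins, then datasource config, then a
        # version-based default: 'pass' for v1, 'net' for v2.
        for candidate in (user, ds_cfg):
            if candidate is not None:
                return candidate
        return 'pass' if cfgdrv_ver == 1 else 'net'

    assert pick_dsmode(1) == 'pass'
    assert pick_dsmode(2) == 'net'
    assert pick_dsmode(2, ds_cfg='local') == 'local'
    assert pick_dsmode(1, ds_cfg='local', user='disabled') == 'disabled'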
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index d9eb8f17..c7ad6d54 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -40,7 +40,7 @@ DEF_MD_VERSION = '2009-04-04'
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
-DEF_MD_URLS = [DEF_MD_URL, "http://instance-data:8773"]
+DEF_MD_URLS = [DEF_MD_URL, "http://instance-data.:8773"]
class DataSourceEc2(sources.DataSource):
@@ -77,46 +77,18 @@ class DataSourceEc2(sources.DataSource):
self.metadata_address)
return False
+ @property
+ def launch_index(self):
+ if not self.metadata:
+ return None
+ return self.metadata.get('ami-launch-index')
+
def get_instance_id(self):
return self.metadata['instance-id']
def get_availability_zone(self):
return self.metadata['placement']['availability-zone']
- def get_local_mirror(self):
- return self.get_mirror_from_availability_zone()
-
- def get_mirror_from_availability_zone(self, availability_zone=None):
- # Return type None indicates there is no cloud specific mirror
- # Availability is like 'us-west-1b' or 'eu-west-1a'
- if availability_zone is None:
- availability_zone = self.get_availability_zone()
-
- if self.is_vpc():
- return None
-
- if not availability_zone:
- return None
-
- mirror_tpl = self.distro.get_option('package_mirror_ec2_template',
- None)
-
- if mirror_tpl is None:
- return None
-
- # in EC2, the 'region' is 'us-east-1' if 'zone' is 'us-east-1a'
- tpl_params = {
- 'zone': availability_zone.strip(),
- 'region': availability_zone[:-1]
- }
- mirror_url = mirror_tpl % (tpl_params)
-
- found = util.search_for_mirror([mirror_url])
- if found is not None:
- return mirror_url
-
- return None
-
def _get_url_settings(self):
mcfg = self.ds_cfg
if not mcfg:
@@ -255,6 +227,12 @@ class DataSourceEc2(sources.DataSource):
return True
return False
+ @property
+ def availability_zone(self):
+ try:
+ return self.metadata['placement']['availability-zone']
+ except KeyError:
+ return None
# Used to match classes to dependencies
datasources = [
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
new file mode 100644
index 00000000..c2125bee
--- /dev/null
+++ b/cloudinit/sources/DataSourceNone.py
@@ -0,0 +1,61 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class DataSourceNone(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths, ud_proc=None):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
+ self.metadata = {}
+ self.userdata_raw = ''
+
+ def get_data(self):
+ # If the datasource config has any provided 'fallback'
+ # userdata or metadata, use it...
+ if 'userdata_raw' in self.ds_cfg:
+ self.userdata_raw = self.ds_cfg['userdata_raw']
+ if 'metadata' in self.ds_cfg:
+ self.metadata = self.ds_cfg['metadata']
+ return True
+
+ def get_instance_id(self):
+ return 'iid-datasource-none'
+
+ def __str__(self):
+ return util.obj_name(self)
+
+ @property
+ def is_disconnected(self):
+ return True
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceNone, []),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index b25724a5..6f126091 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -20,6 +20,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from email.mime.multipart import MIMEMultipart
+
import abc
from cloudinit import importer
@@ -27,6 +29,8 @@ from cloudinit import log as logging
from cloudinit import user_data as ud
from cloudinit import util
+from cloudinit.filters import launch_index
+
DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
DS_PREFIX = 'DataSource'
@@ -59,12 +63,34 @@ class DataSource(object):
else:
self.ud_proc = ud_proc
- def get_userdata(self):
+ def get_userdata(self, apply_filter=False):
if self.userdata is None:
- raw_data = self.get_userdata_raw()
- self.userdata = self.ud_proc.process(raw_data)
+ self.userdata = self.ud_proc.process(self.get_userdata_raw())
+ if apply_filter:
+ return self._filter_userdata(self.userdata)
return self.userdata
+ @property
+ def launch_index(self):
+ if not self.metadata:
+ return None
+ if 'launch-index' in self.metadata:
+ return self.metadata['launch-index']
+ return None
+
+ def _filter_userdata(self, processed_ud):
+ filters = [
+ launch_index.Filter(util.safe_int(self.launch_index)),
+ ]
+ new_ud = processed_ud
+ for f in filters:
+ new_ud = f.apply(new_ud)
+ return new_ud
+
+ @property
+ def is_disconnected(self):
+ return False
+
def get_userdata_raw(self):
return self.userdata_raw
@@ -113,9 +139,9 @@ class DataSource(object):
def get_locale(self):
return 'en_US.UTF-8'
- def get_local_mirror(self):
- # ??
- return None
+ @property
+ def availability_zone(self):
+ return self.metadata.get('availability-zone')
def get_instance_id(self):
if not self.metadata or 'instance-id' not in self.metadata:
@@ -147,7 +173,7 @@ class DataSource(object):
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
lhost = self.metadata['local-hostname']
if util.is_ipv4(lhost):
- toks = "ip-%s" % lhost.replace(".", "-")
+ toks = [ "ip-%s" % lhost.replace(".", "-") ]
else:
toks = lhost.split(".")
@@ -162,6 +188,10 @@ class DataSource(object):
else:
return hostname
+ def get_package_mirror_info(self):
+ return self.distro.get_package_mirror_info(
+ availability_zone=self.availability_zone)
+
def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
ds_list = list_sources(cfg_list, ds_deps, pkg_list)
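The launch_index.Filter applied by _filter_userdata() above lives in cloudinit/filters/launch_index.py (see the diffstat); a rough, hypothetical sketch of the idea it implements -- drop MIME parts whose Launch-Index header names a different launch index -- might look like this (the actual filter may differ in detail):

    from email.mime.multipart import MIMEMultipart

    def filter_parts_by_launch_index(msg, wanted_idx):
        # Keep parts with no Launch-Index header, or whose header matches
        # this instance's launch index; unparseable values are kept.
        if wanted_idx is None:
            return msg
        kept = MIMEMultipart()
        for part in msg.walk():
            if part.get_content_maintype() == 'multipart':
                continue
            idx = part.get('Launch-Index', None)
            try:
                if idx is None or int(idx) == int(wanted_idx):
                    kept.attach(part)
            except (ValueError, TypeError):
                kept.attach(part)
        return kept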
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index e0a2f0ca..88a11a1a 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -181,12 +181,11 @@ def parse_authorized_keys(fname):
return contents
-def update_authorized_keys(fname, keys):
- entries = parse_authorized_keys(fname)
+def update_authorized_keys(old_entries, keys):
to_add = list(keys)
- for i in range(0, len(entries)):
- ent = entries[i]
+ for i in range(0, len(old_entries)):
+ ent = old_entries[i]
if ent.empty() or not ent.base64:
continue
# Replace those with the same base64
@@ -199,66 +198,81 @@ def update_authorized_keys(fname, keys):
# Don't add it later
if k in to_add:
to_add.remove(k)
- entries[i] = ent
+ old_entries[i] = ent
# Now append any entries we did not match above
for key in to_add:
- entries.append(key)
+ old_entries.append(key)
# Now format them back to strings...
- lines = [str(b) for b in entries]
+ lines = [str(b) for b in old_entries]
# Ensure it ends with a newline
lines.append('')
return '\n'.join(lines)
-def setup_user_keys(keys, user, key_prefix, paths):
- # Make sure the users .ssh dir is setup accordingly
- pwent = pwd.getpwnam(user)
- ssh_dir = os.path.join(pwent.pw_dir, '.ssh')
- ssh_dir = paths.join(False, ssh_dir)
- if not os.path.exists(ssh_dir):
- util.ensure_dir(ssh_dir, mode=0700)
- util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
+def users_ssh_info(username, paths):
+ pw_ent = pwd.getpwnam(username)
+ if not pw_ent:
+ raise RuntimeError("Unable to get ssh info for user %r" % (username))
+ ssh_dir = paths.join(False, os.path.join(pw_ent.pw_dir, '.ssh'))
+ return (ssh_dir, pw_ent)
- # Turn the keys given into actual entries
- parser = AuthKeyLineParser()
- key_entries = []
- for k in keys:
- key_entries.append(parser.parse(str(k), def_opt=key_prefix))
+def extract_authorized_keys(username, paths):
+ (ssh_dir, pw_ent) = users_ssh_info(username, paths)
sshd_conf_fn = paths.join(True, DEF_SSHD_CFG)
+ auth_key_fn = None
with util.SeLinuxGuard(ssh_dir, recursive=True):
try:
- # AuthorizedKeysFile may contain tokens
+ # The 'AuthorizedKeysFile' may contain tokens
# of the form %T which are substituted during connection set-up.
# The following tokens are defined: %% is replaced by a literal
# '%', %h is replaced by the home directory of the user being
# authenticated and %u is replaced by the username of that user.
ssh_cfg = parse_ssh_config_map(sshd_conf_fn)
- akeys = ssh_cfg.get("authorizedkeysfile", '')
- akeys = akeys.strip()
- if not akeys:
- akeys = "%h/.ssh/authorized_keys"
- akeys = akeys.replace("%h", pwent.pw_dir)
- akeys = akeys.replace("%u", user)
- akeys = akeys.replace("%%", '%')
- if not akeys.startswith('/'):
- akeys = os.path.join(pwent.pw_dir, akeys)
- authorized_keys = paths.join(False, akeys)
+ auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip()
+ if not auth_key_fn:
+ auth_key_fn = "%h/.ssh/authorized_keys"
+ auth_key_fn = auth_key_fn.replace("%h", pw_ent.pw_dir)
+ auth_key_fn = auth_key_fn.replace("%u", username)
+ auth_key_fn = auth_key_fn.replace("%%", '%')
+ if not auth_key_fn.startswith('/'):
+ auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn)
+ auth_key_fn = paths.join(False, auth_key_fn)
except (IOError, OSError):
- authorized_keys = os.path.join(ssh_dir, 'authorized_keys')
+ # Give up and use a default key filename
+ auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
util.logexc(LOG, ("Failed extracting 'AuthorizedKeysFile'"
" in ssh config"
- " from %s, using 'AuthorizedKeysFile' file"
- " %s instead"),
- sshd_conf_fn, authorized_keys)
-
- content = update_authorized_keys(authorized_keys, key_entries)
- util.ensure_dir(os.path.dirname(authorized_keys), mode=0700)
- util.write_file(authorized_keys, content, mode=0600)
- util.chownbyid(authorized_keys, pwent.pw_uid, pwent.pw_gid)
+ " from %r, using 'AuthorizedKeysFile' file"
+ " %r instead"),
+ sshd_conf_fn, auth_key_fn)
+ auth_key_entries = parse_authorized_keys(auth_key_fn)
+ return (auth_key_fn, auth_key_entries)
+
+
+def setup_user_keys(keys, username, key_prefix, paths):
+ # Make sure the users .ssh dir is setup accordingly
+ (ssh_dir, pwent) = users_ssh_info(username, paths)
+ if not os.path.isdir(ssh_dir):
+ util.ensure_dir(ssh_dir, mode=0700)
+ util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
+
+ # Turn the 'update' keys given into actual entries
+ parser = AuthKeyLineParser()
+ key_entries = []
+ for k in keys:
+ key_entries.append(parser.parse(str(k), def_opt=key_prefix))
+
+ # Extract the old and make the new
+ (auth_key_fn, auth_key_entries) = extract_authorized_keys(username, paths)
+ with util.SeLinuxGuard(ssh_dir, recursive=True):
+ content = update_authorized_keys(auth_key_entries, key_entries)
+ util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700)
+ util.write_file(auth_key_fn, content, mode=0600)
+ util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
class SshdConfigLine(object):
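The AuthorizedKeysFile handling in extract_authorized_keys() above expands sshd's %-tokens; the substitution alone, as a standalone sketch (hypothetical helper, not part of the patch):

    import os.path

    def expand_authorized_keys_file(value, home, username):
        # %h -> home directory, %u -> username, %% -> literal '%';
        # relative paths are taken relative to the user's home.
        value = (value or '').strip() or '%h/.ssh/authorized_keys'
        value = value.replace('%h', home)
        value = value.replace('%u', username)
        value = value.replace('%%', '%')
        if not value.startswith('/'):
            value = os.path.join(home, value)
        return value

    assert (expand_authorized_keys_file('', '/home/bob', 'bob')
            == '/home/bob/.ssh/authorized_keys')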
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 2f6a566c..af902925 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -326,7 +326,7 @@ class Init(object):
'paths': self.paths,
'datasource': self.datasource,
}
- # TODO Hmmm, should we dynamically import these??
+ # TODO(harlowja) Hmmm, should we dynamically import these??
def_handlers = [
cc_part.CloudConfigPartHandler(**opts),
ss_part.ShellScriptPartHandler(**opts),
@@ -347,7 +347,7 @@ class Init(object):
sys.path.insert(0, idir)
# Ensure datasource fetched before activation (just incase)
- user_data_msg = self.datasource.get_userdata()
+ user_data_msg = self.datasource.get_userdata(True)
# This keeps track of all the active handlers
c_handlers = helpers.ContentHandlers()
@@ -519,7 +519,7 @@ class Modules(object):
" but not on %s distro. It may or may not work"
" correctly."), name, worked_distros, d_name)
# Use the configs logger and not our own
- # TODO: possibly check the module
+ # TODO(harlowja): possibly check the module
# for having a LOG attr and just give it back
# its own logger?
func_args = [name, self.cfg,
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index f5d01818..803ffc3a 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -23,9 +23,9 @@
import os
import email
+from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
-from email.mime.base import MIMEBase
from cloudinit import handlers
from cloudinit import log as logging
@@ -52,21 +52,23 @@ ARCHIVE_UNDEF_TYPE = "text/cloud-config"
# Msg header used to track attachments
ATTACHMENT_FIELD = 'Number-Attachments'
+# Only the following content types can have their launch index examined
+# in their payload; every other content type can still provide a header.
+EXAMINE_FOR_LAUNCH_INDEX = ["text/cloud-config"]
+
class UserDataProcessor(object):
def __init__(self, paths):
self.paths = paths
def process(self, blob):
- base_msg = convert_string(blob)
- process_msg = MIMEMultipart()
- self._process_msg(base_msg, process_msg)
- return process_msg
+ accumulating_msg = MIMEMultipart()
+ self._process_msg(convert_string(blob), accumulating_msg)
+ return accumulating_msg
def _process_msg(self, base_msg, append_msg):
for part in base_msg.walk():
- # multipart/* are just containers
- if part.get_content_maintype() == 'multipart':
+ if is_skippable(part):
continue
ctype = None
@@ -82,6 +84,12 @@ class UserDataProcessor(object):
if ctype is None:
ctype = ctype_orig
+ if ctype != ctype_orig:
+ if CONTENT_TYPE in part:
+ part.replace_header(CONTENT_TYPE, ctype)
+ else:
+ part[CONTENT_TYPE] = ctype
+
if ctype in INCLUDE_TYPES:
self._do_include(payload, append_msg)
continue
@@ -90,6 +98,8 @@ class UserDataProcessor(object):
self._explode_archive(payload, append_msg)
continue
+ # Should this be happening? Shouldn't
+ # the part header be modified and not the base?
if CONTENT_TYPE in base_msg:
base_msg.replace_header(CONTENT_TYPE, ctype)
else:
@@ -97,11 +107,41 @@ class UserDataProcessor(object):
self._attach_part(append_msg, part)
+ def _attach_launch_index(self, msg):
+ header_idx = msg.get('Launch-Index', None)
+ payload_idx = None
+ if msg.get_content_type() in EXAMINE_FOR_LAUNCH_INDEX:
+ try:
+ # See if it has a launch-index field
+ # that might affect the final header
+ payload = util.load_yaml(msg.get_payload(decode=True))
+ if payload:
+ payload_idx = payload.get('launch-index')
+ except:
+ pass
+ # Header overrides contents, for now (?) or the other way around?
+ if header_idx is not None:
+ payload_idx = header_idx
+ # Nothing found in payload, use header (if anything there)
+ if payload_idx is None:
+ payload_idx = header_idx
+ if payload_idx is not None:
+ try:
+ msg.add_header('Launch-Index', str(int(payload_idx)))
+ except (ValueError, TypeError):
+ pass
+
def _get_include_once_filename(self, entry):
entry_fn = util.hash_blob(entry, 'md5', 64)
return os.path.join(self.paths.get_ipath_cur('data'),
'urlcache', entry_fn)
+ def _process_before_attach(self, msg, attached_id):
+ if not msg.get_filename():
+ msg.add_header('Content-Disposition',
+ 'attachment', filename=PART_FN_TPL % (attached_id))
+ self._attach_launch_index(msg)
+
def _do_include(self, content, append_msg):
# Include a list of urls, one per line
# also support '#include <url here>'
@@ -148,7 +188,7 @@ class UserDataProcessor(object):
self._process_msg(new_msg, append_msg)
def _explode_archive(self, archive, append_msg):
- entries = util.load_yaml(archive, default=[], allowed=[list, set])
+ entries = util.load_yaml(archive, default=[], allowed=(list, set))
for ent in entries:
# ent can be one of:
# dict { 'filename' : 'value', 'content' :
@@ -159,7 +199,7 @@ class UserDataProcessor(object):
if isinstance(ent, (str, basestring)):
ent = {'content': ent}
if not isinstance(ent, (dict)):
- # TODO raise?
+ # TODO(harlowja) raise?
continue
content = ent.get('content', '')
@@ -178,9 +218,11 @@ class UserDataProcessor(object):
if 'filename' in ent:
msg.add_header('Content-Disposition',
'attachment', filename=ent['filename'])
+ if 'launch-index' in ent:
+ msg.add_header('Launch-Index', str(ent['launch-index']))
for header in list(ent.keys()):
- if header in ('content', 'filename', 'type'):
+ if header in ('content', 'filename', 'type', 'launch-index'):
continue
msg.add_header(header, ent[header])
@@ -204,21 +246,23 @@ class UserDataProcessor(object):
outer_msg.replace_header(ATTACHMENT_FIELD, str(fetched_count))
return fetched_count
- def _part_filename(self, _unnamed_part, count):
- return PART_FN_TPL % (count + 1)
-
def _attach_part(self, outer_msg, part):
"""
- Attach an part to an outer message. outermsg must be a MIMEMultipart.
- Modifies a header in the message to keep track of number of attachments.
+ Attach a part to an outer message. outer_msg must be a MIMEMultipart.
+ Modifies a header in the outer message to keep track of the number of attachments.
"""
- cur_c = self._multi_part_count(outer_msg)
- if not part.get_filename():
- fn = self._part_filename(part, cur_c)
- part.add_header('Content-Disposition',
- 'attachment', filename=fn)
+ part_count = self._multi_part_count(outer_msg)
+ self._process_before_attach(part, part_count + 1)
outer_msg.attach(part)
- self._multi_part_count(outer_msg, cur_c + 1)
+ self._multi_part_count(outer_msg, part_count + 1)
+
+
+def is_skippable(part):
+ # multipart/* are just containers
+ part_maintype = part.get_content_maintype() or ''
+ if part_maintype.lower() == 'multipart':
+ return True
+ return False
# Converts a raw string into a mime message
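For illustration, the entry normalization done at the top of _explode_archive() above amounts to the following (Python 2, matching the code's use of basestring; not part of the patch):

    def normalize_archive_entry(ent):
        # A bare string becomes {'content': string}; dicts pass through;
        # anything else is skipped (the loop above uses 'continue').
        if isinstance(ent, basestring):
            return {'content': ent}
        if isinstance(ent, dict):
            return ent
        return None

    assert normalize_archive_entry('echo hi') == {'content': 'echo hi'}
    assert normalize_archive_entry(42) is None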
diff --git a/cloudinit/util.py b/cloudinit/util.py
index a8c0cceb..33da73eb 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -24,8 +24,8 @@
from StringIO import StringIO
-import copy as obj_copy
import contextlib
+import copy as obj_copy
import errno
import glob
import grp
@@ -317,8 +317,9 @@ def multi_log(text, console=True, stderr=True,
else:
log.log(log_level, text)
+
def is_ipv4(instr):
- """ determine if input string is a ipv4 address. return boolean"""
+ """determine if input string is a ipv4 address. return boolean."""
toks = instr.split('.')
if len(toks) != 4:
return False
@@ -826,12 +827,12 @@ def get_cmdline_url(names=('cloud-config-url', 'url'),
def is_resolvable(name):
- """ determine if a url is resolvable, return a boolean
+ """determine if a url is resolvable, return a boolean
This also attempts to be resilient against dns redirection.
Note, that normal nsswitch resolution is used here. So in order
to avoid any utilization of 'search' entries in /etc/resolv.conf
- we have to append '.'.
+ we have to append '.'.
The top level 'invalid' domain is invalid per RFC. And example.com
should also not exist. The random entry will be resolved inside
@@ -847,7 +848,7 @@ def is_resolvable(name):
try:
result = socket.getaddrinfo(iname, None, 0, 0,
socket.SOCK_STREAM, socket.AI_CANONNAME)
- badresults[iname] = []
+ badresults[iname] = []
for (_fam, _stype, _proto, cname, sockaddr) in result:
badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
badips.add(sockaddr[0])
@@ -856,7 +857,7 @@ def is_resolvable(name):
_DNS_REDIRECT_IP = badips
if badresults:
LOG.debug("detected dns redirection: %s" % badresults)
-
+
try:
result = socket.getaddrinfo(name, None)
# check first result's sockaddr field
@@ -874,7 +875,7 @@ def get_hostname():
def is_resolvable_url(url):
- """ determine if this url is resolvable (existing or ip) """
+ """determine if this url is resolvable (existing or ip)."""
return (is_resolvable(urlparse.urlparse(url).hostname))
@@ -1105,7 +1106,7 @@ def hash_blob(blob, routine, mlen=None):
def rename(src, dest):
LOG.debug("Renaming %s to %s", src, dest)
- # TODO use a se guard here??
+ # TODO(harlowja) use a se guard here??
os.rename(src, dest)
@@ -1284,12 +1285,15 @@ def ensure_file(path, mode=0644):
write_file(path, content='', omode="ab", mode=mode)
-def chmod(path, mode):
- real_mode = None
+def safe_int(possible_int):
try:
- real_mode = int(mode)
+ return int(possible_int)
except (ValueError, TypeError):
- pass
+ return None
+
+
+def chmod(path, mode):
+ real_mode = safe_int(mode)
if path and real_mode:
with SeLinuxGuard(path):
os.chmod(path, real_mode)
@@ -1329,12 +1333,19 @@ def delete_dir_contents(dirname):
del_file(node_fullpath)
-def subp(args, data=None, rcs=None, env=None, capture=True, shell=False):
+def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
+ logstring=False):
if rcs is None:
rcs = [0]
try:
- LOG.debug(("Running command %s with allowed return codes %s"
- " (shell=%s, capture=%s)"), args, rcs, shell, capture)
+
+ if not logstring:
+ LOG.debug(("Running command %s with allowed return codes %s"
+ " (shell=%s, capture=%s)"), args, rcs, shell, capture)
+ else:
+ LOG.debug(("Running hidden command to protect sensitive "
+ "input/output logstring: %s"), logstring)
+
if not capture:
stdout = None
stderr = None
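The new safe_int() helper factored out of chmod() is small enough to demonstrate inline (standalone copy, for illustration):

    def safe_int(possible_int):
        try:
            return int(possible_int)
        except (ValueError, TypeError):
            return None

    assert safe_int('2') == 2
    assert safe_int(None) is None
    assert safe_int('2.5') is None  # int() rejects a float-formatted string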
diff --git a/config/cloud.cfg b/config/cloud.cfg
index 72e413d5..b3411d11 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -1,8 +1,9 @@
# The top level settings are used as module
# and system configuration.
-# This user will have its password adjusted
-user: ubuntu
+# Implement for Ubuntu only: create the default 'ubuntu' user
+users:
+ - default
# If this is set, 'root' will not be able to ssh in and they
# will get a message to login instead as the above $user (ubuntu)
@@ -28,10 +29,14 @@ cloud_init_modules:
- update_etc_hosts
- ca-certs
- rsyslog
+ - users-groups
- ssh
# The modules that run in the 'config' stage
cloud_config_modules:
+# Emit the cloud config ready event
+# this can be used by upstart jobs for 'start on cloud-config'.
+ - emit_upstart
- mounts
- ssh-import-id
- locale
@@ -56,6 +61,7 @@ cloud_final_modules:
- scripts-per-boot
- scripts-per-instance
- scripts-user
+ - ssh-authkey-fingerprints
- keys-to-console
- phone-home
- final-message
@@ -70,6 +76,18 @@ system_info:
cloud_dir: /var/lib/cloud/
templates_dir: /etc/cloud/templates/
upstart_dir: /etc/init/
- package_mirror: http://archive.ubuntu.com/ubuntu
- package_mirror_ec2_template: http://%(region)s.ec2.archive.ubuntu.com/ubuntu/
+ package_mirrors:
+ - arches: [i386, amd64]
+ failsafe:
+ primary: http://archive.ubuntu.com/ubuntu
+ security: http://security.ubuntu.com/ubuntu
+ search:
+ primary:
+ - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/
+ - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/
+ security: []
+ - arches: [armhf, armel, default]
+ failsafe:
+ primary: http://ports.ubuntu.com/ubuntu-ports
+ security: http://ports.ubuntu.com/ubuntu-ports
ssh_svcname: ssh
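The search templates above are presumably filled from the instance's zone information; a hypothetical sketch of that substitution, using the region derivation from the removed DataSourceEc2 code (zone minus its trailing letter):

    def render_mirror(template, availability_zone):
        # Fill the %(ec2_region)s / %(availability_zone)s placeholders.
        params = {
            'availability_zone': availability_zone,
            'ec2_region': availability_zone[:-1],
        }
        return template % params

    tpl = 'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/'
    assert (render_mirror(tpl, 'us-east-1b')
            == 'http://us-east-1.ec2.archive.ubuntu.com/ubuntu/')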
diff --git a/doc/examples/cloud-config-archive-launch-index.txt b/doc/examples/cloud-config-archive-launch-index.txt
new file mode 100644
index 00000000..e2ac2869
--- /dev/null
+++ b/doc/examples/cloud-config-archive-launch-index.txt
@@ -0,0 +1,30 @@
+#cloud-config-archive
+
+# This is an example of the cloud archive
+# format which includes a set of launch indexes
+# that will be filtered on (thus only showing
+# up in instances with that launch index). This
+# is done by adding the 'launch-index' key, which
+# maps to the integer launch index that the
+# corresponding content should be used with.
+#
+# It is possible to leave this value out, in which
+# case the content will be applicable
+# to all instances.
+
+- type: foo/wark
+ filename: bar
+ content: |
+ This is my payload
+ hello
+ launch-index: 1 # I will only be used on launch-index 1
+- this is also payload
+- |
+ multi line payload
+ here
+-
+ type: text/upstart-job
+ filename: my-upstart.conf
+ content: |
+ whats this, yo?
+ launch-index: 0 # I will only be used on launch-index 0
diff --git a/doc/examples/cloud-config-launch-index.txt b/doc/examples/cloud-config-launch-index.txt
new file mode 100644
index 00000000..e7dfdc0c
--- /dev/null
+++ b/doc/examples/cloud-config-launch-index.txt
@@ -0,0 +1,23 @@
+#cloud-config
+# vim: syntax=yaml
+
+#
+# This is the configuration syntax that can be provided to have
+# a given set of cloud config data show up on a certain launch
+# index (and not other launches) by providing a key here which
+# will act as a filter on the instance's userdata. When
+# this key is left out (or is non-integer) then the content
+# of this file will always be used for all launch indexes
+# (ie the previous behavior).
+launch-index: 5
+
+# Upgrade the instance on first boot
+# (ie run apt-get upgrade)
+#
+# Default: false
+#
+apt_upgrade: true
+
+# Other yaml keys below...
+# .......
+# .......
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
new file mode 100644
index 00000000..1da0d717
--- /dev/null
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -0,0 +1,94 @@
+# add groups to the system
+# The following example adds the ubuntu group with members foo and bar and
+# the group cloud-users.
+groups:
+ - ubuntu: [foo,bar]
+ - cloud-users
+
+# add users to the system. Users are added after groups are added.
+users:
+ - default
+ - name: foobar
+ gecos: Foo B. Bar
+ primary-group: foobar
+ groups: users
+ selinux-user: staff_u
+ expiredate: 2012-09-01
+ ssh-import-id: foobar
+ lock-passwd: false
+ passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
+ - name: barfoo
+ gecos: Bar B. Foo
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ groups: users, admin
+ ssh-import-id: None
+ lock-passwd: true
+ ssh-authorized-keys:
+ - <ssh pub key 1>
+ - <ssh pub key 2>
+ - name: cloudy
+ gecos: Magic Cloud App Daemon User
+ inactive: true
+ system: true
+
+# Valid Values:
+# name: The user's login name
+# gecos: The user's real name, e.g. "Bob B. Smith"
+# homedir: Optional. Set to the local path you want to use. Defaults to
+# /home/<username>
+# primary-group: define the primary group. Defaults to a new group created
+# named after the user.
+# groups: Optional. Additional groups to add the user to. Defaults to none
+# selinux-user: Optional. The SELinux user for the user's login, such as
+# "staff_u". When this is omitted the system will select the default
+# SELinux user.
+# lock-passwd: Defaults to true. Lock the password to disable password login
+# inactive: Create the user as inactive
+# passwd: The hash -- not the password itself -- of the password you want
+# to use for this user. You can generate a safe hash via:
+# mkpasswd -m SHA-512 -s 4096
+# (the above command would create a SHA-512 password hash
+# with 4096 salt rounds)
+#
+# Please note: while the use of a hashed password is better than
+# plain text, the use of this feature is not ideal. Also,
+# using a high number of salting rounds will help, but it should
+# not be relied upon.
+#
+# To highlight this risk, running John the Ripper against the
+# example hash above, with a readily available wordlist, revealed
+# the true password in 12 seconds on an i7-2620QM.
+#
+# In other words, this feature is a potential security risk and is
+# provided for your convenience only. If you do not fully trust the
+# medium over which your cloud-config will be transmitted, then you
+# should use SSH authentication only.
+#
+# You have thus been warned.
+# no-create-home: When set to true, do not create home directory.
+# no-user-group: When set to true, do not create a group named after the user.
+# no-log-init: When set to true, do not initialize lastlog and faillog database.
+# ssh-import-id: Optional. Import SSH ids
+# ssh-authorized-keys: Optional. Add keys to user's ssh authorized keys file
+# sudo: Defaults to none. Set to the sudo string you want to use, e.g.
+# ALL=(ALL) NOPASSWD:ALL. To add multiple rules, use the following
+# format.
+# sudo:
+# - ALL=(ALL) NOPASSWD:/bin/mysql
+# - ALL=(ALL) ALL
+# Note: Please double check your syntax and make sure it is valid.
+# cloud-init does not parse/check the syntax of the sudo
+# directive.
+# system: Create the user as a system user. This means no home directory.
+#
+# Default user creation: Ubuntu only
+# Unless you define users, you will get an 'ubuntu' user on Ubuntu systems with
+# the legacy permissions (passwordless sudo, locked password, etc). If, however,
+# you want the 'ubuntu' user in addition to other users, you need to instruct
+# cloud-init that you also want the default user. To do this use the following
+# syntax:
+# users:
+# default: True
+# foobar: ...
+#
+# users[0] (the first user in users) overrides the user directive.
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index 1e6628d2..56a6c35a 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -167,7 +167,8 @@ mounts:
# complete. This must be an array, and must have 7 fields.
mount_default_fields: [ None, None, "auto", "defaults,nobootwait", "0", "2" ]
-# add each entry to ~/.ssh/authorized_keys for the configured user
+# add each entry to ~/.ssh/authorized_keys for the configured user or the
+# first user defined in the user definition directive.
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host
- ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies
diff --git a/doc/sources/altcloud/README b/doc/sources/altcloud/README
new file mode 100644
index 00000000..87d7949a
--- /dev/null
+++ b/doc/sources/altcloud/README
@@ -0,0 +1,65 @@
+Data source AltCloud will be used to pick up user data on
+RHEVm and vSphere.
+
+RHEVm:
+======
+For RHEVm v3.0 the userdata is injected into the VM using floppy
+injection via the RHEVm dashboard "Custom Properties". The format
+of the Custom Properties entry must be:
+"floppyinject=user-data.txt:<base64 encoded data>"
+
+e.g.: To pass a simple bash script
+
+% cat simple_script.bash
+#!/bin/bash
+echo "Hello Joe!" >> /tmp/JJV_Joe_out.txt
+
+% base64 < simple_script.bash
+IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK
+
+To pass this example script to cloud-init running in a RHEVm v3.0 VM
+set the "Custom Properties" when creating the RHEVm v3.0 VM to:
+floppyinject=user-data.txt:IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK
+
+NOTE: The prefix, including the file name, must be: "floppyinject=user-data.txt:"
+
+It is also possible to launch a RHEVm v3.0 VM and pass optional user
+data to it using Deltacloud.
+For more information on Deltacloud see: http://deltacloud.apache.org
+
+vSphere:
+========
+For VMware's vSphere the userdata is injected into the VM as an ISO
+via the cdrom. This can be done using the vSphere dashboard
+by connecting an ISO image to the CD/DVD drive.
+
+To pass this example script to cloud-init running in a vSphere VM
+set the CD/DVD drive when creating the vSphere VM to point to an
+ISO on the data store.
+
+The ISO must contain the user data:
+
+For example, to pass the same simple_script.bash to vSphere:
+
+Create the ISO:
+===============
+% mkdir my-iso
+
+NOTE: The file name on the ISO must be: "user-data.txt"
+% cp simple_script.bash my-iso/user-data.txt
+
+% genisoimage -o user-data.iso -r my-iso
+
+Verify the ISO:
+===============
+% sudo mkdir /media/vsphere_iso
+% sudo mount -o loop user-data.iso /media/vsphere_iso
+% cat /media/vsphere_iso/user-data.txt
+% sudo umount /media/vsphere_iso
+
+Then, launch the vSphere VM with the ISO user-data.iso attached as a CD-ROM.
+
+It is also possible to launch a vSphere VM and pass optional user
+data to it using Deltacloud.
+
+For more information on Deltacloud see: http://deltacloud.apache.org
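The Custom Properties value described above can also be built with a few lines of Python (a sketch; the file-name prefix is the one this README documents):

    import base64

    def floppyinject_property(script_path):
        # Build: floppyinject=user-data.txt:<base64 of the script>
        with open(script_path) as f:
            encoded = base64.b64encode(f.read())
        return 'floppyinject=user-data.txt:' + encoded

    # e.g. floppyinject_property('simple_script.bash')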
diff --git a/doc/configdrive/README b/doc/sources/configdrive/README
index ed9033c9..ed9033c9 100644
--- a/doc/configdrive/README
+++ b/doc/sources/configdrive/README
diff --git a/doc/kernel-cmdline.txt b/doc/sources/kernel-cmdline.txt
index 0b77a9af..0b77a9af 100644
--- a/doc/kernel-cmdline.txt
+++ b/doc/sources/kernel-cmdline.txt
diff --git a/doc/nocloud/README b/doc/sources/nocloud/README
index c94b206a..c94b206a 100644
--- a/doc/nocloud/README
+++ b/doc/sources/nocloud/README
diff --git a/doc/ovf/README b/doc/sources/ovf/README
index e3ef12e0..e3ef12e0 100644
--- a/doc/ovf/README
+++ b/doc/sources/ovf/README
diff --git a/doc/ovf/example/ovf-env.xml b/doc/sources/ovf/example/ovf-env.xml
index 13e8f104..13e8f104 100644
--- a/doc/ovf/example/ovf-env.xml
+++ b/doc/sources/ovf/example/ovf-env.xml
diff --git a/doc/ovf/example/ubuntu-server.ovf b/doc/sources/ovf/example/ubuntu-server.ovf
index 846483a1..846483a1 100644
--- a/doc/ovf/example/ubuntu-server.ovf
+++ b/doc/sources/ovf/example/ubuntu-server.ovf
diff --git a/doc/ovf/make-iso b/doc/sources/ovf/make-iso
index 91d0e2e5..91d0e2e5 100755
--- a/doc/ovf/make-iso
+++ b/doc/sources/ovf/make-iso
diff --git a/doc/ovf/ovf-env.xml.tmpl b/doc/sources/ovf/ovf-env.xml.tmpl
index 8e255d43..8e255d43 100644
--- a/doc/ovf/ovf-env.xml.tmpl
+++ b/doc/sources/ovf/ovf-env.xml.tmpl
diff --git a/doc/ovf/ovfdemo.pem b/doc/sources/ovf/ovfdemo.pem
index 5bc629c8..5bc629c8 100644
--- a/doc/ovf/ovfdemo.pem
+++ b/doc/sources/ovf/ovfdemo.pem
diff --git a/doc/ovf/user-data b/doc/sources/ovf/user-data
index bfac51fd..bfac51fd 100644
--- a/doc/ovf/user-data
+++ b/doc/sources/ovf/user-data
diff --git a/systemd/cloud-config.service b/systemd/cloud-config.service
index 696230f6..fc72fc48 100644
--- a/systemd/cloud-config.service
+++ b/systemd/cloud-config.service
@@ -6,8 +6,12 @@ Wants=network.target
[Service]
Type=oneshot
-ExecStart=/usr/bin/cloud-init-cfg all config
+ExecStart=/usr/bin/cloud-init modules --mode=config
RemainAfterExit=yes
+TimeoutSec=0
+
+# Output needs to appear in instance console output
+StandardOutput=tty
[Install]
WantedBy=multi-user.target
diff --git a/systemd/cloud-final.service b/systemd/cloud-final.service
index 23275ee5..f836eab6 100644
--- a/systemd/cloud-final.service
+++ b/systemd/cloud-final.service
@@ -6,8 +6,12 @@ Wants=network.target
[Service]
Type=oneshot
-ExecStart=/usr/bin/cloud-init-cfg all final
+ExecStart=/usr/bin/cloud-init modules --mode=final
RemainAfterExit=yes
+TimeoutSec=0
+
+# Output needs to appear in instance console output
+StandardOutput=tty
[Install]
WantedBy=multi-user.target
diff --git a/systemd/cloud-init-local.service b/systemd/cloud-init-local.service
index 2d57567f..6a551710 100644
--- a/systemd/cloud-init-local.service
+++ b/systemd/cloud-init-local.service
@@ -5,8 +5,12 @@ After=local-fs.target
[Service]
Type=oneshot
-ExecStart=/usr/bin/cloud-init start-local
+ExecStart=/usr/bin/cloud-init init --local
RemainAfterExit=yes
+TimeoutSec=0
+
+# Output needs to appear in instance console output
+StandardOutput=tty
[Install]
WantedBy=multi-user.target
diff --git a/systemd/cloud-init.service b/systemd/cloud-init.service
index b8f6f49d..d4eb9fa5 100644
--- a/systemd/cloud-init.service
+++ b/systemd/cloud-init.service
@@ -6,8 +6,12 @@ Wants=local-fs.target cloud-init-local.service
[Service]
Type=oneshot
-ExecStart=/usr/bin/cloud-init start
+ExecStart=/usr/bin/cloud-init init
RemainAfterExit=yes
+TimeoutSec=0
+
+# Output needs to appear in instance console output
+StandardOutput=tty
[Install]
WantedBy=multi-user.target
diff --git a/templates/sources.list.tmpl b/templates/sources.list.tmpl
index f702025f..ce395b3d 100644
--- a/templates/sources.list.tmpl
+++ b/templates/sources.list.tmpl
@@ -52,9 +52,9 @@ deb-src $mirror $codename-updates universe
# deb http://archive.canonical.com/ubuntu $codename partner
# deb-src http://archive.canonical.com/ubuntu $codename partner
-deb http://security.ubuntu.com/ubuntu $codename-security main
-deb-src http://security.ubuntu.com/ubuntu $codename-security main
-deb http://security.ubuntu.com/ubuntu $codename-security universe
-deb-src http://security.ubuntu.com/ubuntu $codename-security universe
-# deb http://security.ubuntu.com/ubuntu $codename-security multiverse
-# deb-src http://security.ubuntu.com/ubuntu $codename-security multiverse
+deb $security $codename-security main
+deb-src $security $codename-security main
+deb $security $codename-security universe
+deb-src $security $codename-security universe
+# deb $security $codename-security multiverse
+# deb-src $security $codename-security multiverse
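
The template now draws the security pocket from a `$security` variable instead of a hard-coded host, so the mirror can be overridden per cloud. A rough illustration of the substitution using Python's stdlib `string.Template` (the real cloud-init templater may differ; the values here are assumptions):

```python
from string import Template

line = Template('deb $security $codename-security main')
print(line.substitute(security='http://security.ubuntu.com/ubuntu',
                      codename='precise'))
# deb http://security.ubuntu.com/ubuntu precise-security main
```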
diff --git a/tests/data/filter_cloud_multipart.yaml b/tests/data/filter_cloud_multipart.yaml
new file mode 100644
index 00000000..7acc2b9d
--- /dev/null
+++ b/tests/data/filter_cloud_multipart.yaml
@@ -0,0 +1,30 @@
+#cloud-config-archive
+---
+- content: "\n blah: true\n launch-index: 3\n"
+ type: text/cloud-config
+- content: "\n blah: true\n launch-index: 4\n"
+ type: text/cloud-config
+- content: The quick brown fox jumps over the lazy dog
+ filename: b0.txt
+ launch-index: 0
+ type: plain/text
+- content: The quick brown fox jumps over the lazy dog
+ filename: b3.txt
+ launch-index: 3
+ type: plain/text
+- content: The quick brown fox jumps over the lazy dog
+ filename: b2.txt
+ launch-index: 2
+ type: plain/text
+- content: '#!/bin/bash \n echo "stuff"'
+ filename: b2.txt
+ launch-index: 2
+- content: '#!/bin/bash \n echo "stuff"'
+ filename: b2.txt
+ launch-index: 1
+- content: '#!/bin/bash \n echo "stuff"'
+ filename: b2.txt
+ # Use a string to see if conversion works
+ launch-index: "1"
+...
+
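
The archive above deliberately mixes integer and string `launch-index` values (the last part uses "1") to prove that the filter converts strings. A minimal sketch of that conversion, mirroring what the tests later call `util.safe_int` (the helper body is an assumption; only its observable behavior is taken from the tests):

```python
def safe_int(possible_int):
    # Strings like "1" become 1; non-numeric values become None.
    try:
        return int(possible_int)
    except (ValueError, TypeError):
        return None

assert safe_int('1') == 1
assert safe_int(2) == 2
assert safe_int('c') is None
assert safe_int(None) is None
```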
diff --git a/tests/data/filter_cloud_multipart_1.email b/tests/data/filter_cloud_multipart_1.email
new file mode 100644
index 00000000..6d93b1f1
--- /dev/null
+++ b/tests/data/filter_cloud_multipart_1.email
@@ -0,0 +1,11 @@
+From nobody Fri Aug 31 17:17:00 2012
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+
+#cloud-config
+b: c
+launch-index: 2
+
+
diff --git a/tests/data/filter_cloud_multipart_2.email b/tests/data/filter_cloud_multipart_2.email
new file mode 100644
index 00000000..b04068c5
--- /dev/null
+++ b/tests/data/filter_cloud_multipart_2.email
@@ -0,0 +1,39 @@
+From nobody Fri Aug 31 17:43:04 2012
+Content-Type: multipart/mixed; boundary="===============1668325974=="
+MIME-Version: 1.0
+
+--===============1668325974==
+Content-Type: text/cloud-config; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+
+#cloud-config
+b: c
+launch-index: 2
+
+
+--===============1668325974==
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+
+#cloud-config-archive
+- content: The quick brown fox jumps over the lazy dog
+ filename: b3.txt
+ launch-index: 3
+ type: plain/text
+
+--===============1668325974==
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+
+#cloud-config
+b: c
+launch-index: 2
+
+
+--===============1668325974==--
diff --git a/tests/data/filter_cloud_multipart_header.email b/tests/data/filter_cloud_multipart_header.email
new file mode 100644
index 00000000..770f7ef1
--- /dev/null
+++ b/tests/data/filter_cloud_multipart_header.email
@@ -0,0 +1,11 @@
+From nobody Fri Aug 31 17:17:00 2012
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Launch-Index: 5
+Content-Transfer-Encoding: 7bit
+
+
+#cloud-config
+b: c
+
+
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
new file mode 100644
index 00000000..d0f09e70
--- /dev/null
+++ b/tests/unittests/helpers.py
@@ -0,0 +1,42 @@
+import os
+
+from mocker import MockerTestCase
+
+from cloudinit import helpers as ch
+
+
+class ResourceUsingTestCase(MockerTestCase):
+ def __init__(self, methodName="runTest"):
+ MockerTestCase.__init__(self, methodName)
+ self.resource_path = None
+
+ def resourceLocation(self, subname=None):
+ if self.resource_path is None:
+ paths = [
+ os.path.join('tests', 'data'),
+ os.path.join('data'),
+ os.path.join(os.pardir, 'tests', 'data'),
+ os.path.join(os.pardir, 'data'),
+ ]
+ for p in paths:
+ if os.path.isdir(p):
+ self.resource_path = p
+ break
+ self.assertTrue((self.resource_path and
+ os.path.isdir(self.resource_path)),
+ msg="Unable to locate test resource data path!")
+ if not subname:
+ return self.resource_path
+ return os.path.join(self.resource_path, subname)
+
+ def readResource(self, name):
+ where = self.resourceLocation(name)
+ with open(where, 'r') as fh:
+ return fh.read()
+
+ def getCloudPaths(self):
+ cp = ch.Paths({
+ 'cloud_dir': self.makeDir(),
+ 'templates_dir': self.resourceLocation(),
+ })
+ return cp
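
A hypothetical use of the helper above: subclass `ResourceUsingTestCase` and pull fixtures from the located `tests/data` directory (the class and test names here are invented for illustration):

```python
from helpers import ResourceUsingTestCase  # import path as used by the tests


class ExampleFixtureTest(ResourceUsingTestCase):
    def test_reads_multipart_fixture(self):
        raw = self.readResource('filter_cloud_multipart.yaml')
        self.assertTrue(raw.startswith('#cloud-config-archive'))
```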
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index 464c8c2f..ac082076 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -1,6 +1,6 @@
-import StringIO
import logging
import os
+import StringIO
import sys
from mocker import MockerTestCase, ANY, ARGS, KWARGS
@@ -61,14 +61,14 @@ class TestWalkerHandleHandler(MockerTestCase):
import_mock(self.expected_module_name)
self.mocker.result(self.module_fake)
self.mocker.replay()
-
+
handlers.walker_handle_handler(self.data, self.ctype, self.filename,
self.payload)
-
+
self.assertEqual(1, self.data["handlercount"])
-
+
def test_import_error(self):
- """Module import errors are logged. No handler added to C{pdata}"""
+ """Module import errors are logged. No handler added to C{pdata}."""
import_mock = self.mocker.replace(importer.import_module,
passthrough=False)
import_mock(self.expected_module_name)
@@ -81,7 +81,7 @@ class TestWalkerHandleHandler(MockerTestCase):
self.assertEqual(0, self.data["handlercount"])
def test_attribute_error(self):
- """Attribute errors are logged. No handler added to C{pdata}"""
+ """Attribute errors are logged. No handler added to C{pdata}."""
import_mock = self.mocker.replace(importer.import_module,
passthrough=False)
import_mock(self.expected_module_name)
@@ -156,7 +156,7 @@ class TestHandlerHandlePart(MockerTestCase):
self.payload, self.frequency)
def test_no_handle_when_modfreq_once(self):
- """C{handle_part} is not called if frequency is once"""
+ """C{handle_part} is not called if frequency is once."""
self.frequency = "once"
mod_mock = self.mocker.mock()
getattr(mod_mock, "frequency")
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index 5bba8bc9..ebc0bd51 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -1,4 +1,4 @@
-"""Tests of the built-in user data handlers"""
+"""Tests of the built-in user data handlers."""
import os
@@ -33,7 +33,7 @@ class TestBuiltins(MockerTestCase):
None, None, None)
self.assertEquals(0, len(os.listdir(up_root)))
- def test_upstart_frequency_single(self):
+ def test_upstart_frequency_single(self):
c_root = self.makeDir()
up_root = self.makeDir()
paths = helpers.Paths({
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
new file mode 100644
index 00000000..bda61c7e
--- /dev/null
+++ b/tests/unittests/test_datasource/test_altcloud.py
@@ -0,0 +1,445 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joe VLcek <JVLcek@RedHat.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+'''
+This test file exercises the code in cloudinit/sources/DataSourceAltCloud.py.
+'''
+
+import os
+import shutil
+import tempfile
+
+from cloudinit import helpers
+from unittest import TestCase
+
+# Import the items needed from cloudinit.sources.DataSourceAltCloud.
+import cloudinit.sources.DataSourceAltCloud
+from cloudinit.sources.DataSourceAltCloud import DataSourceAltCloud
+from cloudinit.sources.DataSourceAltCloud import read_user_data_callback
+
+
+def _write_cloud_info_file(value):
+ '''
+ Populate the CLOUD_INFO_FILE with the cloud backend identifier
+ that ImageFactory writes when building an image.
+ '''
+ cifile = open(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 'w')
+ cifile.write(value)
+ cifile.close()
+ os.chmod(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 0664)
+
+
+def _remove_cloud_info_file():
+ '''
+ Remove the test CLOUD_INFO_FILE
+ '''
+ os.remove(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE)
+
+
+def _write_user_data_files(mount_dir, value):
+ '''
+ Populate the deltacloud_user_data_file and the user_data_file
+ with user data.
+ '''
+ deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
+ user_data_file = mount_dir + '/user-data.txt'
+
+ udfile = open(deltacloud_user_data_file, 'w')
+ udfile.write(value)
+ udfile.close()
+ os.chmod(deltacloud_user_data_file, 0664)
+
+ udfile = open(user_data_file, 'w')
+ udfile.write(value)
+ udfile.close()
+ os.chmod(user_data_file, 0664)
+
+
+def _remove_user_data_files(mount_dir,
+ dc_file=True,
+ non_dc_file=True):
+ '''
+ Remove the test files: deltacloud_user_data_file and
+ user_data_file
+ '''
+ deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
+ user_data_file = mount_dir + '/user-data.txt'
+
+ # Ignore any failures removing files that are already gone.
+ if dc_file:
+ try:
+ os.remove(deltacloud_user_data_file)
+ except OSError:
+ pass
+
+ if non_dc_file:
+ try:
+ os.remove(user_data_file)
+ except OSError:
+ pass
+
+
+class TestGetCloudType(TestCase):
+ '''
+ Test to exercise method: DataSourceAltCloud.get_cloud_type()
+ '''
+
+ def setUp(self):
+ '''Set up.'''
+ self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+
+ def tearDown(self):
+ # Reset
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['dmidecode', '--string', 'system-product-name']
+
+ def test_rhev(self):
+ '''
+ Test method get_cloud_type() for RHEVm systems.
+ Forcing dmidecode return to match a RHEVm system: RHEV Hypervisor
+ '''
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['echo', 'RHEV Hypervisor']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals('RHEV', \
+ dsrc.get_cloud_type())
+
+ def test_vsphere(self):
+ '''
+ Test method get_cloud_type() for vSphere systems.
+ Forcing dmidecode return to match a vSphere system: VMware Virtual Platform
+ '''
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['echo', 'VMware Virtual Platform']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals('VSPHERE', \
+ dsrc.get_cloud_type())
+
+ def test_unknown(self):
+ '''
+ Test method get_cloud_type() for unknown systems.
+ Forcing dmidecode to return an unrecognized platform string.
+ '''
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['echo', 'Unrecognized Platform']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals('UNKNOWN', \
+ dsrc.get_cloud_type())
+
+ def test_exception1(self):
+ '''
+ Test method get_cloud_type() where command dmidecode fails.
+ '''
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['ls', 'bad command']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals('UNKNOWN', \
+ dsrc.get_cloud_type())
+
+ def test_exception2(self):
+ '''
+ Test method get_cloud_type() where command dmidecode is not available.
+ '''
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['bad command']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals('UNKNOWN', \
+ dsrc.get_cloud_type())
+
+
+class TestGetDataCloudInfoFile(TestCase):
+ '''
+ Test to exercise method: DataSourceAltCloud.get_data()
+ With a contrived CLOUD_INFO_FILE
+ '''
+ def setUp(self):
+ '''Set up.'''
+ self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ self.cloud_info_file = tempfile.mkstemp()[1]
+ cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ self.cloud_info_file
+
+ def tearDown(self):
+ # Reset
+
+ # Attempt to remove the temp file ignoring errors
+ try:
+ os.remove(self.cloud_info_file)
+ except OSError:
+ pass
+
+ cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ '/etc/sysconfig/cloud-info'
+
+ def test_rhev(self):
+ '''Success test of get_data() forcing RHEV.'''
+
+ _write_cloud_info_file('RHEV')
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_rhevm = lambda: True
+ self.assertEquals(True, dsrc.get_data())
+
+ def test_vsphere(self):
+ '''Success test of get_data() forcing VSPHERE.'''
+
+ _write_cloud_info_file('VSPHERE')
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_vsphere = lambda: True
+ self.assertEquals(True, dsrc.get_data())
+
+ def test_fail_rhev(self):
+ '''Failure test of get_data() forcing RHEV.'''
+
+ _write_cloud_info_file('RHEV')
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_rhevm = lambda: False
+ self.assertEquals(False, dsrc.get_data())
+
+ def test_fail_vsphere(self):
+ '''Failure test of get_data() forcing VSPHERE.'''
+
+ _write_cloud_info_file('VSPHERE')
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_vsphere = lambda: False
+ self.assertEquals(False, dsrc.get_data())
+
+ def test_unrecognized(self):
+ '''Failure test of get_data() forcing an unrecognized cloud type.'''
+
+ _write_cloud_info_file('unrecognized')
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals(False, dsrc.get_data())
+
+
+class TestGetDataNoCloudInfoFile(TestCase):
+ '''
+ Test to exercise method: DataSourceAltCloud.get_data()
+ Without a CLOUD_INFO_FILE
+ '''
+ def setUp(self):
+ '''Set up.'''
+ self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ 'no such file'
+
+ def tearDown(self):
+ # Reset
+ cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ '/etc/sysconfig/cloud-info'
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['dmidecode', '--string', 'system-product-name']
+
+ def test_rhev_no_cloud_file(self):
+ '''Test get_data() with no cloud info file, forcing RHEV.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['echo', 'RHEV Hypervisor']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_rhevm = lambda: True
+ self.assertEquals(True, dsrc.get_data())
+
+ def test_vsphere_no_cloud_file(self):
+ '''Test get_data() with no cloud info file, forcing VSPHERE.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['echo', 'VMware Virtual Platform']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_vsphere = lambda: True
+ self.assertEquals(True, dsrc.get_data())
+
+ def test_failure_no_cloud_file(self):
+ '''Test get_data() with no cloud info file, forcing unrecognized.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['echo', 'Unrecognized Platform']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals(False, dsrc.get_data())
+
+
+class TestUserDataRhevm(TestCase):
+ '''
+ Test to exercise method: DataSourceAltCloud.user_data_rhevm()
+ '''
+ def setUp(self):
+ '''Set up.'''
+ self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ self.mount_dir = tempfile.mkdtemp()
+
+ _write_user_data_files(self.mount_dir, 'test user data')
+
+ def tearDown(self):
+ # Reset
+
+ _remove_user_data_files(self.mount_dir)
+
+ # Attempt to remove the temp dir ignoring errors
+ try:
+ shutil.rmtree(self.mount_dir)
+ except OSError:
+ pass
+
+ cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ '/etc/sysconfig/cloud-info'
+ cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
+ ['/sbin/modprobe', 'floppy']
+ cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \
+ ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5']
+
+ def test_mount_cb_fails(self):
+ '''Test user_data_rhevm() where mount_cb fails.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
+ ['echo', 'modprobe floppy']
+
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+
+ self.assertEquals(False, dsrc.user_data_rhevm())
+
+ def test_modprobe_fails(self):
+ '''Test user_data_rhevm() where modprobe fails.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
+ ['ls', 'modprobe floppy']
+
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+
+ self.assertEquals(False, dsrc.user_data_rhevm())
+
+ def test_no_modprobe_cmd(self):
+ '''Test user_data_rhevm() with no modprobe command.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
+ ['bad command', 'modprobe floppy']
+
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+
+ self.assertEquals(False, dsrc.user_data_rhevm())
+
+ def test_udevadm_fails(self):
+ '''Test user_data_rhevm() where udevadm fails.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \
+ ['ls', 'udevadm floppy']
+
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+
+ self.assertEquals(False, dsrc.user_data_rhevm())
+
+ def test_no_udevadm_cmd(self):
+ '''Test user_data_rhevm() with no udevadm command.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \
+ ['bad command', 'udevadm floppy']
+
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+
+ self.assertEquals(False, dsrc.user_data_rhevm())
+
+
+class TestUserDataVsphere(TestCase):
+ '''
+ Test to exercise method: DataSourceAltCloud.user_data_vsphere()
+ '''
+ def setUp(self):
+ '''Set up.'''
+ self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ self.mount_dir = tempfile.mkdtemp()
+
+ _write_user_data_files(self.mount_dir, 'test user data')
+
+ def tearDown(self):
+ # Reset
+
+ _remove_user_data_files(self.mount_dir)
+
+ # Attempt to remove the temp dir ignoring errors
+ try:
+ shutil.rmtree(self.mount_dir)
+ except OSError:
+ pass
+
+ cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ '/etc/sysconfig/cloud-info'
+
+ def test_user_data_vsphere(self):
+ '''Test user_data_vsphere() where mount_cb fails.'''
+
+ cloudinit.sources.DataSourceAltCloud.MEDIA_DIR = self.mount_dir
+
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+
+ self.assertEquals(False, dsrc.user_data_vsphere())
+
+
+class TestReadUserDataCallback(TestCase):
+ '''
+ Test to exercise method: DataSourceAltCloud.read_user_data_callback()
+ '''
+ def setUp(self):
+ '''Set up.'''
+ self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ self.mount_dir = tempfile.mkdtemp()
+
+ _write_user_data_files(self.mount_dir, 'test user data')
+
+ def tearDown(self):
+ # Reset
+
+ _remove_user_data_files(self.mount_dir)
+
+ # Attempt to remove the temp dir ignoring errors
+ try:
+ shutil.rmtree(self.mount_dir)
+ except OSError:
+ pass
+
+ def test_callback_both(self):
+ '''Test read_user_data_callback() with both files.'''
+
+ self.assertEquals('test user data',
+ read_user_data_callback(self.mount_dir))
+
+ def test_callback_dc(self):
+ '''Test read_user_data_callback() with only DC file.'''
+
+ _remove_user_data_files(self.mount_dir,
+ dc_file=False,
+ non_dc_file=True)
+
+ self.assertEquals('test user data',
+ read_user_data_callback(self.mount_dir))
+
+ def test_callback_non_dc(self):
+ '''Test read_user_data_callback() with only non-DC file.'''
+
+ _remove_user_data_files(self.mount_dir,
+ dc_file=True,
+ non_dc_file=False)
+
+ self.assertEquals('test user data',
+ read_user_data_callback(self.mount_dir))
+
+ def test_callback_none(self):
+ '''Test read_user_data_callback() when no files are found.'''
+
+ _remove_user_data_files(self.mount_dir)
+ self.assertEquals(None, read_user_data_callback(self.mount_dir))
+
+# vi: ts=4 expandtab
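
A pattern worth noting in the tests above: module-level command constants (`CMD_DMI_SYSTEM`, `CMD_PROBE_FLOPPY`, `CMD_UDEVADM_SETTLE`) are swapped for `echo`/`ls` stand-ins in each test and restored in `tearDown`. The same idea expressed as a reusable context manager, sketched here as an alternative (not part of the commit):

```python
from contextlib import contextmanager

import cloudinit.sources.DataSourceAltCloud as dsac


@contextmanager
def swapped_attr(mod, name, value):
    # Swap a module-level constant for the duration of a block,
    # then restore it -- what setUp/tearDown do above, scoped.
    saved = getattr(mod, name)
    setattr(mod, name, value)
    try:
        yield
    finally:
        setattr(mod, name, saved)

# Example: force get_cloud_type() to see a RHEV system.
# with swapped_attr(dsac, 'CMD_DMI_SYSTEM', ['echo', 'RHEV Hypervisor']):
#     ...
```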
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
new file mode 100644
index 00000000..55573114
--- /dev/null
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -0,0 +1,177 @@
+from copy import copy
+import json
+import os
+import os.path
+import shutil
+import tempfile
+from unittest import TestCase
+
+from cloudinit.sources import DataSourceConfigDrive as ds
+from cloudinit import util
+
+
+PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
+EC2_META = {
+ 'ami-id': 'ami-00000001',
+ 'ami-launch-index': 0,
+ 'ami-manifest-path': 'FIXME',
+ 'block-device-mapping': {
+ 'ami': 'sda1',
+ 'ephemeral0': 'sda2',
+ 'root': '/dev/sda1',
+ 'swap': 'sda3'},
+ 'hostname': 'sm-foo-test.novalocal',
+ 'instance-action': 'none',
+ 'instance-id': 'i-00000001',
+ 'instance-type': 'm1.tiny',
+ 'local-hostname': 'sm-foo-test.novalocal',
+ 'local-ipv4': None,
+ 'placement': {'availability-zone': 'nova'},
+ 'public-hostname': 'sm-foo-test.novalocal',
+ 'public-ipv4': '',
+ 'public-keys': {'0': {'openssh-key': PUBKEY}},
+ 'reservation-id': 'r-iru5qm4m',
+ 'security-groups': ['default']
+}
+USER_DATA = '#!/bin/sh\necho This is user data\n'
+OSTACK_META = {
+ 'availability_zone': 'nova',
+ 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
+ {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
+ 'hostname': 'sm-foo-test.novalocal',
+ 'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
+ 'name': 'sm-foo-test',
+ 'public_keys': {'mykey': PUBKEY},
+ 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
+
+CONTENT_0 = 'This is contents of /etc/foo.cfg\n'
+CONTENT_1 = '# this is /etc/bar/bar.cfg\n'
+
+CFG_DRIVE_FILES_V2 = {
+ 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
+ 'ec2/2009-04-04/user-data': USER_DATA,
+ 'ec2/latest/meta-data.json': json.dumps(EC2_META),
+ 'ec2/latest/user-data': USER_DATA,
+ 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META),
+ 'openstack/2012-08-10/user_data': USER_DATA,
+ 'openstack/content/0000': CONTENT_0,
+ 'openstack/content/0001': CONTENT_1,
+ 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
+ 'openstack/latest/user_data': USER_DATA}
+
+
+class TestConfigDriveDataSource(TestCase):
+
+ def setUp(self):
+ super(TestConfigDriveDataSource, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+
+ def tearDown(self):
+ try:
+ shutil.rmtree(self.tmp)
+ except OSError:
+ pass
+
+ def test_dir_valid(self):
+ """Verify a dir is read as such."""
+
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+
+ found = ds.read_config_drive_dir(self.tmp)
+
+ expected_md = copy(OSTACK_META)
+ expected_md['instance-id'] = expected_md['uuid']
+
+ self.assertEqual(USER_DATA, found['userdata'])
+ self.assertEqual(expected_md, found['metadata'])
+ self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0)
+ self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1)
+
+ def test_seed_dir_valid_extra(self):
+ """Verify extra files do not affect datasource validity."""
+
+ data = copy(CFG_DRIVE_FILES_V2)
+ data["myfoofile.txt"] = "myfoocontent"
+ data["openstack/latest/random-file.txt"] = "random-content"
+
+ populate_dir(self.tmp, data)
+
+ found = ds.read_config_drive_dir(self.tmp)
+
+ expected_md = copy(OSTACK_META)
+ expected_md['instance-id'] = expected_md['uuid']
+
+ self.assertEqual(expected_md, found['metadata'])
+
+ def test_seed_dir_bad_json_metadata(self):
+ """Verify that bad json in metadata raises BrokenConfigDriveDir."""
+ data = copy(CFG_DRIVE_FILES_V2)
+
+ data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}"
+ data["openstack/latest/meta_data.json"] = "non-json garbage {}"
+
+ populate_dir(self.tmp, data)
+
+ self.assertRaises(ds.BrokenConfigDriveDir,
+ ds.read_config_drive_dir, self.tmp)
+
+ def test_seed_dir_no_configdrive(self):
+ """Verify that no metadata raises NonConfigDriveDir."""
+
+ my_d = os.path.join(self.tmp, "non-configdrive")
+ data = copy(CFG_DRIVE_FILES_V2)
+ data["myfoofile.txt"] = "myfoocontent"
+ data["openstack/latest/random-file.txt"] = "random-content"
+ data["content/foo"] = "foocontent"
+
+ self.assertRaises(ds.NonConfigDriveDir,
+ ds.read_config_drive_dir, my_d)
+
+ def test_seed_dir_missing(self):
+ """Verify that missing seed_dir raises NonConfigDriveDir."""
+ my_d = os.path.join(self.tmp, "nonexistantdirectory")
+ self.assertRaises(ds.NonConfigDriveDir,
+ ds.read_config_drive_dir, my_d)
+
+ def test_find_candidates(self):
+ devs_with_answers = {
+ "TYPE=vfat": [],
+ "TYPE=iso9660": ["/dev/vdb"],
+ "LABEL=config-2": ["/dev/vdb"],
+ }
+
+ def my_devs_with(criteria):
+ return devs_with_answers[criteria]
+
+ try:
+ orig_find_devs_with = util.find_devs_with
+ util.find_devs_with = my_devs_with
+
+ self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
+
+ # add a vfat item
+ # zdd reverse sorts after vdb, but config-2 label is preferred
+ devs_with_answers['TYPE=vfat'] = ["/dev/zdd"]
+ self.assertEqual(["/dev/vdb", "/dev/zdd"],
+ ds.find_candidate_devs())
+
+ # verify that partitions are not considered
+ devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
+ "TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]}
+ self.assertEqual([], ds.find_candidate_devs())
+
+ finally:
+ util.find_devs_with = orig_find_devs_with
+
+
+def populate_dir(seed_dir, files):
+ for (name, content) in files.iteritems():
+ path = os.path.join(seed_dir, name)
+ dirname = os.path.dirname(path)
+ if not os.path.isdir(dirname):
+ os.makedirs(dirname)
+ with open(path, "w") as fp:
+ fp.write(content)
+
+# vi: ts=4 expandtab
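
For reference, `populate_dir()` above builds a nested tree from a flat mapping of relative paths, which is how the fake config drive is laid out. A standalone sketch of using it (the paths and contents here are arbitrary):

```python
import tempfile

seed = tempfile.mkdtemp()
populate_dir(seed, {
    'openstack/latest/meta_data.json': '{}',
    'openstack/latest/user_data': '#!/bin/sh\necho hi\n',
})
# seed/ now holds the nested openstack/latest/ tree the datasource expects.
```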
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index 8a155f39..85e6add0 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -1,8 +1,8 @@
-import os
from copy import copy
+import os
-from cloudinit import url_helper
from cloudinit.sources import DataSourceMAAS
+from cloudinit import url_helper
from mocker import MockerTestCase
@@ -15,7 +15,7 @@ class TestMAASDataSource(MockerTestCase):
self.tmp = self.makeDir()
def test_seed_dir_valid(self):
- """Verify a valid seeddir is read as such"""
+ """Verify a valid seeddir is read as such."""
data = {'instance-id': 'i-valid01',
'local-hostname': 'valid01-hostname',
@@ -35,7 +35,7 @@ class TestMAASDataSource(MockerTestCase):
self.assertFalse(('user-data' in metadata))
def test_seed_dir_valid_extra(self):
- """Verify extra files do not affect seed_dir validity """
+ """Verify extra files do not affect seed_dir validity."""
data = {'instance-id': 'i-valid-extra',
'local-hostname': 'valid-extra-hostname',
@@ -54,7 +54,7 @@ class TestMAASDataSource(MockerTestCase):
self.assertFalse(('foo' in metadata))
def test_seed_dir_invalid(self):
- """Verify that invalid seed_dir raises MAASSeedDirMalformed"""
+ """Verify that invalid seed_dir raises MAASSeedDirMalformed."""
valid = {'instance-id': 'i-instanceid',
'local-hostname': 'test-hostname', 'user-data': ''}
@@ -78,20 +78,20 @@ class TestMAASDataSource(MockerTestCase):
DataSourceMAAS.read_maas_seed_dir, my_d)
def test_seed_dir_none(self):
- """Verify that empty seed_dir raises MAASSeedDirNone"""
+ """Verify that empty seed_dir raises MAASSeedDirNone."""
my_d = os.path.join(self.tmp, "valid_empty")
self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
DataSourceMAAS.read_maas_seed_dir, my_d)
def test_seed_dir_missing(self):
- """Verify that missing seed_dir raises MAASSeedDirNone"""
- self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
+ """Verify that missing seed_dir raises MAASSeedDirNone."""
+ self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
DataSourceMAAS.read_maas_seed_dir,
os.path.join(self.tmp, "nonexistantdirectory"))
def test_seed_url_valid(self):
- """Verify that valid seed_url is read as such"""
+ """Verify that valid seed_url is read as such."""
valid = {'meta-data/instance-id': 'i-instanceid',
'meta-data/local-hostname': 'test-hostname',
'meta-data/public-keys': 'test-hostname',
@@ -129,11 +129,11 @@ class TestMAASDataSource(MockerTestCase):
valid['meta-data/local-hostname'])
def test_seed_url_invalid(self):
- """Verify that invalid seed_url raises MAASSeedDirMalformed"""
+ """Verify that invalid seed_url raises MAASSeedDirMalformed."""
pass
def test_seed_url_missing(self):
- """Verify seed_url with no found entries raises MAASSeedDirNone"""
+ """Verify seed_url with no found entries raises MAASSeedDirNone."""
pass
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
new file mode 100644
index 00000000..2df4c2f0
--- /dev/null
+++ b/tests/unittests/test_distros/test_generic.py
@@ -0,0 +1,121 @@
+from mocker import MockerTestCase
+
+from cloudinit import distros
+
+unknown_arch_info = {
+ 'arches': ['default'],
+ 'failsafe': {'primary': 'http://fs-primary-default',
+ 'security': 'http://fs-security-default'}
+}
+
+package_mirrors = [
+ {'arches': ['i386', 'amd64'],
+ 'failsafe': {'primary': 'http://fs-primary-intel',
+ 'security': 'http://fs-security-intel'},
+ 'search': {
+ 'primary': ['http://%(ec2_region)s.ec2/',
+ 'http://%(availability_zone)s.clouds/'],
+ 'security': ['http://security-mirror1-intel',
+ 'http://security-mirror2-intel']}},
+ {'arches': ['armhf', 'armel'],
+ 'failsafe': {'primary': 'http://fs-primary-arm',
+ 'security': 'http://fs-security-arm'}},
+ unknown_arch_info
+]
+
+gpmi = distros._get_package_mirror_info # pylint: disable=W0212
+gapmi = distros._get_arch_package_mirror_info # pylint: disable=W0212
+
+
+class TestGenericDistro(MockerTestCase):
+
+ def return_first(self, mlist):
+ if not mlist:
+ return None
+ return mlist[0]
+
+ def return_second(self, mlist):
+ if not mlist:
+ return None
+ return mlist[1]
+
+ def return_none(self, _mlist):
+ return None
+
+ def return_last(self, mlist):
+ if not mlist:
+ return None
+ return(mlist[-1])
+
+ def setUp(self):
+ super(TestGenericDistro, self).setUp()
+ # Make a temp directoy for tests to use.
+ self.tmp = self.makeDir()
+
+ def test_arch_package_mirror_info_unknown(self):
+ """for an unknown arch, we should get back that with arch 'default'."""
+ arch_mirrors = gapmi(package_mirrors, arch="unknown")
+ self.assertEqual(unknown_arch_info, arch_mirrors)
+
+ def test_arch_package_mirror_info_known(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ self.assertEqual(package_mirrors[0], arch_mirrors)
+
+ def test_get_package_mirror_info_az_ec2(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+
+ results = gpmi(arch_mirrors, availability_zone="us-east-1a",
+ mirror_filter=self.return_first)
+ self.assertEqual(results,
+ {'primary': 'http://us-east-1.ec2/',
+ 'security': 'http://security-mirror1-intel'})
+
+ results = gpmi(arch_mirrors, availability_zone="us-east-1a",
+ mirror_filter=self.return_second)
+ self.assertEqual(results,
+ {'primary': 'http://us-east-1a.clouds/',
+ 'security': 'http://security-mirror2-intel'})
+
+ results = gpmi(arch_mirrors, availability_zone="us-east-1a",
+ mirror_filter=self.return_none)
+ self.assertEqual(results, package_mirrors[0]['failsafe'])
+
+ def test_get_package_mirror_info_az_non_ec2(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+
+ results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor",
+ mirror_filter=self.return_first)
+ self.assertEqual(results,
+ {'primary': 'http://nova.cloudvendor.clouds/',
+ 'security': 'http://security-mirror1-intel'})
+
+ results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor",
+ mirror_filter=self.return_last)
+ self.assertEqual(results,
+ {'primary': 'http://nova.cloudvendor.clouds/',
+ 'security': 'http://security-mirror2-intel'})
+
+ def test_get_package_mirror_info_none(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+
+ # Because both search entries here do replacement based on
+ # availability-zone, the filter will be called with an empty list
+ # and the failsafe should be taken.
+ results = gpmi(arch_mirrors, availability_zone=None,
+ mirror_filter=self.return_first)
+ self.assertEqual(results,
+ {'primary': 'http://fs-primary-intel',
+ 'security': 'http://security-mirror1-intel'})
+
+ results = gpmi(arch_mirrors, availability_zone=None,
+ mirror_filter=self.return_last)
+ self.assertEqual(results,
+ {'primary': 'http://fs-primary-intel',
+ 'security': 'http://security-mirror2-intel'})
+
+
+#def _get_package_mirror_info(mirror_info, availability_zone=None,
+# mirror_filter=util.search_for_mirror):
+
+
+# vi: ts=4 expandtab
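
The `%(ec2_region)s` and `%(availability_zone)s` placeholders above are expanded from the availability zone before `mirror_filter` sees the candidate list. A naive sketch of that expansion, assuming EC2 regions are derived by stripping the trailing zone letter from EC2-style names (the regex is an assumption, not the library's actual check):

```python
import re

EC2_AZ_RE = re.compile(r'^[a-z]{2}-[a-z]+-[0-9][a-z]$')  # e.g. us-east-1a


def expand_search(templates, availability_zone):
    subs = {'availability_zone': availability_zone}
    if availability_zone and EC2_AZ_RE.match(availability_zone):
        subs['ec2_region'] = availability_zone[:-1]
    expanded = []
    for tmpl in templates:
        try:
            expanded.append(tmpl % subs)
        except KeyError:  # template needs a key we could not derive
            continue
    return expanded

print(expand_search(['http://%(ec2_region)s.ec2/',
                     'http://%(availability_zone)s.clouds/'],
                    'us-east-1a'))
# ['http://us-east-1.ec2/', 'http://us-east-1a.clouds/']
```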
diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py
new file mode 100644
index 00000000..7ca7cbb6
--- /dev/null
+++ b/tests/unittests/test_filters/test_launch_index.py
@@ -0,0 +1,134 @@
+import copy
+
+import helpers as th
+
+import itertools
+
+from cloudinit.filters import launch_index
+from cloudinit import user_data as ud
+from cloudinit import util
+
+
+def count_messages(root):
+ am = 0
+ for m in root.walk():
+ if ud.is_skippable(m):
+ continue
+ am += 1
+ return am
+
+
+class TestLaunchFilter(th.ResourceUsingTestCase):
+
+ def assertCounts(self, message, expected_counts):
+ orig_message = copy.deepcopy(message)
+ for (index, count) in expected_counts.items():
+ index = util.safe_int(index)
+ filtered_message = launch_index.Filter(index).apply(message)
+ self.assertEquals(count_messages(filtered_message), count)
+ # Ensure original message still ok/not modified
+ self.assertTrue(self.equivalentMessage(message, orig_message))
+
+ def equivalentMessage(self, msg1, msg2):
+ msg1_count = count_messages(msg1)
+ msg2_count = count_messages(msg2)
+ if msg1_count != msg2_count:
+ return False
+ # Do some basic payload checking
+ msg1_msgs = [m for m in msg1.walk()]
+ msg1_msgs = [m for m in
+ itertools.ifilterfalse(ud.is_skippable, msg1_msgs)]
+ msg2_msgs = [m for m in msg2.walk()]
+ msg2_msgs = [m for m in
+ itertools.ifilterfalse(ud.is_skippable, msg2_msgs)]
+ for i in range(0, len(msg2_msgs)):
+ m1_msg = msg1_msgs[i]
+ m2_msg = msg2_msgs[i]
+ if m1_msg.get_charset() != m2_msg.get_charset():
+ return False
+ if m1_msg.is_multipart() != m2_msg.is_multipart():
+ return False
+ m1_py = m1_msg.get_payload(decode=True)
+ m2_py = m2_msg.get_payload(decode=True)
+ if m1_py != m2_py:
+ return False
+ return True
+
+ def testMultiEmailIndex(self):
+ test_data = self.readResource('filter_cloud_multipart_2.email')
+ ud_proc = ud.UserDataProcessor(self.getCloudPaths())
+ message = ud_proc.process(test_data)
+ self.assertTrue(count_messages(message) > 0)
+ # This file should have the following
+ # indexes -> amount mapping in it
+ expected_counts = {
+ 3: 1,
+ 2: 2,
+ None: 3,
+ -1: 0,
+ }
+ self.assertCounts(message, expected_counts)
+
+ def testHeaderEmailIndex(self):
+ test_data = self.readResource('filter_cloud_multipart_header.email')
+ ud_proc = ud.UserDataProcessor(self.getCloudPaths())
+ message = ud_proc.process(test_data)
+ self.assertTrue(count_messages(message) > 0)
+ # This file should have the following
+ # indexes -> amount mapping in it
+ expected_counts = {
+ 5: 1,
+ -1: 0,
+ 'c': 1,
+ None: 1,
+ }
+ self.assertCounts(message, expected_counts)
+
+ def testConfigEmailIndex(self):
+ test_data = self.readResource('filter_cloud_multipart_1.email')
+ ud_proc = ud.UserDataProcessor(self.getCloudPaths())
+ message = ud_proc.process(test_data)
+ self.assertTrue(count_messages(message) > 0)
+ # This file should have the following
+ # indexes -> amount mapping in it
+ expected_counts = {
+ 2: 1,
+ -1: 0,
+ None: 1,
+ }
+ self.assertCounts(message, expected_counts)
+
+ def testNoneIndex(self):
+ test_data = self.readResource('filter_cloud_multipart.yaml')
+ ud_proc = ud.UserDataProcessor(self.getCloudPaths())
+ message = ud_proc.process(test_data)
+ start_count = count_messages(message)
+ self.assertTrue(start_count > 0)
+ filtered_message = launch_index.Filter(None).apply(message)
+ self.assertTrue(self.equivalentMessage(message, filtered_message))
+
+ def testIndexes(self):
+ test_data = self.readResource('filter_cloud_multipart.yaml')
+ ud_proc = ud.UserDataProcessor(self.getCloudPaths())
+ message = ud_proc.process(test_data)
+ start_count = count_messages(message)
+ self.assertTrue(start_count > 0)
+ # This file should have the following
+ # indexes -> amount mapping in it
+ expected_counts = {
+ 2: 2,
+ 3: 2,
+ 1: 2,
+ 0: 1,
+ 4: 1,
+ 7: 0,
+ -1: 0,
+ 100: 0,
+ # None should just give all back
+ None: start_count,
+ # Non ints should be ignored
+ 'c': start_count,
+ # Strings should be converted
+ '1': 2,
+ }
+ self.assertCounts(message, expected_counts)
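
End to end, the tests above boil down to one pipeline: parse raw user-data into a MIME tree, apply the launch-index filter, and count the surviving parts. A condensed sketch using only calls exercised in this file (the `paths` argument is whatever `getCloudPaths()` returns):

```python
from cloudinit.filters import launch_index
from cloudinit import user_data as ud


def surviving_parts(raw, paths, index):
    message = ud.UserDataProcessor(paths).process(raw)
    filtered = launch_index.Filter(index).apply(message)
    return [m for m in filtered.walk() if not ud.is_skippable(m)]
```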
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
index 948de4c4..d3df5c50 100644
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler/test_handler_ca_certs.py
@@ -1,8 +1,8 @@
from mocker import MockerTestCase
-from cloudinit import util
from cloudinit import cloud
from cloudinit import helpers
+from cloudinit import util
from cloudinit.config import cc_ca_certs
@@ -64,7 +64,7 @@ class TestConfig(MockerTestCase):
cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_empty_trusted_list(self):
- """Test that no certificate are written if 'trusted' list is empty"""
+ """Test that no certificate are written if 'trusted' list is empty."""
config = {"ca-certs": {"trusted": []}}
# No functions should be called
@@ -74,7 +74,7 @@ class TestConfig(MockerTestCase):
cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_single_trusted(self):
- """Test that a single cert gets passed to add_ca_certs"""
+ """Test that a single cert gets passed to add_ca_certs."""
config = {"ca-certs": {"trusted": ["CERT1"]}}
self.mock_add(self.paths, ["CERT1"])
@@ -84,7 +84,7 @@ class TestConfig(MockerTestCase):
cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_multiple_trusted(self):
- """Test that multiple certs get passed to add_ca_certs"""
+ """Test that multiple certs get passed to add_ca_certs."""
config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}}
self.mock_add(self.paths, ["CERT1", "CERT2"])
@@ -94,7 +94,7 @@ class TestConfig(MockerTestCase):
cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_remove_default_ca_certs(self):
- """Test remove_defaults works as expected"""
+ """Test remove_defaults works as expected."""
config = {"ca-certs": {"remove-defaults": True}}
self.mock_remove(self.paths)
@@ -104,7 +104,7 @@ class TestConfig(MockerTestCase):
cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_no_remove_defaults_if_false(self):
- """Test remove_defaults is not called when config value is False"""
+ """Test remove_defaults is not called when config value is False."""
config = {"ca-certs": {"remove-defaults": False}}
self.mock_update()
@@ -113,7 +113,7 @@ class TestConfig(MockerTestCase):
cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_correct_order_for_remove_then_add(self):
- """Test remove_defaults is not called when config value is False"""
+ """Test remove_defaults is not called when config value is False."""
config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}}
self.mock_remove(self.paths)
@@ -139,7 +139,7 @@ class TestAddCaCerts(MockerTestCase):
cc_ca_certs.add_ca_certs(self.paths, [])
def test_single_cert(self):
- """Test adding a single certificate to the trusted CAs"""
+ """Test adding a single certificate to the trusted CAs."""
cert = "CERT1\nLINE2\nLINE3"
mock_write = self.mocker.replace(util.write_file, passthrough=False)
@@ -152,7 +152,7 @@ class TestAddCaCerts(MockerTestCase):
cc_ca_certs.add_ca_certs(self.paths, [cert])
def test_multiple_certs(self):
- """Test adding multiple certificates to the trusted CAs"""
+ """Test adding multiple certificates to the trusted CAs."""
certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"]
expected_cert_file = "\n".join(certs)
diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py
index fbbf07f2..82a4c555 100644
--- a/tests/unittests/test_userdata.py
+++ b/tests/unittests/test_userdata.py
@@ -1,4 +1,4 @@
-"""Tests for handling of userdata within cloud init"""
+"""Tests for handling of userdata within cloud init."""
import StringIO
@@ -54,7 +54,7 @@ class TestConsumeUserData(MockerTestCase):
return log_file
def test_unhandled_type_warning(self):
- """Raw text without magic is ignored but shows warning"""
+ """Raw text without magic is ignored but shows warning."""
ci = stages.Init()
data = "arbitrary text\n"
ci.datasource = FakeDataSource(data)
@@ -70,7 +70,7 @@ class TestConsumeUserData(MockerTestCase):
log_file.getvalue())
def test_mime_text_plain(self):
- """Mime message of type text/plain is ignored but shows warning"""
+ """Mime message of type text/plain is ignored but shows warning."""
ci = stages.Init()
message = MIMEBase("text", "plain")
message.set_payload("Just text")
@@ -86,9 +86,8 @@ class TestConsumeUserData(MockerTestCase):
"Unhandled unknown content-type (text/plain)",
log_file.getvalue())
-
def test_shellscript(self):
- """Raw text starting #!/bin/sh is treated as script"""
+ """Raw text starting #!/bin/sh is treated as script."""
ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
ci.datasource = FakeDataSource(script)
@@ -104,7 +103,7 @@ class TestConsumeUserData(MockerTestCase):
self.assertEqual("", log_file.getvalue())
def test_mime_text_x_shellscript(self):
- """Mime message of type text/x-shellscript is treated as script"""
+ """Mime message of type text/x-shellscript is treated as script."""
ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
message = MIMEBase("text", "x-shellscript")
@@ -122,7 +121,7 @@ class TestConsumeUserData(MockerTestCase):
self.assertEqual("", log_file.getvalue())
def test_mime_text_plain_shell(self):
- """Mime type text/plain starting #!/bin/sh is treated as script"""
+ """Mime type text/plain starting #!/bin/sh is treated as script."""
ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
message = MIMEBase("text", "plain")
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 19f66cc4..15fcbd26 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,11 +1,11 @@
import os
import stat
-from unittest import TestCase
from mocker import MockerTestCase
+from unittest import TestCase
-from cloudinit import util
from cloudinit import importer
+from cloudinit import util
class FakeSelinux(object):
diff --git a/tools/hacking.py b/tools/hacking.py
index a2e6e829..11163df3 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -100,7 +100,7 @@ def cloud_todo_format(physical_line):
"""
pos = physical_line.find('TODO')
pos1 = physical_line.find('TODO(')
- pos2 = physical_line.find('#') # make sure it's a comment
+ pos2 = physical_line.find('#') # make sure it's a comment
if (pos != pos1 and pos2 >= 0 and pos2 < pos):
return pos, "N101: Use TODO(NAME)"
@@ -133,7 +133,6 @@ def cloud_docstring_multiline_end(physical_line):
return (pos, "N403: multi line docstring end on new line")
-
current_file = ""
@@ -169,4 +168,3 @@ if __name__ == "__main__":
if len(_missingImport) > 0:
print >> sys.stderr, ("%i imports missing in this test environment"
% len(_missingImport))
-
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index 78838f64..c79f0598 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -6,7 +6,7 @@
"""
To use this to mimic the EC2 metadata service entirely, run it like:
- # Where 'eth0' is *some* interface.
+ # Where 'eth0' is *some* interface.
sudo ifconfig eth0:0 169.254.169.254 netmask 255.255.255.255
sudo ./mock-meta.py -a 169.254.169.254 -p 80
@@ -23,7 +23,7 @@ import json
import logging
import os
import random
-import string # pylint: disable=W0402
+import string # pylint: disable=W0402
import sys
import yaml
@@ -156,6 +156,8 @@ def traverse(keys, mp):
ID_CHARS = [c for c in (string.ascii_uppercase + string.digits)]
+
+
def id_generator(size=6, lower=False):
txt = ''.join(random.choice(ID_CHARS) for x in range(size))
if lower:
@@ -235,11 +237,11 @@ class MetaDataHandler(object):
nparams = params[1:]
# This is a weird kludge, why amazon why!!!
# public-keys is messed up, list of /latest/meta-data/public-keys/
- # shows something like: '0=brickies'
- # but a GET to /latest/meta-data/public-keys/0=brickies will fail
- # you have to know to get '/latest/meta-data/public-keys/0', then
- # from there you get a 'openssh-key', which you can get.
- # this hunk of code just re-works the object for that.
+ # shows something like: '0=brickies'
+ # but a GET to /latest/meta-data/public-keys/0=brickies will fail
+ # you have to know to get '/latest/meta-data/public-keys/0', then
+ # from there you get a 'openssh-key', which you can get.
+ # this hunk of code just re-works the object for that.
avail_keys = get_ssh_keys()
key_ids = sorted(list(avail_keys.keys()))
if nparams:
@@ -255,7 +257,7 @@ class MetaDataHandler(object):
"openssh-key": "\n".join(avail_keys[key_name]),
})
if isinstance(result, (dict)):
- # TODO: This might not be right??
+ # TODO(harlowja): This might not be right??
result = "\n".join(sorted(result.keys()))
if not result:
result = ''
@@ -304,13 +306,13 @@ class UserDataHandler(object):
blob = "\n".join(lines)
return blob.strip()
- def get_data(self, params, who, **kwargs): # pylint: disable=W0613
+ def get_data(self, params, who, **kwargs): # pylint: disable=W0613
if not params:
return self._get_user_blob(who=who)
return NOT_IMPL_RESPONSE
-# Seem to need to use globals since can't pass
+# Seem to need to use globals since can't pass
# data into the request handlers instances...
# Puke!
meta_fetcher = None
@@ -432,7 +434,7 @@ def setup_fetchers(opts):
def run_server():
- # Using global here since it doesn't seem like we
+ # Using global here since it doesn't seem like we
# can pass opts into a request handler constructor...
opts = extract_opts()
setup_logging(logging.DEBUG)
diff --git a/upstart/cloud-init-container.conf b/upstart/cloud-init-container.conf
new file mode 100644
index 00000000..051c6e50
--- /dev/null
+++ b/upstart/cloud-init-container.conf
@@ -0,0 +1,51 @@
+# in an lxc container, events for network interfaces do not
+# get created or may be missed. This helps cloud-init-nonet along
+# by emitting those events if they have not been emitted.
+
+start on container
+stop on static-network-up
+task
+
+emits net-device-added
+
+console output
+
+script
+ # if we are inside a container, then we may have to emit the ifup
+ # events for 'auto' network devices.
+ set -f
+
+ # from /etc/network/if-up.d/upstart
+ MARK_DEV_PREFIX="/run/network/ifup."
+ MARK_STATIC_NETWORK_EMITTED="/run/network/static-network-up-emitted"
+ # if all the static network interfaces are already up, nothing to do
+ [ -f "$MARK_STATIC_NETWORK_EMITTED" ] && exit 0
+
+ # get list of all 'auto' interfaces. if there are none, nothing to do.
+ auto_list=$(ifquery --list --allow auto 2>/dev/null) || :
+ [ -z "$auto_list" ] && exit 0
+ set -- ${auto_list}
+ [ "$*" = "lo" ] && exit 0
+
+ # we only want to emit for interfaces that do not exist, so filter
+ # out anything that does not exist.
+ for iface in "$@"; do
+ [ "$iface" = "lo" ] && continue
+ # skip interfaces that are already up
+ [ -f "${MARK_DEV_PREFIX}${iface}" ] && continue
+
+ if [ -d /sys/net ]; then
+ # if /sys is mounted and there is no /sys/net/$iface, there is no device
+ [ -e "/sys/net/$iface" ] && continue
+ else
+ # sys wasn't mounted, so just check via 'ifconfig'
+ ifconfig "$iface" >/dev/null 2>&1 || continue
+ fi
+ initctl emit --no-wait net-device-added "INTERFACE=$iface" &&
+ emitted="$emitted $iface" ||
+ echo "warn: ${UPSTART_JOB} failed to emit net-device-added INTERFACE=$iface"
+ done
+
+ [ -z "${emitted# }" ] ||
+ echo "${UPSTART_JOB}: emitted ifup for ${emitted# }"
+end script
diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf
index 7b69e584..118ffc1c 100644
--- a/upstart/cloud-init-nonet.conf
+++ b/upstart/cloud-init-nonet.conf
@@ -18,8 +18,6 @@ script
[ -f /var/lib/cloud/instance/obj.pkl ] && exit 0
- start networking >/dev/null
-
short=10; long=120;
sleep ${short}
echo $UPSTART_JOB "waiting ${long} seconds for a network device."