summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--ChangeLog15
-rw-r--r--Makefile2
-rwxr-xr-xbin/cloud-init28
-rw-r--r--cloudinit/cloud.py3
-rw-r--r--cloudinit/config/cc_apt_pipelining.py4
-rw-r--r--cloudinit/config/cc_apt_update_upgrade.py136
-rw-r--r--cloudinit/config/cc_bootcmd.py2
-rw-r--r--cloudinit/config/cc_emit_upstart.py48
-rw-r--r--cloudinit/config/cc_final_message.py6
-rw-r--r--cloudinit/config/cc_puppet.py5
-rw-r--r--cloudinit/config/cc_resizefs.py8
-rw-r--r--cloudinit/config/cc_rightscale_userdata.py4
-rw-r--r--cloudinit/config/cc_set_passwords.py12
-rw-r--r--cloudinit/config/cc_ssh.py23
-rw-r--r--cloudinit/config/cc_ssh_authkey_fingerprints.py96
-rw-r--r--cloudinit/config/cc_ssh_import_id.py54
-rw-r--r--cloudinit/config/cc_update_etc_hosts.py2
-rw-r--r--cloudinit/config/cc_update_hostname.py2
-rw-r--r--cloudinit/config/cc_users_groups.py70
-rw-r--r--cloudinit/config/cc_write_files.py4
-rw-r--r--cloudinit/distros/__init__.py271
-rw-r--r--cloudinit/distros/debian.py4
-rw-r--r--cloudinit/distros/rhel.py4
-rw-r--r--cloudinit/distros/ubuntu.py6
-rw-r--r--cloudinit/handlers/__init__.py2
-rw-r--r--cloudinit/handlers/shell_script.py2
-rw-r--r--cloudinit/log.py2
-rw-r--r--cloudinit/settings.py5
-rw-r--r--cloudinit/sources/DataSourceAltCloud.py299
-rw-r--r--cloudinit/sources/DataSourceCloudStack.py6
-rw-r--r--cloudinit/sources/DataSourceConfigDrive.py326
-rw-r--r--cloudinit/sources/DataSourceEc2.py42
-rw-r--r--cloudinit/sources/DataSourceMAAS.py91
-rw-r--r--cloudinit/sources/DataSourceNone.py61
-rw-r--r--cloudinit/sources/__init__.py14
-rw-r--r--cloudinit/ssh_util.py94
-rw-r--r--cloudinit/stages.py4
-rw-r--r--cloudinit/user_data.py4
-rw-r--r--cloudinit/util.py61
-rw-r--r--config/cloud.cfg26
-rw-r--r--doc/examples/cloud-config-user-groups.txt88
-rw-r--r--doc/examples/cloud-config.txt3
-rw-r--r--doc/sources/altcloud/README65
-rw-r--r--doc/sources/configdrive/README (renamed from doc/configdrive/README)0
-rw-r--r--doc/sources/kernel-cmdline.txt (renamed from doc/kernel-cmdline.txt)0
-rw-r--r--doc/sources/nocloud/README (renamed from doc/nocloud/README)0
-rw-r--r--doc/sources/ovf/README (renamed from doc/ovf/README)0
-rw-r--r--doc/sources/ovf/example/ovf-env.xml (renamed from doc/ovf/example/ovf-env.xml)0
-rw-r--r--doc/sources/ovf/example/ubuntu-server.ovf (renamed from doc/ovf/example/ubuntu-server.ovf)0
-rwxr-xr-xdoc/sources/ovf/make-iso (renamed from doc/ovf/make-iso)0
-rw-r--r--doc/sources/ovf/ovf-env.xml.tmpl (renamed from doc/ovf/ovf-env.xml.tmpl)0
-rw-r--r--doc/sources/ovf/ovfdemo.pem (renamed from doc/ovf/ovfdemo.pem)0
-rw-r--r--doc/sources/ovf/user-data (renamed from doc/ovf/user-data)0
-rwxr-xr-xsetup.py38
-rw-r--r--templates/sources.list.tmpl12
-rw-r--r--tests/unittests/test__init__.py35
-rw-r--r--tests/unittests/test_builtin_handlers.py5
-rw-r--r--tests/unittests/test_datasource/test_altcloud.py445
-rw-r--r--tests/unittests/test_datasource/test_configdrive.py177
-rw-r--r--tests/unittests/test_datasource/test_maas.py24
-rw-r--r--tests/unittests/test_distros/test_generic.py121
-rw-r--r--tests/unittests/test_handler/test_handler_ca_certs.py32
-rw-r--r--tests/unittests/test_userdata.py17
-rw-r--r--tests/unittests/test_util.py8
-rwxr-xr-xtools/hacking.py11
-rwxr-xr-xtools/mock-meta.py85
66 files changed, 2584 insertions, 430 deletions
diff --git a/ChangeLog b/ChangeLog
index 8ee50d2a..283d1464 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,19 @@
0.7.0:
+ - search only top level dns for 'instance-data' in DataSourceEc2 (LP: #1040200)
+ - add support for config-drive-v2 (LP:#1037567)
+ - support creating users, including the default user.
+ [Ben Howard] (LP: #1028503)
+ - add apt_reboot_if_required to reboot if an upgrade or package installation
+ forced the need for one (LP: #1038108)
+ - allow distro mirror selection to include availability-zone (LP: #1037727)
+ - allow arch specific mirror selection (select ports.ubuntu.com on arm)
+ LP: #1028501
+ - allow specification of security mirrors (LP: #1006963)
+ - add the 'None' datasource (LP: #906669), which will allow jobs
+ to run even if there is no "real" datasource found.
+ - write ssh authorized keys to console, ssh_authkey_fingerprints
+ config module [Joshua Harlow] (LP: #1010582)
+ - Added RHEVm and vSphere support as source AltCloud [Joseph VLcek]
- add write-files module (LP: #1012854)
- Add setuptools + cheetah to debian package build dependencies (LP: #1022101)
- Adjust the sysvinit local script to provide 'cloud-init-local' and have
diff --git a/Makefile b/Makefile
index e20d0bee..49324ca0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
CWD=$(shell pwd)
-PY_FILES=$(shell find cloudinit bin -name "*.py")
+PY_FILES=$(shell find cloudinit bin tests tools -name "*.py")
PY_FILES+="bin/cloud-init"
all: test
diff --git a/bin/cloud-init b/bin/cloud-init
index 3ecc3dad..1f017475 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -100,29 +100,6 @@ def welcome_format(action):
return templater.render_string(WELCOME_MSG_TPL, tpl_params)
-def emit_cloud_config_ready(cfg, cfgpath):
- # emit the cloud config ready event
- # this can be used by upstart jobs for 'start on cloud-config'. There is a
- # builtin value for 'cc_ready_cmd' if that is not overidden by config, then
- # we check to make sure that /sbin/initctl is availble. This makes it so
- # that non-ubuntu distro will just no-op here with no explicit config.
- cmd = None
- if 'cc_ready_cmd' in cfg:
- cmd = cfg['cc_ready_cmd']
- if isinstance(cmd, str):
- cmd = ['sh', '-c', cmd]
- elif os.path.isfile("/sbin/initctl"):
- cmd = ['initctl', 'emit', 'cloud-config',
- 'CLOUD_CFG=%s' % cfgpath]
-
- if not cmd:
- return
- try:
- util.subp(cmd)
- except:
- LOG.warn("emission of cloud-config event failed")
-
-
def extract_fns(args):
# Files are already opened so lets just pass that along
# since it would of broke if it couldn't have
@@ -361,11 +338,6 @@ def main_modules(action_name, args):
# now that logging is setup and stdout redirected, send welcome
welcome(name, msg=w_msg)
- # send the cloud-config event.
- if name == "config":
- emit_cloud_config_ready(mods.cfg,
- init.paths.get_ipath_cur("cloud_config"))
-
# Stage 5
return run_module_section(mods, name, name)
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 22d9167e..620b3c07 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -82,9 +82,6 @@ class Cloud(object):
def get_locale(self):
return self.datasource.get_locale()
- def get_local_mirror(self):
- return self.datasource.get_local_mirror()
-
def get_hostname(self, fqdn=False):
return self.datasource.get_hostname(fqdn=fqdn)
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 3426099e..02056ee0 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -16,8 +16,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit import util
from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
frequency = PER_INSTANCE
@@ -50,7 +50,7 @@ def handle(_name, cfg, cloud, log, _args):
def write_apt_snippet(cloud, setting, log, f_name):
- """ Writes f_name with apt pipeline depth 'setting' """
+ """Writes f_name with apt pipeline depth 'setting'."""
file_contents = APT_PIPE_TPL % (setting)
diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py
index 1bffa47d..356bb98d 100644
--- a/cloudinit/config/cc_apt_update_upgrade.py
+++ b/cloudinit/config/cc_apt_update_upgrade.py
@@ -20,6 +20,7 @@
import glob
import os
+import time
from cloudinit import templater
from cloudinit import util
@@ -50,20 +51,25 @@ def handle(name, cfg, cloud, log, _args):
upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False)
release = get_release()
- mirror = find_apt_mirror(cloud, cfg)
- if not mirror:
+ mirrors = find_apt_mirror_info(cloud, cfg)
+ if not mirrors or "primary" not in mirrors:
log.debug(("Skipping module named %s,"
" no package 'mirror' located"), name)
return
- log.debug("Selected mirror at: %s" % mirror)
+ # backwards compatibility
+ mirror = mirrors["primary"]
+ mirrors["mirror"] = mirror
+
+ log.debug("mirror info: %s" % mirrors)
if not util.get_cfg_option_bool(cfg,
'apt_preserve_sources_list', False):
- generate_sources_list(release, mirror, cloud, log)
- old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror',
- "archive.ubuntu.com/ubuntu")
- rename_apt_lists(old_mir, mirror)
+ generate_sources_list(release, mirrors, cloud, log)
+ old_mirrors = cfg.get('apt_old_mirrors',
+ {"primary": "archive.ubuntu.com/ubuntu",
+ "security": "security.ubuntu.com/ubuntu"})
+ rename_apt_lists(old_mirrors, mirrors)
# Set up any apt proxy
proxy = cfg.get("apt_proxy", None)
@@ -81,8 +87,10 @@ def handle(name, cfg, cloud, log, _args):
# Process 'apt_sources'
if 'apt_sources' in cfg:
- errors = add_sources(cloud, cfg['apt_sources'],
- {'MIRROR': mirror, 'RELEASE': release})
+ params = mirrors
+ params['RELEASE'] = release
+ params['MIRROR'] = mirror
+ errors = add_sources(cloud, cfg['apt_sources'], params)
for e in errors:
log.warn("Source Error: %s", ':'.join(e))
@@ -118,6 +126,20 @@ def handle(name, cfg, cloud, log, _args):
util.logexc(log, "Failed to install packages: %s ", pkglist)
errors.append(e)
+ # kernel and openssl (possibly some other packages)
+ # write a file /var/run/reboot-required after upgrading.
+ # if that file exists and configured, then just stop right now and reboot
+ # TODO(smoser): handle this less voilently
+ reboot_file = "/var/run/reboot-required"
+ if ((upgrade or pkglist) and cfg.get("apt_reboot_if_required", False) and
+ os.path.isfile(reboot_file)):
+ log.warn("rebooting after upgrade or install per %s" % reboot_file)
+ time.sleep(1) # give the warning time to get out
+ util.subp(["/sbin/reboot"])
+ time.sleep(60)
+ log.warn("requested reboot did not happen!")
+ errors.append(Exception("requested reboot did not happen!"))
+
if len(errors):
log.warn("%s failed with exceptions, re-raising the last one",
len(errors))
@@ -146,15 +168,18 @@ def mirror2lists_fileprefix(mirror):
return string
-def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"):
- oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror))
- nprefix = os.path.join(lists_d, mirror2lists_fileprefix(new_mirror))
- if oprefix == nprefix:
- return
- olen = len(oprefix)
- for filename in glob.glob("%s_*" % oprefix):
- # TODO use the cloud.paths.join...
- util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
+def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"):
+ for (name, omirror) in old_mirrors.iteritems():
+ nmirror = new_mirrors.get(name)
+ if not nmirror:
+ continue
+ oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror))
+ nprefix = os.path.join(lists_d, mirror2lists_fileprefix(nmirror))
+ if oprefix == nprefix:
+ continue
+ olen = len(oprefix)
+ for filename in glob.glob("%s_*" % oprefix):
+ util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
def get_release():
@@ -162,14 +187,17 @@ def get_release():
return stdout.strip()
-def generate_sources_list(codename, mirror, cloud, log):
+def generate_sources_list(codename, mirrors, cloud, log):
template_fn = cloud.get_template_filename('sources.list')
- if template_fn:
- params = {'mirror': mirror, 'codename': codename}
- out_fn = cloud.paths.join(False, '/etc/apt/sources.list')
- templater.render_to_file(template_fn, out_fn, params)
- else:
+ if not template_fn:
log.warn("No template found, not rendering /etc/apt/sources.list")
+ return
+
+ params = {'codename': codename}
+ for k in mirrors:
+ params[k] = mirrors[k]
+ out_fn = cloud.paths.join(False, '/etc/apt/sources.list')
+ templater.render_to_file(template_fn, out_fn, params)
def add_sources(cloud, srclist, template_params=None):
@@ -231,43 +259,47 @@ def add_sources(cloud, srclist, template_params=None):
return errorlist
-def find_apt_mirror(cloud, cfg):
- """ find an apt_mirror given the cloud and cfg provided """
+def find_apt_mirror_info(cloud, cfg):
+ """find an apt_mirror given the cloud and cfg provided."""
mirror = None
- cfg_mirror = cfg.get("apt_mirror", None)
- if cfg_mirror:
- mirror = cfg["apt_mirror"]
- elif "apt_mirror_search" in cfg:
- mirror = util.search_for_mirror(cfg['apt_mirror_search'])
- else:
- mirror = cloud.get_local_mirror()
+ # this is less preferred way of specifying mirror preferred would be to
+ # use the distro's search or package_mirror.
+ mirror = cfg.get("apt_mirror", None)
- mydom = ""
+ search = cfg.get("apt_mirror_search", None)
+ if not mirror and search:
+ mirror = util.search_for_mirror(search)
+ if (not mirror and
+ util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
+ mydom = ""
doms = []
- if not mirror:
- # if we have a fqdn, then search its domain portion first
- (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- mydom = ".".join(fqdn.split(".")[1:])
- if mydom:
- doms.append(".%s" % mydom)
+ # if we have a fqdn, then search its domain portion first
+ (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ mydom = ".".join(fqdn.split(".")[1:])
+ if mydom:
+ doms.append(".%s" % mydom)
+
+ doms.extend((".localdomain", "",))
- if (not mirror and
- util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
- doms.extend((".localdomain", "",))
+ mirror_list = []
+ distro = cloud.distro.name
+ mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
+ for post in doms:
+ mirror_list.append(mirrorfmt % (post))
- mirror_list = []
- distro = cloud.distro.name
- mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
- for post in doms:
- mirror_list.append(mirrorfmt % (post))
+ mirror = util.search_for_mirror(mirror_list)
- mirror = util.search_for_mirror(mirror_list)
+ mirror_info = cloud.datasource.get_package_mirror_info()
- if not mirror:
- mirror = cloud.distro.get_package_mirror()
+ # this is a bit strange.
+ # if mirror is set, then one of the legacy options above set it
+ # but they do not cover security. so we need to get that from
+ # get_package_mirror_info
+ if mirror:
+ mirror_info.update({'primary': mirror})
- return mirror
+ return mirror_info
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index bae1ea54..896cb4d0 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -20,8 +20,8 @@
import os
-from cloudinit import util
from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
frequency = PER_ALWAYS
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
new file mode 100644
index 00000000..6d376184
--- /dev/null
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -0,0 +1,48 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
+
+frequency = PER_ALWAYS
+
+distros = ['ubuntu', 'debian']
+
+
+def handle(name, _cfg, cloud, log, args):
+ event_names = args
+ if not event_names:
+ # Default to the 'cloud-config'
+ # event for backwards compat.
+ event_names = ['cloud-config']
+ if not os.path.isfile("/sbin/initctl"):
+ log.debug(("Skipping module named %s,"
+ " no /sbin/initctl located"), name)
+ return
+ cfgpath = cloud.paths.get_ipath_cur("cloud_config")
+ for n in event_names:
+ cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
+ try:
+ util.subp(cmd)
+ except Exception as e:
+ # TODO(harlowja), use log exception from utils??
+ log.warn("Emission of upstart event %s failed due to: %s", n, e)
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index aff03c4e..6b864fda 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -28,7 +28,7 @@ frequency = PER_ALWAYS
# Cheetah formated default message
FINAL_MESSAGE_DEF = ("Cloud-init v. ${version} finished at ${timestamp}."
- " Up ${uptime} seconds.")
+ " Datasource ${datasource}. Up ${uptime} seconds")
def handle(_name, cfg, cloud, log, args):
@@ -51,6 +51,7 @@ def handle(_name, cfg, cloud, log, args):
'uptime': uptime,
'timestamp': ts,
'version': cver,
+ 'datasource': str(cloud.datasource),
}
util.multi_log("%s\n" % (templater.render_string(msg_in, subs)),
console=False, stderr=True)
@@ -63,3 +64,6 @@ def handle(_name, cfg, cloud, log, args):
util.write_file(boot_fin_fn, contents)
except:
util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
+
+ if cloud.datasource.is_disconnected:
+ log.warn("Used fallback datasource")
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 467c1496..74ee18e1 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -48,7 +48,8 @@ def handle(name, cfg, cloud, log, _args):
# Create object for reading puppet.conf values
puppet_config = helpers.DefaultingConfigParser()
# Read puppet.conf values from original file in order to be able to
- # mix the rest up. First clean them up (TODO is this really needed??)
+ # mix the rest up. First clean them up
+ # (TODO(harlowja) is this really needed??)
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
cleaned_contents = '\n'.join(cleaned_lines)
puppet_config.readfp(StringIO(cleaned_contents),
@@ -80,7 +81,7 @@ def handle(name, cfg, cloud, log, _args):
for (o, v) in cfg.iteritems():
if o == 'certname':
# Expand %f as the fqdn
- # TODO should this use the cloud fqdn??
+ # TODO(harlowja) should this use the cloud fqdn??
v = v.replace("%f", socket.getfqdn())
# Expand %i as the instance id
v = v.replace("%i", cloud.get_instance_id())
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 256a194f..e7f27944 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -22,8 +22,8 @@ import os
import stat
import time
-from cloudinit import util
from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
frequency = PER_ALWAYS
@@ -72,12 +72,12 @@ def handle(name, cfg, cloud, log, args):
log.debug("Skipping module named %s, resizing disabled", name)
return
- # TODO is the directory ok to be used??
+ # TODO(harlowja) is the directory ok to be used??
resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
resize_root_d = cloud.paths.join(False, resize_root_d)
util.ensure_dir(resize_root_d)
- # TODO: allow what is to be resized to be configurable??
+ # TODO(harlowja): allow what is to be resized to be configurable??
resize_what = cloud.paths.join(False, "/")
with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.",
dir=resize_root_d, delete=True) as tfh:
@@ -136,5 +136,5 @@ def do_resize(resize_cmd, log):
raise
tot_time = time.time() - start
log.debug("Resizing took %.3f seconds", tot_time)
- # TODO: Should we add a fsck check after this to make
+ # TODO(harlowja): Should we add a fsck check after this to make
# sure we didn't corrupt anything?
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 45d41b3f..4bf18516 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -37,9 +37,9 @@
import os
+from cloudinit.settings import PER_INSTANCE
from cloudinit import url_helper as uhelp
from cloudinit import util
-from cloudinit.settings import PER_INSTANCE
from urlparse import parse_qs
@@ -72,7 +72,7 @@ def handle(name, _cfg, cloud, log, _args):
captured_excps = []
# These will eventually be then ran by the cc_scripts_user
- # TODO: maybe this should just be a new user data handler??
+ # TODO(harlowja): maybe this should just be a new user data handler??
# Instead of a late module that acts like a user data handler?
scripts_d = cloud.get_ipath_cur('scripts')
urls = mdict[MY_HOOKNAME]
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index ab266741..7d0fbd9f 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -50,8 +50,16 @@ def handle(_name, cfg, cloud, log, args):
expire = util.get_cfg_option_bool(chfg, 'expire', expire)
if not plist and password:
- user = util.get_cfg_option_str(cfg, "user", "ubuntu")
- plist = "%s:%s" % (user, password)
+ user = cloud.distro.get_default_user()
+
+ if 'users' in cfg:
+ user_zero = cfg['users'].keys()[0]
+
+ if user_zero != "default":
+ user = user_zero
+
+ if user:
+ plist = "%s:%s" % (user, password)
errors = []
if plist:
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 4019ae90..439c8eb8 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -18,11 +18,11 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import os
import glob
+import os
-from cloudinit import util
from cloudinit import ssh_util
+from cloudinit import util
DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding,"
"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" "
@@ -76,7 +76,7 @@ def handle(_name, cfg, cloud, log, _args):
pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0])
cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
try:
- # TODO: Is this guard needed?
+ # TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
util.subp(cmd, capture=False)
log.debug("Generated a key for %s from %s", pair[0], pair[1])
@@ -94,7 +94,7 @@ def handle(_name, cfg, cloud, log, _args):
if not os.path.exists(keyfile):
cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
try:
- # TODO: Is this guard needed?
+ # TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
util.subp(cmd, capture=False)
except:
@@ -102,7 +102,16 @@ def handle(_name, cfg, cloud, log, _args):
" %s to file %s"), keytype, keyfile)
try:
- user = util.get_cfg_option_str(cfg, 'user')
+ # TODO(utlemming): consolidate this stanza that occurs in:
+ # cc_ssh_import_id, cc_set_passwords, maybe cc_users_groups.py
+ user = cloud.distro.get_default_user()
+
+ if 'users' in cfg:
+ user_zero = cfg['users'].keys()[0]
+
+ if user_zero != "default":
+ user = user_zero
+
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
DISABLE_ROOT_OPTS)
@@ -124,7 +133,9 @@ def apply_credentials(keys, user, paths, disable_root, disable_root_opts):
if user:
ssh_util.setup_user_keys(keys, user, '', paths)
- if disable_root and user:
+ if disable_root:
+ if not user:
+ user = "NONE"
key_prefix = disable_root_opts.replace('$USER', user)
else:
key_prefix = ''
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
new file mode 100644
index 00000000..23f5755a
--- /dev/null
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -0,0 +1,96 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import base64
+import hashlib
+
+from prettytable import PrettyTable
+
+from cloudinit import ssh_util
+from cloudinit import util
+
+
+def _split_hash(bin_hash):
+ split_up = []
+ for i in xrange(0, len(bin_hash), 2):
+ split_up.append(bin_hash[i:i + 2])
+ return split_up
+
+
+def _gen_fingerprint(b64_text, hash_meth='md5'):
+ if not b64_text:
+ return ''
+ # TBD(harlowja): Maybe we should feed this into 'ssh -lf'?
+ try:
+ hasher = hashlib.new(hash_meth)
+ hasher.update(base64.b64decode(b64_text))
+ return ":".join(_split_hash(hasher.hexdigest()))
+ except TypeError:
+ # Raised when b64 not really b64...
+ return '?'
+
+
+def _is_printable_key(entry):
+ if any([entry.keytype, entry.base64, entry.comment, entry.options]):
+ if (entry.keytype and
+ entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']):
+ return True
+ return False
+
+
+def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
+ prefix='ci-info: '):
+ if not key_entries:
+ message = ("%sno authorized ssh keys fingerprints found for user %s."
+ % (prefix, user))
+ util.multi_log(message)
+ return
+ tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
+ 'Comment']
+ tbl = PrettyTable(tbl_fields)
+ for entry in key_entries:
+ if _is_printable_key(entry):
+ row = []
+ row.append(entry.keytype or '-')
+ row.append(_gen_fingerprint(entry.base64, hash_meth) or '-')
+ row.append(entry.options or '-')
+ row.append(entry.comment or '-')
+ tbl.add_row(row)
+ authtbl_s = tbl.get_string()
+ authtbl_lines = authtbl_s.splitlines()
+ max_len = len(max(authtbl_lines, key=len))
+ lines = [
+ util.center("Authorized keys from %s for user %s" %
+ (key_fn, user), "+", max_len),
+ ]
+ lines.extend(authtbl_lines)
+ for line in lines:
+ util.multi_log(text="%s%s\n" % (prefix, line),
+ stderr=False, console=True)
+
+
+def handle(name, cfg, cloud, log, _args):
+ if 'no_ssh_fingerprints' in cfg:
+ log.debug(("Skipping module named %s, "
+ "logging of ssh fingerprints disabled"), name)
+
+ user_name = util.get_cfg_option_str(cfg, "user", "ubuntu")
+ hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
+ extract = ssh_util.extract_authorized_keys
+ (auth_key_fn, auth_key_entries) = extract(user_name, cloud.paths)
+ _pprint_key_entries(user_name, auth_key_fn, auth_key_entries, hash_meth)
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index c58b28ec..c5f07376 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -19,35 +19,69 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit import util
+import pwd
# The ssh-import-id only seems to exist on ubuntu (for now)
# https://launchpad.net/ssh-import-id
distros = ['ubuntu']
-def handle(name, cfg, _cloud, log, args):
+def handle(_name, cfg, cloud, log, args):
+
+ # import for "user: XXXXX"
if len(args) != 0:
user = args[0]
ids = []
if len(args) > 1:
ids = args[1:]
- else:
- user = util.get_cfg_option_str(cfg, "user", "ubuntu")
- ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
- if len(ids) == 0:
- log.debug("Skipping module named %s, no ids found to import", name)
+ import_ssh_ids(ids, user, log)
return
- if not user:
- log.debug("Skipping module named %s, no user found to import", name)
+ # import for cloudinit created users
+ elist = []
+ for user in cfg['users'].keys():
+ if user == "default":
+ user = cloud.distro.get_default_user()
+ if not user:
+ continue
+ import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
+ else:
+ if not isinstance(cfg['users'][user], dict):
+ log.debug("cfg['users'][%s] not a dict, skipping ssh_import",
+ user)
+ import_ids = util.get_cfg_option_list(cfg['users'][user],
+ "ssh_import_id", [])
+
+ if not len(import_ids):
+ continue
+
+ try:
+ import_ssh_ids(import_ids, user, log)
+ except Exception as exc:
+ util.logexc(exc, "ssh-import-id failed for: %s %s" %
+ (user, import_ids))
+ elist.append(exc)
+
+ if len(elist):
+ raise elist[0]
+
+
+def import_ssh_ids(ids, user, log):
+ if not (user and ids):
+ log.debug("empty user(%s) or ids(%s). not importing", user, ids)
return
+ try:
+ _check = pwd.getpwnam(user)
+ except KeyError as exc:
+ raise exc
+
cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
log.debug("Importing ssh ids for user %s.", user)
try:
util.subp(cmd, capture=False)
- except util.ProcessExecutionError as e:
+ except util.ProcessExecutionError as exc:
util.logexc(log, "Failed to run command to import %s ssh ids", user)
- raise e
+ raise exc
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index 38108da7..4d75000f 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -18,8 +18,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit import util
from cloudinit import templater
+from cloudinit import util
from cloudinit.settings import PER_ALWAYS
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index b84a1a06..1d6679ea 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -20,8 +20,8 @@
import os
-from cloudinit import util
from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
frequency = PER_ALWAYS
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
new file mode 100644
index 00000000..1e241623
--- /dev/null
+++ b/cloudinit/config/cc_users_groups.py
@@ -0,0 +1,70 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+
+
+def handle(name, cfg, cloud, log, _args):
+ user_zero = None
+
+ if 'groups' in cfg:
+ for group in cfg['groups']:
+ if isinstance(group, dict):
+ for name, values in group.iteritems():
+ if isinstance(values, list):
+ cloud.distro.create_group(name, values)
+ elif isinstance(values, str):
+ cloud.distro.create_group(name, values.split(','))
+ else:
+ cloud.distro.create_group(group, [])
+
+ if 'users' in cfg:
+ user_zero = None
+
+ for name, user_config in cfg['users'].iteritems():
+ if not user_zero:
+ user_zero = name
+
+ # Handle the default user creation
+ if name == "default" and user_config:
+ log.info("Creating default user")
+
+ # Create the default user if so defined
+ try:
+ cloud.distro.add_default_user()
+
+ if user_zero == name:
+ user_zero = cloud.distro.get_default_user()
+
+ except NotImplementedError:
+
+ if user_zero == name:
+ user_zero = None
+
+ log.warn("Distro has not implemented default user "
+ "creation. No default user will be created")
+ else:
+ # Make options friendly for distro.create_user
+ new_opts = {}
+ if isinstance(user_config, dict):
+ for opt in user_config:
+ new_opts[opt.replace('-', '')] = user_config[opt]
+
+ cloud.distro.create_user(name, **new_opts)
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 1bfa4c25..a73d6f4e 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -19,8 +19,8 @@
import base64
import os
-from cloudinit import util
from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
frequency = PER_INSTANCE
@@ -46,7 +46,7 @@ def canonicalize_extraction(encoding_type, log):
return ['application/x-gzip']
if encoding_type in ['gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64']:
return ['application/base64', 'application/x-gzip']
- # Yaml already encodes binary data as base64 if it is given to the
+ # Yaml already encodes binary data as base64 if it is given to the
# yaml file as binary, so those will be automatically decoded for you.
# But the above b64 is just for people that are more 'comfortable'
# specifing it manually (which might be a possiblity)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index da4d0180..686c6a9b 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -7,6 +7,7 @@
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Ben Howard <ben.howard@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -23,12 +24,17 @@
from StringIO import StringIO
import abc
+import grp
+import os
+import pwd
+import re
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import ssh_util
from cloudinit import util
-# TODO: Make this via config??
+# TODO(harlowja): Make this via config??
IFACE_ACTIONS = {
'up': ['ifup', '--all'],
'down': ['ifdown', '--all'],
@@ -40,12 +46,32 @@ LOG = logging.getLogger(__name__)
class Distro(object):
__metaclass__ = abc.ABCMeta
+ default_user = None
def __init__(self, name, cfg, paths):
self._paths = paths
self._cfg = cfg
self.name = name
+ def add_default_user(self):
+ # Adds the distro user using the rules:
+ # - Password is same as username but is locked
+ # - nopasswd sudo access
+
+ user = self.get_default_user()
+ if not user:
+ raise NotImplementedError("No Default user")
+
+ self.create_user(user,
+ plain_text_passwd=user,
+ home="/home/%s" % user,
+ shell="/bin/bash",
+ lockpasswd=True,
+ gecos="%s%s" % (user[0:1].upper(), user[1:]),
+ sudo="ALL=(ALL) NOPASSWD:ALL")
+
+ LOG.info("Added default '%s' user with passwordless sudo", user)
+
@abc.abstractmethod
def install_packages(self, pkglist):
raise NotImplementedError()
@@ -75,8 +101,26 @@ class Distro(object):
def update_package_sources(self):
raise NotImplementedError()
- def get_package_mirror(self):
- return self.get_option('package_mirror')
+ def get_primary_arch(self):
+ arch = os.uname[4]
+ if arch in ("i386", "i486", "i586", "i686"):
+ return "i386"
+ return arch
+
+ def _get_arch_package_mirror_info(self, arch=None):
+ mirror_info = self.get_option("package_mirrors", None)
+ if arch == None:
+ arch = self.get_primary_arch()
+ return _get_arch_package_mirror_info(mirror_info, arch)
+
+ def get_package_mirror_info(self, arch=None,
+ availability_zone=None):
+ # this resolves the package_mirrors config option
+ # down to a single dict of {mirror_name: mirror_url}
+ arch_info = self._get_arch_package_mirror_info(arch)
+
+ return _get_package_mirror_info(availability_zone=availability_zone,
+ mirror_info=arch_info)
def apply_network(self, settings, bring_up=True):
# Write it out
@@ -150,6 +194,227 @@ class Distro(object):
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
+ def isuser(self, name):
+ try:
+ if pwd.getpwnam(name):
+ return True
+ except KeyError:
+ return False
+
+ def get_default_user(self):
+ return self.default_user
+
+ def create_user(self, name, **kwargs):
+ """
+ Creates users for the system using the GNU passwd tools. This
+        will work on a GNU system. This should be overridden on
+ distros where useradd is not desirable or not available.
+ """
+
+ adduser_cmd = ['useradd', name]
+ x_adduser_cmd = ['useradd', name]
+
+ # Since we are creating users, we want to carefully validate the
+ # inputs. If something goes wrong, we can end up with a system
+ # that nobody can login to.
+ adduser_opts = {
+ "gecos": '--comment',
+ "homedir": '--home',
+ "primarygroup": '--gid',
+ "groups": '--groups',
+ "passwd": '--password',
+ "shell": '--shell',
+ "expiredate": '--expiredate',
+ "inactive": '--inactive',
+ }
+
+ adduser_opts_flags = {
+ "nousergroup": '--no-user-group',
+ "system": '--system',
+ "nologinit": '--no-log-init',
+ "nocreatehome": "-M",
+ }
+
+ # Now check the value and create the command
+ for option in kwargs:
+ value = kwargs[option]
+ if option in adduser_opts and value \
+ and isinstance(value, str):
+ adduser_cmd.extend([adduser_opts[option], value])
+
+ # Redact the password field from the logs
+ if option != "password":
+ x_adduser_cmd.extend([adduser_opts[option], value])
+ else:
+ x_adduser_cmd.extend([adduser_opts[option], 'REDACTED'])
+
+ elif option in adduser_opts_flags and value:
+ adduser_cmd.append(adduser_opts_flags[option])
+ x_adduser_cmd.append(adduser_opts_flags[option])
+
+ # Default to creating home directory unless otherwise directed
+ # Also, we do not create home directories for system users.
+ if "nocreatehome" not in kwargs and "system" not in kwargs:
+ adduser_cmd.append('-m')
+
+ # Create the user
+ if self.isuser(name):
+ LOG.warn("User %s already exists, skipping." % name)
+ else:
+ LOG.debug("Creating name %s" % name)
+ try:
+ util.subp(adduser_cmd, logstring=x_adduser_cmd)
+ except Exception as e:
+ util.logexc(LOG, "Failed to create user %s due to error.", e)
+ raise e
+
+ # Set password if plain-text password provided
+ if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
+ self.set_passwd(name, kwargs['plain_text_passwd'])
+
+ # Default locking down the account.
+ if ('lockpasswd' not in kwargs and
+ ('lockpasswd' in kwargs and kwargs['lockpasswd']) or
+ 'system' not in kwargs):
+ try:
+ util.subp(['passwd', '--lock', name])
+ except Exception as e:
+ util.logexc(LOG, ("Failed to disable password logins for"
+ "user %s" % name), e)
+ raise e
+
+ # Configure sudo access
+ if 'sudo' in kwargs:
+ self.write_sudo_rules(name, kwargs['sudo'])
+
+ # Import SSH keys
+ if 'sshauthorizedkeys' in kwargs:
+ keys = set(kwargs['sshauthorizedkeys']) or []
+ ssh_util.setup_user_keys(keys, name, None, self._paths)
+
+ return True
+
+ def set_passwd(self, user, passwd, hashed=False):
+ pass_string = '%s:%s' % (user, passwd)
+ cmd = ['chpasswd']
+
+ if hashed:
+ cmd.append('--encrypted')
+
+ try:
+ util.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
+ except Exception as e:
+ util.logexc(LOG, "Failed to set password for %s" % user)
+ raise e
+
+ return True
+
+ def write_sudo_rules(self,
+ user,
+ rules,
+ sudo_file="/etc/sudoers.d/90-cloud-init-users",
+ ):
+
+ content_header = "# user rules for %s" % user
+ content = "%s\n%s %s\n\n" % (content_header, user, rules)
+
+ if isinstance(rules, list):
+ content = "%s\n" % content_header
+ for rule in rules:
+ content += "%s %s\n" % (user, rule)
+ content += "\n"
+
+ if not os.path.exists(sudo_file):
+ util.write_file(sudo_file, content, 0644)
+
+ else:
+ try:
+ with open(sudo_file, 'a') as f:
+ f.write(content)
+ except IOError as e:
+ util.logexc(LOG, "Failed to write %s" % sudo_file, e)
+ raise e
+
+ def isgroup(self, name):
+ try:
+ if grp.getgrnam(name):
+ return True
+ except:
+ return False
+
+ def create_group(self, name, members):
+ group_add_cmd = ['groupadd', name]
+
+        # Check if group exists, and then add it if it doesn't
+ if self.isgroup(name):
+ LOG.warn("Skipping creation of existing group '%s'" % name)
+ else:
+ try:
+ util.subp(group_add_cmd)
+ LOG.info("Created new group %s" % name)
+ except Exception as e:
+ util.logexc("Failed to create group %s" % name, e)
+
+ # Add members to the group, if so defined
+ if len(members) > 0:
+ for member in members:
+ if not self.isuser(member):
+ LOG.warn("Unable to add group member '%s' to group '%s'"
+ "; user does not exist." % (member, name))
+ continue
+
+ util.subp(['usermod', '-a', '-G', name, member])
+ LOG.info("Added user '%s' to group '%s'" % (member, name))
+
+
+def _get_package_mirror_info(mirror_info, availability_zone=None,
+ mirror_filter=util.search_for_mirror):
+    # given an arch-specific 'mirror_info' entry (from package_mirrors)
+ # search through the 'search' entries, and fallback appropriately
+ # return a dict with only {name: mirror} entries.
+
+ ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" %
+ "north|northeast|east|southeast|south|southwest|west|northwest")
+
+ subst = {}
+ if availability_zone:
+ subst['availability_zone'] = availability_zone
+
+ if availability_zone and re.match(ec2_az_re, availability_zone):
+ subst['ec2_region'] = "%s" % availability_zone[0:-1]
+
+ results = {}
+ for (name, mirror) in mirror_info.get('failsafe', {}).iteritems():
+ results[name] = mirror
+
+ for (name, searchlist) in mirror_info.get('search', {}).iteritems():
+ mirrors = []
+ for tmpl in searchlist:
+ try:
+ mirrors.append(tmpl % subst)
+ except KeyError:
+ pass
+
+ found = mirror_filter(mirrors)
+ if found:
+ results[name] = found
+
+ LOG.debug("filtered distro mirror info: %s" % results)
+
+ return results
+
+
+def _get_arch_package_mirror_info(package_mirrors, arch):
+ # pull out the specific arch from a 'package_mirrors' config option
+ default = None
+ for item in package_mirrors:
+ arches = item.get("arches")
+ if arch in arches:
+ return item
+ if "default" in arches:
+ default = item
+ return default
+
def fetch(name):
locs = importer.find_module(name,
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 3247d7ce..da8c1a5b 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -147,3 +147,7 @@ class Distro(distros.Distro):
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
["update"], freq=PER_INSTANCE)
+
+ def get_primary_arch(self):
+ (arch, _err) = util.subp(['dpkg', '--print-architecture'])
+ return str(arch).strip()
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 7fa69f03..d81ee5fb 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -69,7 +69,7 @@ class Distro(distros.Distro):
self.package_command('install', pkglist)
def _write_network(self, settings):
- # TODO fix this... since this is the ubuntu format
+ # TODO(harlowja) fix this... since this is the ubuntu format
entries = translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
@@ -258,7 +258,7 @@ class QuotingConfigObj(ConfigObj):
# This is a util function to translate a ubuntu /etc/network/interfaces 'blob'
# to a rhel equiv. that can then be written to /etc/sysconfig/network-scripts/
-# TODO remove when we have python-netcf active...
+# TODO(harlowja) remove when we have python-netcf active...
def translate_network(settings):
# Get the standard cmd, args from the ubuntu format
entries = []
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index 77c2aff4..1f4efb59 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -7,6 +7,7 @@
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Ben Howard <ben.howard@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -21,11 +22,12 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit.distros import debian
-
from cloudinit import log as logging
LOG = logging.getLogger(__name__)
class Distro(debian.Distro):
- pass
+
+ distro_name = 'ubuntu'
+ default_user = 'ubuntu'
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 6d1502f4..99caed1f 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -133,7 +133,7 @@ def walker_handle_handler(pdata, _ctype, _filename, payload):
modfname = os.path.join(pdata['handlerdir'], "%s" % (modname))
if not modfname.endswith(".py"):
modfname = "%s.py" % (modfname)
- # TODO: Check if path exists??
+ # TODO(harlowja): Check if path exists??
util.write_file(modfname, payload, 0600)
handlers = pdata['handlers']
try:
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index a9d8e544..6c5c11ca 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -43,7 +43,7 @@ class ShellScriptPartHandler(handlers.Handler):
def _handle_part(self, _data, ctype, filename, payload, _frequency):
if ctype in handlers.CONTENT_SIGNALS:
- # TODO: maybe delete existing things here
+ # TODO(harlowja): maybe delete existing things here
return
filename = util.clean_filename(filename)
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 819c85b6..2333e5ee 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -21,8 +21,8 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
-import logging.handlers
import logging.config
+import logging.handlers
import collections
import os
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 2083cf60..8cc9e3b4 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -31,10 +31,13 @@ CFG_BUILTIN = {
'datasource_list': [
'NoCloud',
'ConfigDrive',
+ 'AltCloud',
'OVF',
'MAAS',
'Ec2',
- 'CloudStack'
+ 'CloudStack',
+ # At the end to act as a 'catch' when none of the above work...
+ 'None',
],
'def_log_file': '/var/log/cloud-init.log',
'log_cfgs': [],
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
new file mode 100644
index 00000000..69c376a5
--- /dev/null
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -0,0 +1,299 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joe VLcek <JVLcek@RedHat.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''
+This file contains code used to gather the user data passed to an
+instance on RHEVm and vSphere.
+'''
+
+import errno
+import os
+import os.path
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit.util import ProcessExecutionError
+
+LOG = logging.getLogger(__name__)
+
+# Needed file paths
+CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
+
+# Shell command lists
+CMD_DMI_SYSTEM = ['/usr/sbin/dmidecode', '--string', 'system-product-name']
+CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
+CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5']
+
+META_DATA_NOT_SUPPORTED = {
+ 'block-device-mapping': {},
+ 'instance-id': 455,
+ 'local-hostname': 'localhost',
+ 'placement': {},
+ }
+
+
+def read_user_data_callback(mount_dir):
+ '''
+ Description:
+ This callback will be applied by util.mount_cb() on the mounted
+ file.
+
+ Deltacloud file name contains deltacloud. Those not using
+ Deltacloud but instead instrumenting the injection, could
+ drop deltacloud from the file name.
+
+ Input:
+ mount_dir - Mount directory
+
+ Returns:
+ User Data
+
+ '''
+
+ deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
+ user_data_file = mount_dir + '/user-data.txt'
+
+ # First try deltacloud_user_data_file. On failure try user_data_file.
+ try:
+ with open(deltacloud_user_data_file, 'r') as user_data_f:
+ user_data = user_data_f.read().strip()
+ except:
+ try:
+ with open(user_data_file, 'r') as user_data_f:
+ user_data = user_data_f.read().strip()
+ except:
+ util.logexc(LOG, ('Failed accessing user data file.'))
+ return None
+
+ return user_data
+
+
+class DataSourceAltCloud(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.seed = None
+ self.supported_seed_starts = ("/", "file://")
+
+ def __str__(self):
+ mstr = "%s [seed=%s]" % (util.obj_name(self), self.seed)
+ return mstr
+
+ def get_cloud_type(self):
+ '''
+ Description:
+ Get the type for the cloud back end this instance is running on
+ by examining the string returned by:
+ dmidecode --string system-product-name
+
+        On RHEV-M dmidecode returns: RHEV Hypervisor
+ On VMWare/vSphere dmidecode returns: VMware Virtual Platform
+
+ Input:
+ None
+
+ Returns:
+ One of the following strings:
+ 'RHEV', 'VSPHERE' or 'UNKNOWN'
+
+ '''
+
+ cmd = CMD_DMI_SYSTEM
+ try:
+ (cmd_out, _err) = util.subp(cmd)
+ except ProcessExecutionError, _err:
+ LOG.debug(('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message))
+ return 'UNKNOWN'
+ except OSError, _err:
+ LOG.debug(('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message))
+ return 'UNKNOWN'
+
+ if cmd_out.upper().startswith('RHEV'):
+ return 'RHEV'
+
+ if cmd_out.upper().startswith('VMWARE'):
+ return 'VSPHERE'
+
+ return 'UNKNOWN'
+
+ def get_data(self):
+ '''
+ Description:
+ User Data is passed to the launching instance which
+ is used to perform instance configuration.
+
+ Cloud providers expose the user data differently.
+ It is necessary to determine which cloud provider
+ the current instance is running on to determine
+ how to access the user data. Images built with
+ image factory will contain a CLOUD_INFO_FILE which
+ contains a string identifying the cloud provider.
+
+ Images not built with Imagefactory will try to
+ determine what the cloud provider is based on system
+ information.
+ '''
+
+ LOG.debug('Invoked get_data()')
+
+ if os.path.exists(CLOUD_INFO_FILE):
+ try:
+ cloud_info = open(CLOUD_INFO_FILE)
+ cloud_type = cloud_info.read().strip().upper()
+ cloud_info.close()
+ except:
+ util.logexc(LOG, 'Unable to access cloud info file.')
+ return False
+ else:
+ cloud_type = self.get_cloud_type()
+
+ LOG.debug('cloud_type: ' + str(cloud_type))
+
+ if 'RHEV' in cloud_type:
+ if self.user_data_rhevm():
+ return True
+ elif 'VSPHERE' in cloud_type:
+ if self.user_data_vsphere():
+ return True
+ else:
+ # there was no recognized alternate cloud type
+ # indicating this handler should not be used.
+ return False
+
+ # No user data found
+ util.logexc(LOG, ('Failed accessing user data.'))
+ return False
+
+ def user_data_rhevm(self):
+ '''
+ RHEVM specific userdata read
+
+ If on RHEV-M the user data will be contained on the
+ floppy device in file <user_data_file>
+ To access it:
+ modprobe floppy
+
+ Leverage util.mount_cb to:
+ mkdir <tmp mount dir>
+ mount /dev/fd0 <tmp mount dir>
+ The call back passed to util.mount_cb will do:
+ read <tmp mount dir>/<user_data_file>
+ '''
+
+ return_str = None
+
+ # modprobe floppy
+ try:
+ cmd = CMD_PROBE_FLOPPY
+ (cmd_out, _err) = util.subp(cmd)
+ LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
+ except ProcessExecutionError, _err:
+ util.logexc(LOG, (('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message)))
+ return False
+ except OSError, _err:
+ util.logexc(LOG, (('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message)))
+ return False
+
+ floppy_dev = '/dev/fd0'
+
+ # udevadm settle for floppy device
+ try:
+ cmd = CMD_UDEVADM_SETTLE
+ cmd.append('--exit-if-exists=' + floppy_dev)
+ (cmd_out, _err) = util.subp(cmd)
+ LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
+ except ProcessExecutionError, _err:
+ util.logexc(LOG, (('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message)))
+ return False
+ except OSError, _err:
+ util.logexc(LOG, (('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message)))
+ return False
+
+ try:
+ return_str = util.mount_cb(floppy_dev, read_user_data_callback)
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ except util.MountFailedError:
+ util.logexc(LOG, ("Failed to mount %s"
+ " when looking for user data"), floppy_dev)
+
+ self.userdata_raw = return_str
+ self.metadata = META_DATA_NOT_SUPPORTED
+
+ if return_str:
+ return True
+ else:
+ return False
+
+ def user_data_vsphere(self):
+ '''
+ vSphere specific userdata read
+
+ If on vSphere the user data will be contained on the
+ cdrom device in file <user_data_file>
+ To access it:
+ Leverage util.mount_cb to:
+ mkdir <tmp mount dir>
+ mount /dev/fd0 <tmp mount dir>
+ The call back passed to util.mount_cb will do:
+ read <tmp mount dir>/<user_data_file>
+ '''
+
+ return_str = None
+ cdrom_list = util.find_devs_with('LABEL=CDROM')
+ for cdrom_dev in cdrom_list:
+ try:
+ return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
+ if return_str:
+ break
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ except util.MountFailedError:
+ util.logexc(LOG, ("Failed to mount %s"
+ " when looking for user data"), cdrom_dev)
+
+ self.userdata_raw = return_str
+ self.metadata = META_DATA_NOT_SUPPORTED
+
+ if return_str:
+ return True
+ else:
+ return False
+
+# Used to match classes to dependencies
+# Source DataSourceAltCloud does not really depend on networking.
+# In the future 'dsmode' like behavior can be added to offer user
+# the ability to run before networking.
+datasources = [
+ (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 751bef4f..f7ffa7cb 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -49,8 +49,7 @@ class DataSourceCloudStack(sources.DataSource):
self.metadata_address = "http://%s/" % (gw_addr)
def get_default_gateway(self):
- """ Returns the default gateway ip address in the dotted format
- """
+ """Returns the default gateway ip address in the dotted format."""
lines = util.load_file("/proc/net/route").splitlines()
for line in lines:
items = line.split("\t")
@@ -132,7 +131,8 @@ class DataSourceCloudStack(sources.DataSource):
def get_instance_id(self):
return self.metadata['instance-id']
- def get_availability_zone(self):
+ @property
+ def availability_zone(self):
return self.metadata['availability-zone']
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 320dd1d1..b8154367 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -30,88 +30,119 @@ LOG = logging.getLogger(__name__)
# Various defaults/constants...
DEFAULT_IID = "iid-dsconfigdrive"
DEFAULT_MODE = 'pass'
-CFG_DRIVE_FILES = [
+CFG_DRIVE_FILES_V1 = [
"etc/network/interfaces",
"root/.ssh/authorized_keys",
"meta.js",
]
DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
- "dsmode": DEFAULT_MODE,
}
-CFG_DRIVE_DEV_ENV = 'CLOUD_INIT_CONFIG_DRIVE_DEVICE'
+VALID_DSMODES = ("local", "net", "pass", "disabled")
class DataSourceConfigDrive(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed = None
- self.cfg = {}
+ self.source = None
self.dsmode = 'local'
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
+ self.version = None
def __str__(self):
- mstr = "%s [%s]" % (util.obj_name(self), self.dsmode)
- mstr += "[seed=%s]" % (self.seed)
+ mstr = "%s [%s,ver=%s]" % (util.obj_name(self), self.dsmode,
+ self.version)
+ mstr += "[source=%s]" % (self.source)
return mstr
def get_data(self):
found = None
md = {}
- ud = ""
+ results = {}
if os.path.isdir(self.seed_dir):
try:
- (md, ud) = read_config_drive_dir(self.seed_dir)
+ results = read_config_drive_dir(self.seed_dir)
found = self.seed_dir
except NonConfigDriveDir:
util.logexc(LOG, "Failed reading config drive from %s",
self.seed_dir)
if not found:
- dev = find_cfg_drive_device()
- if dev:
+ devlist = find_candidate_devs()
+ for dev in devlist:
try:
- (md, ud) = util.mount_cb(dev, read_config_drive_dir)
+ results = util.mount_cb(dev, read_config_drive_dir)
found = dev
+ break
except (NonConfigDriveDir, util.MountFailedError):
pass
+ except BrokenConfigDriveDir:
+ util.logexc(LOG, "broken config drive: %s", dev)
if not found:
return False
- if 'dsconfig' in md:
- self.cfg = md['dscfg']
-
+ md = results['metadata']
md = util.mergedict(md, DEFAULT_METADATA)
- # Update interfaces and ifup only on the local datasource
- # this way the DataSourceConfigDriveNet doesn't do it also.
- if 'network-interfaces' in md and self.dsmode == "local":
+ user_dsmode = results.get('dsmode', None)
+ if user_dsmode not in VALID_DSMODES + (None,):
+ LOG.warn("user specified invalid mode: %s" % user_dsmode)
+ user_dsmode = None
+
+ dsmode = get_ds_mode(cfgdrv_ver=results['cfgdrive_ver'],
+ ds_cfg=self.ds_cfg.get('dsmode'),
+ user=user_dsmode)
+
+ if dsmode == "disabled":
+ # most likely user specified
+ return False
+
+ # TODO(smoser): fix this, its dirty.
+ # we want to do some things (writing files and network config)
+ # only on first boot, and even then, we want to do so in the
+ # local datasource (so they happen earlier) even if the configured
+ # dsmode is 'net' or 'pass'. To do this, we check the previous
+ # instance-id
+ prev_iid = get_previous_iid(self.paths)
+ cur_iid = md['instance-id']
+
+ if ('network_config' in results and self.dsmode == "local" and
+ prev_iid != cur_iid):
LOG.debug("Updating network interfaces from config drive (%s)",
- md['dsmode'])
- self.distro.apply_network(md['network-interfaces'])
+ dsmode)
+ self.distro.apply_network(results['network_config'])
- self.seed = found
- self.metadata = md
- self.userdata_raw = ud
+ # file writing occurs in local mode (to be as early as possible)
+ if self.dsmode == "local" and prev_iid != cur_iid and results['files']:
+ LOG.debug("writing injected files")
+ try:
+ write_files(results['files'])
+ except:
+ util.logexc(LOG, "Failed writing files")
+
+ # dsmode != self.dsmode here if:
+ # * dsmode = "pass", pass means it should only copy files and then
+ # pass to another datasource
+ # * dsmode = "net" and self.dsmode = "local"
+ # so that user boothooks would be applied with network, the
+ # local datasource just gets out of the way, and lets the net claim
+ if dsmode != self.dsmode:
+ LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
+ return False
- if md['dsmode'] == self.dsmode:
- return True
+ self.source = found
+ self.metadata = md
+ self.userdata_raw = results.get('userdata')
+ self.version = results['cfgdrive_ver']
- LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
- return False
+ return True
def get_public_ssh_keys(self):
if not 'public-keys' in self.metadata:
return []
return self.metadata['public-keys']
- # The data sources' config_obj is a cloud-config formated
- # object that came to it from ways other than cloud-config
- # because cloud-config content would be handled elsewhere
- def get_config_obj(self):
- return self.cfg
-
class DataSourceConfigDriveNet(DataSourceConfigDrive):
def __init__(self, sys_cfg, distro, paths):
@@ -123,48 +154,146 @@ class NonConfigDriveDir(Exception):
pass
-def find_cfg_drive_device():
- """ Get the config drive device. Return a string like '/dev/vdb'
- or None (if there is no non-root device attached). This does not
- check the contents, only reports that if there *were* a config_drive
- attached, it would be this device.
- Note: per config_drive documentation, this is
- "associated as the last available disk on the instance"
- """
+class BrokenConfigDriveDir(Exception):
+ pass
- # This seems to be for debugging??
- if CFG_DRIVE_DEV_ENV in os.environ:
- return os.environ[CFG_DRIVE_DEV_ENV]
- # We are looking for a raw block device (sda, not sda1) with a vfat
- # filesystem on it....
- letters = "abcdefghijklmnopqrstuvwxyz"
- devs = util.find_devs_with("TYPE=vfat")
+def find_candidate_devs():
+ """Return a list of devices that may contain the config drive.
- # Filter out anything not ending in a letter (ignore partitions)
- devs = [f for f in devs if f[-1] in letters]
+    The returned list is sorted by search order where the first item
+ should be searched first (highest priority)
+
+ config drive v1:
+ Per documentation, this is "associated as the last available disk on the
+ instance", and should be VFAT.
+ Currently, we do not restrict search list to "last available disk"
+
+ config drive v2:
+ Disk should be:
+        * either vfat or iso9660 formatted
+ * labeled with 'config-2'
+ """
- # Sort them in reverse so "last" device is first
- devs.sort(reverse=True)
+ by_fstype = (util.find_devs_with("TYPE=vfat") +
+ util.find_devs_with("TYPE=iso9660"))
+ by_label = util.find_devs_with("LABEL=config-2")
- if devs:
- return devs[0]
+ # give preference to "last available disk" (vdb over vda)
+ # note, this is not a perfect rendition of that.
+ by_fstype.sort(reverse=True)
+ by_label.sort(reverse=True)
- return None
+ # combine list of items by putting by-label items first
+ # followed by fstype items, but with dupes removed
+ combined = (by_label + [d for d in by_fstype if d not in by_label])
+
+ # We are looking for block device (sda, not sda1), ignore partitions
+ combined = [d for d in combined if d[-1] not in "0123456789"]
+
+ return combined
def read_config_drive_dir(source_dir):
+ last_e = NonConfigDriveDir("Not found")
+ for finder in (read_config_drive_dir_v2, read_config_drive_dir_v1):
+ try:
+ data = finder(source_dir)
+ return data
+ except NonConfigDriveDir as exc:
+ last_e = exc
+ raise last_e
+
+
+def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
+
+ if (not os.path.isdir(os.path.join(source_dir, "openstack", version)) and
+ os.path.isdir(os.path.join(source_dir, "openstack", "latest"))):
+ LOG.warn("version '%s' not available, attempting to use 'latest'" %
+ version)
+ version = "latest"
+
+ datafiles = (
+ ('metadata',
+ "openstack/%s/meta_data.json" % version, True, json.loads),
+ ('userdata', "openstack/%s/user_data" % version, False, None),
+ ('ec2-metadata', "ec2/latest/metadata.json", False, json.loads),
+ )
+
+ results = {'userdata': None}
+ for (name, path, required, process) in datafiles:
+ fpath = os.path.join(source_dir, path)
+ data = None
+ found = False
+ if os.path.isfile(fpath):
+ try:
+ with open(fpath) as fp:
+ data = fp.read()
+ except Exception as exc:
+ raise BrokenConfigDriveDir("failed to read: %s" % fpath)
+ found = True
+ elif required:
+ raise NonConfigDriveDir("missing mandatory %s" % fpath)
+
+ if found and process:
+ try:
+ data = process(data)
+ except Exception as exc:
+ raise BrokenConfigDriveDir("failed to process: %s" % fpath)
+
+ if found:
+ results[name] = data
+
+ # instance-id is 'uuid' for openstack. just copy it to instance-id.
+ if 'instance-id' not in results['metadata']:
+ try:
+ results['metadata']['instance-id'] = results['metadata']['uuid']
+ except KeyError:
+ raise BrokenConfigDriveDir("No uuid entry in metadata")
+
+ def read_content_path(item):
+ # do not use os.path.join here, as content_path starts with /
+ cpath = os.path.sep.join((source_dir, "openstack",
+ "./%s" % item['content_path']))
+ with open(cpath) as fp:
+ return(fp.read())
+
+ files = {}
+ try:
+ for item in results['metadata'].get('files', {}):
+ files[item['path']] = read_content_path(item)
+
+ # the 'network_config' item in metadata is a content pointer
+ # to the network config that should be applied.
+ # in folsom, it is just a '/etc/network/interfaces' file.
+ item = results['metadata'].get("network_config", None)
+ if item:
+ results['network_config'] = read_content_path(item)
+ except Exception as exc:
+ raise BrokenConfigDriveDir("failed to read file %s: %s" % (item, exc))
+
+ # to openstack, user can specify meta ('nova boot --meta=key=value') and
+ # those will appear under metadata['meta'].
+ # if they specify 'dsmode' they're indicating the mode that they intend
+ # for this datasource to operate in.
+ try:
+ results['dsmode'] = results['metadata']['meta']['dsmode']
+ except KeyError:
+ pass
+
+ results['files'] = files
+ results['cfgdrive_ver'] = 2
+ return results
+
+
+def read_config_drive_dir_v1(source_dir):
"""
- read_config_drive_dir(source_dir):
- read source_dir, and return a tuple with metadata dict and user-data
- string populated. If not a valid dir, raise a NonConfigDriveDir
+ read source_dir, and return a tuple with metadata dict, user-data,
+ files and version (1). If not a valid dir, raise a NonConfigDriveDir
"""
- # TODO: fix this for other operating systems...
- # Ie: this is where https://fedorahosted.org/netcf/ or similar should
- # be hooked in... (or could be)
found = {}
- for af in CFG_DRIVE_FILES:
+ for af in CFG_DRIVE_FILES_V1:
fn = os.path.join(source_dir, af)
if os.path.isfile(fn):
found[af] = fn
@@ -173,11 +302,10 @@ def read_config_drive_dir(source_dir):
raise NonConfigDriveDir("%s: %s" % (source_dir, "no files found"))
md = {}
- ud = ""
keydata = ""
if "etc/network/interfaces" in found:
fn = found["etc/network/interfaces"]
- md['network-interfaces'] = util.load_file(fn)
+ md['network_config'] = util.load_file(fn)
if "root/.ssh/authorized_keys" in found:
fn = found["root/.ssh/authorized_keys"]
@@ -197,21 +325,77 @@ def read_config_drive_dir(source_dir):
(source_dir, "invalid json in meta.js", e))
md['meta_js'] = content
- # Key data override??
+ # keydata in meta_js is preferred over "injected"
keydata = meta_js.get('public-keys', keydata)
if keydata:
lines = keydata.splitlines()
md['public-keys'] = [l for l in lines
if len(l) and not l.startswith("#")]
- for copy in ('dsmode', 'instance-id', 'dscfg'):
- if copy in meta_js:
- md[copy] = meta_js[copy]
+ # config-drive-v1 has no way for openstack to provide the instance-id
+ # so we copy that into metadata from the user input
+ if 'instance-id' in meta_js:
+ md['instance-id'] = meta_js['instance-id']
+
+ results = {'cfgdrive_ver': 1, 'metadata': md}
+
+ # allow the user to specify 'dsmode' in a meta tag
+ if 'dsmode' in meta_js:
+ results['dsmode'] = meta_js['dsmode']
+
+ # config-drive-v1 has no way of specifying user-data, so the user has
+ # to cheat and stuff it in a meta tag also.
+ results['userdata'] = meta_js.get('user-data')
- if 'user-data' in meta_js:
- ud = meta_js['user-data']
+ # this implementation does not support files
+ # (other than network/interfaces and authorized_keys)
+ results['files'] = []
- return (md, ud)
+ return results
+
+
+def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
+ """Determine what mode should be used.
+ valid values are 'pass', 'disabled', 'local', 'net'
+ """
+ # user passed data trumps everything
+ if user is not None:
+ return user
+
+ if ds_cfg is not None:
+ return ds_cfg
+
+ # at config-drive version 1, the default behavior was 'pass'. That
+ # meant to not use it as the primary data source, but to expect an
+ # ec2 metadata source. for version 2, we default to 'net', which means
+ # the DataSourceConfigDriveNet would be used.
+ #
+ # this could change in the future. If there was definitive metadata
+ # that indicated presence of an openstack metadata service, then
+ # we could change to 'pass' by default also. The motivation for that
+ # would be 'cloud-init query' as the web service could be more dynamic
+ if cfgdrv_ver == 1:
+ return "pass"
+ return "net"
+
+
+def get_previous_iid(paths):
+ # interestingly, for this purpose the "previous" instance-id is the current
+ # instance-id. cloud-init hasn't moved them over yet as this datasource
+ # hasn't declared itself found.
+ fname = os.path.join(paths.get_cpath('data'), 'instance-id')
+ try:
+ with open(fname) as fp:
+ return fp.read()
+ except IOError:
+ return None
+
+
+def write_files(files):
+ for (name, content) in files.iteritems():
+ if name[0] != os.sep:
+ name = os.sep + name
+ util.write_file(name, content, mode=0660)
# Used to match classes to dependencies
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index d9eb8f17..7e845571 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -40,7 +40,7 @@ DEF_MD_VERSION = '2009-04-04'
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
-DEF_MD_URLS = [DEF_MD_URL, "http://instance-data:8773"]
+DEF_MD_URLS = [DEF_MD_URL, "http://instance-data.:8773"]
class DataSourceEc2(sources.DataSource):
@@ -83,40 +83,6 @@ class DataSourceEc2(sources.DataSource):
def get_availability_zone(self):
return self.metadata['placement']['availability-zone']
- def get_local_mirror(self):
- return self.get_mirror_from_availability_zone()
-
- def get_mirror_from_availability_zone(self, availability_zone=None):
- # Return type None indicates there is no cloud specific mirror
- # Availability is like 'us-west-1b' or 'eu-west-1a'
- if availability_zone is None:
- availability_zone = self.get_availability_zone()
-
- if self.is_vpc():
- return None
-
- if not availability_zone:
- return None
-
- mirror_tpl = self.distro.get_option('package_mirror_ec2_template',
- None)
-
- if mirror_tpl is None:
- return None
-
- # in EC2, the 'region' is 'us-east-1' if 'zone' is 'us-east-1a'
- tpl_params = {
- 'zone': availability_zone.strip(),
- 'region': availability_zone[:-1]
- }
- mirror_url = mirror_tpl % (tpl_params)
-
- found = util.search_for_mirror([mirror_url])
- if found is not None:
- return mirror_url
-
- return None
-
def _get_url_settings(self):
mcfg = self.ds_cfg
if not mcfg:
@@ -255,6 +221,12 @@ class DataSourceEc2(sources.DataSource):
return True
return False
+ @property
+ def availability_zone(self):
+ try:
+ return self.metadata['placement']['availability-zone']
+ except KeyError:
+ return None
# Used to match classes to dependencies
datasources = [
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index f16d5c21..c568d365 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -262,3 +262,94 @@ datasources = [
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+ def main():
+ """
+ Call with single argument of directory or http or https url.
+ If url is given additional arguments are allowed, which will be
+ interpreted as consumer_key, token_key, token_secret, consumer_secret
+ """
+ import argparse
+ import pprint
+
+ parser = argparse.ArgumentParser(description='Interact with MAAS DS')
+ parser.add_argument("--config", metavar="file",
+ help="specify DS config file", default=None)
+ parser.add_argument("--ckey", metavar="key",
+ help="the consumer key to auth with", default=None)
+ parser.add_argument("--tkey", metavar="key",
+ help="the token key to auth with", default=None)
+ parser.add_argument("--csec", metavar="secret",
+ help="the consumer secret (likely '')", default="")
+ parser.add_argument("--tsec", metavar="secret",
+ help="the token secret to auth with", default=None)
+ parser.add_argument("--apiver", metavar="version",
+ help="the apiver to use ("" can be used)", default=MD_VERSION)
+
+ subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
+ subcmds.add_parser('crawl', help="crawl the datasource")
+ subcmds.add_parser('get', help="do a single GET of provided url")
+ subcmds.add_parser('check-seed', help="read andn verify seed at url")
+
+ parser.add_argument("url", help="the data source to query")
+
+ args = parser.parse_args()
+
+ creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
+ 'token_secret': args.tsec, 'consumer_secret': args.csec}
+
+ if args.config:
+ import yaml
+ with open(args.config) as fp:
+ cfg = yaml.safe_load(fp)
+ if 'datasource' in cfg:
+ cfg = cfg['datasource']['MAAS']
+ for key in creds.keys():
+ if key in cfg and creds[key] is None:
+ creds[key] = cfg[key]
+
+ def geturl(url, headers_cb):
+ req = urllib2.Request(url, data=None, headers=headers_cb(url))
+ return(urllib2.urlopen(req).read())
+
+ def printurl(url, headers_cb):
+ print "== %s ==\n%s\n" % (url, geturl(url, headers_cb))
+
+ def crawl(url, headers_cb=None):
+ if url.endswith("/"):
+ for line in geturl(url, headers_cb).splitlines():
+ if line.endswith("/"):
+ crawl("%s%s" % (url, line), headers_cb)
+ else:
+ printurl("%s%s" % (url, line), headers_cb)
+ else:
+ printurl(url, headers_cb)
+
+ def my_headers(url):
+ headers = {}
+ if creds.get('consumer_key', None) is not None:
+ headers = oauth_headers(url, **creds)
+ return headers
+
+ if args.subcmd == "check-seed":
+ if args.url.startswith("http"):
+ (userdata, metadata) = read_maas_seed_url(args.url,
+ header_cb=my_headers, version=args.apiver)
+ else:
+ (userdata, metadata) = read_maas_seed_url(args.url)
+ print "=== userdata ==="
+ print userdata
+ print "=== metadata ==="
+ pprint.pprint(metadata)
+
+ elif args.subcmd == "get":
+ printurl(args.url, my_headers)
+
+ elif args.subcmd == "crawl":
+ if not args.url.endswith("/"):
+ args.url = "%s/" % args.url
+ crawl(args.url, my_headers)
+
+ main()
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
new file mode 100644
index 00000000..c2125bee
--- /dev/null
+++ b/cloudinit/sources/DataSourceNone.py
@@ -0,0 +1,61 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class DataSourceNone(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths, ud_proc=None):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
+ self.metadata = {}
+ self.userdata_raw = ''
+
+ def get_data(self):
+ # If the datasource config has any provided 'fallback'
+ # userdata or metadata, use it...
+ if 'userdata_raw' in self.ds_cfg:
+ self.userdata_raw = self.ds_cfg['userdata_raw']
+ if 'metadata' in self.ds_cfg:
+ self.metadata = self.ds_cfg['metadata']
+ return True
+
+ def get_instance_id(self):
+ return 'iid-datasource-none'
+
+ def __str__(self):
+ return util.obj_name(self)
+
+ @property
+ def is_disconnected(self):
+ return True
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceNone, []),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index b25724a5..4719d254 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -65,6 +65,10 @@ class DataSource(object):
self.userdata = self.ud_proc.process(raw_data)
return self.userdata
+ @property
+ def is_disconnected(self):
+ return False
+
def get_userdata_raw(self):
return self.userdata_raw
@@ -113,9 +117,9 @@ class DataSource(object):
def get_locale(self):
return 'en_US.UTF-8'
- def get_local_mirror(self):
- # ??
- return None
+ @property
+ def availability_zone(self):
+ return self.metadata.get('availability-zone')
def get_instance_id(self):
if not self.metadata or 'instance-id' not in self.metadata:
@@ -162,6 +166,10 @@ class DataSource(object):
else:
return hostname
+ def get_package_mirror_info(self):
+ return self.distro.get_package_mirror_info(
+ availability_zone=self.availability_zone)
+
def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
ds_list = list_sources(cfg_list, ds_deps, pkg_list)
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index e0a2f0ca..88a11a1a 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -181,12 +181,11 @@ def parse_authorized_keys(fname):
return contents
-def update_authorized_keys(fname, keys):
- entries = parse_authorized_keys(fname)
+def update_authorized_keys(old_entries, keys):
to_add = list(keys)
- for i in range(0, len(entries)):
- ent = entries[i]
+ for i in range(0, len(old_entries)):
+ ent = old_entries[i]
if ent.empty() or not ent.base64:
continue
# Replace those with the same base64
@@ -199,66 +198,81 @@ def update_authorized_keys(fname, keys):
# Don't add it later
if k in to_add:
to_add.remove(k)
- entries[i] = ent
+ old_entries[i] = ent
# Now append any entries we did not match above
for key in to_add:
- entries.append(key)
+ old_entries.append(key)
# Now format them back to strings...
- lines = [str(b) for b in entries]
+ lines = [str(b) for b in old_entries]
# Ensure it ends with a newline
lines.append('')
return '\n'.join(lines)
-def setup_user_keys(keys, user, key_prefix, paths):
- # Make sure the users .ssh dir is setup accordingly
- pwent = pwd.getpwnam(user)
- ssh_dir = os.path.join(pwent.pw_dir, '.ssh')
- ssh_dir = paths.join(False, ssh_dir)
- if not os.path.exists(ssh_dir):
- util.ensure_dir(ssh_dir, mode=0700)
- util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
+def users_ssh_info(username, paths):
+ pw_ent = pwd.getpwnam(username)
+ if not pw_ent:
+ raise RuntimeError("Unable to get ssh info for user %r" % (username))
+ ssh_dir = paths.join(False, os.path.join(pw_ent.pw_dir, '.ssh'))
+ return (ssh_dir, pw_ent)
- # Turn the keys given into actual entries
- parser = AuthKeyLineParser()
- key_entries = []
- for k in keys:
- key_entries.append(parser.parse(str(k), def_opt=key_prefix))
+def extract_authorized_keys(username, paths):
+ (ssh_dir, pw_ent) = users_ssh_info(username, paths)
sshd_conf_fn = paths.join(True, DEF_SSHD_CFG)
+ auth_key_fn = None
with util.SeLinuxGuard(ssh_dir, recursive=True):
try:
- # AuthorizedKeysFile may contain tokens
+ # The 'AuthorizedKeysFile' may contain tokens
# of the form %T which are substituted during connection set-up.
# The following tokens are defined: %% is replaced by a literal
# '%', %h is replaced by the home directory of the user being
# authenticated and %u is replaced by the username of that user.
ssh_cfg = parse_ssh_config_map(sshd_conf_fn)
- akeys = ssh_cfg.get("authorizedkeysfile", '')
- akeys = akeys.strip()
- if not akeys:
- akeys = "%h/.ssh/authorized_keys"
- akeys = akeys.replace("%h", pwent.pw_dir)
- akeys = akeys.replace("%u", user)
- akeys = akeys.replace("%%", '%')
- if not akeys.startswith('/'):
- akeys = os.path.join(pwent.pw_dir, akeys)
- authorized_keys = paths.join(False, akeys)
+ auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip()
+ if not auth_key_fn:
+ auth_key_fn = "%h/.ssh/authorized_keys"
+ auth_key_fn = auth_key_fn.replace("%h", pw_ent.pw_dir)
+ auth_key_fn = auth_key_fn.replace("%u", username)
+ auth_key_fn = auth_key_fn.replace("%%", '%')
+ if not auth_key_fn.startswith('/'):
+ auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn)
+ auth_key_fn = paths.join(False, auth_key_fn)
except (IOError, OSError):
- authorized_keys = os.path.join(ssh_dir, 'authorized_keys')
+ # Give up and use a default key filename
+ auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
util.logexc(LOG, ("Failed extracting 'AuthorizedKeysFile'"
" in ssh config"
- " from %s, using 'AuthorizedKeysFile' file"
- " %s instead"),
- sshd_conf_fn, authorized_keys)
-
- content = update_authorized_keys(authorized_keys, key_entries)
- util.ensure_dir(os.path.dirname(authorized_keys), mode=0700)
- util.write_file(authorized_keys, content, mode=0600)
- util.chownbyid(authorized_keys, pwent.pw_uid, pwent.pw_gid)
+ " from %r, using 'AuthorizedKeysFile' file"
+ " %r instead"),
+ sshd_conf_fn, auth_key_fn)
+ auth_key_entries = parse_authorized_keys(auth_key_fn)
+ return (auth_key_fn, auth_key_entries)
+
+
+def setup_user_keys(keys, username, key_prefix, paths):
+ # Make sure the users .ssh dir is setup accordingly
+ (ssh_dir, pwent) = users_ssh_info(username, paths)
+ if not os.path.isdir(ssh_dir):
+ util.ensure_dir(ssh_dir, mode=0700)
+ util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
+
+ # Turn the 'update' keys given into actual entries
+ parser = AuthKeyLineParser()
+ key_entries = []
+ for k in keys:
+ key_entries.append(parser.parse(str(k), def_opt=key_prefix))
+
+ # Extract the old and make the new
+ (auth_key_fn, auth_key_entries) = extract_authorized_keys(username, paths)
+ with util.SeLinuxGuard(ssh_dir, recursive=True):
+ content = update_authorized_keys(auth_key_entries, key_entries)
+ util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700)
+ util.write_file(auth_key_fn, content, mode=0600)
+ util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
class SshdConfigLine(object):
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 2f6a566c..c9634a90 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -326,7 +326,7 @@ class Init(object):
'paths': self.paths,
'datasource': self.datasource,
}
- # TODO Hmmm, should we dynamically import these??
+ # TODO(harlowja) Hmmm, should we dynamically import these??
def_handlers = [
cc_part.CloudConfigPartHandler(**opts),
ss_part.ShellScriptPartHandler(**opts),
@@ -519,7 +519,7 @@ class Modules(object):
" but not on %s distro. It may or may not work"
" correctly."), name, worked_distros, d_name)
# Use the configs logger and not our own
- # TODO: possibly check the module
+ # TODO(harlowja): possibly check the module
# for having a LOG attr and just give it back
# its own logger?
func_args = [name, self.cfg,
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index f5d01818..af98b488 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -23,9 +23,9 @@
import os
import email
+from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
-from email.mime.base import MIMEBase
from cloudinit import handlers
from cloudinit import log as logging
@@ -159,7 +159,7 @@ class UserDataProcessor(object):
if isinstance(ent, (str, basestring)):
ent = {'content': ent}
if not isinstance(ent, (dict)):
- # TODO raise?
+ # TODO(harlowja) raise?
continue
content = ent.get('content', '')
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 6eb2a10e..6872cc31 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -24,8 +24,8 @@
from StringIO import StringIO
-import copy as obj_copy
import contextlib
+import copy as obj_copy
import errno
import glob
import grp
@@ -55,6 +55,7 @@ from cloudinit import url_helper as uhelp
from cloudinit.settings import (CFG_BUILTIN)
+_DNS_REDIRECT_IP = None
LOG = logging.getLogger(__name__)
# Helps cleanup filenames to ensure they aren't FS incompatible
@@ -316,8 +317,9 @@ def multi_log(text, console=True, stderr=True,
else:
log.log(log_level, text)
+
def is_ipv4(instr):
- """ determine if input string is a ipv4 address. return boolean"""
+ """determine if input string is a ipv4 address. return boolean."""
toks = instr.split('.')
if len(toks) != 4:
return False
@@ -825,9 +827,43 @@ def get_cmdline_url(names=('cloud-config-url', 'url'),
def is_resolvable(name):
- """ determine if a url is resolvable, return a boolean """
+ """determine if a url is resolvable, return a boolean
+ This also attempts to be resilient against dns redirection.
+
+ Note, that normal nsswitch resolution is used here. So in order
+ to avoid any utilization of 'search' entries in /etc/resolv.conf
+ we have to append '.'.
+
+ The top level 'invalid' domain is invalid per RFC. And example.com
+ should also not exist. The random entry will be resolved inside
+ the search list.
+ """
+ global _DNS_REDIRECT_IP # pylint: disable=W0603
+ if _DNS_REDIRECT_IP is None:
+ badips = set()
+ badnames = ("does-not-exist.example.com.", "example.invalid.",
+ rand_str())
+ badresults = {}
+ for iname in badnames:
+ try:
+ result = socket.getaddrinfo(iname, None, 0, 0,
+ socket.SOCK_STREAM, socket.AI_CANONNAME)
+ badresults[iname] = []
+ for (_fam, _stype, _proto, cname, sockaddr) in result:
+ badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
+ badips.add(sockaddr[0])
+ except socket.gaierror:
+ pass
+ _DNS_REDIRECT_IP = badips
+ if badresults:
+ LOG.debug("detected dns redirection: %s" % badresults)
+
try:
- socket.getaddrinfo(name, None)
+ result = socket.getaddrinfo(name, None)
+ # check first result's sockaddr field
+ addr = result[0][4][0]
+ if addr in _DNS_REDIRECT_IP:
+ return False
return True
except socket.gaierror:
return False
@@ -839,7 +875,7 @@ def get_hostname():
def is_resolvable_url(url):
- """ determine if this url is resolvable (existing or ip) """
+ """determine if this url is resolvable (existing or ip)."""
return (is_resolvable(urlparse.urlparse(url).hostname))
@@ -1070,7 +1106,7 @@ def hash_blob(blob, routine, mlen=None):
def rename(src, dest):
LOG.debug("Renaming %s to %s", src, dest)
- # TODO use a se guard here??
+ # TODO(harlowja) use a se guard here??
os.rename(src, dest)
@@ -1294,12 +1330,19 @@ def delete_dir_contents(dirname):
del_file(node_fullpath)
-def subp(args, data=None, rcs=None, env=None, capture=True, shell=False):
+def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
+ logstring=False):
if rcs is None:
rcs = [0]
try:
- LOG.debug(("Running command %s with allowed return codes %s"
- " (shell=%s, capture=%s)"), args, rcs, shell, capture)
+
+ if not logstring:
+ LOG.debug(("Running command %s with allowed return codes %s"
+ " (shell=%s, capture=%s)"), args, rcs, shell, capture)
+ else:
+ LOG.debug(("Running hidden command to protect sensitive "
+ "input/output logstring: %s"), logstring)
+
if not capture:
stdout = None
stderr = None
diff --git a/config/cloud.cfg b/config/cloud.cfg
index 72e413d5..9c475251 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -1,8 +1,9 @@
# The top level settings are used as module
# and system configuration.
-# This user will have its password adjusted
-user: ubuntu
+# Implement for Ubuntu only: create the default 'ubuntu' user
+users:
+ default: true
# If this is set, 'root' will not be able to ssh in and they
# will get a message to login instead as the above $user (ubuntu)
@@ -28,10 +29,14 @@ cloud_init_modules:
- update_etc_hosts
- ca-certs
- rsyslog
+ - users-groups
- ssh
# The modules that run in the 'config' stage
cloud_config_modules:
+# Emit the cloud config ready event
+# this can be used by upstart jobs for 'start on cloud-config'.
+ - emit_upstart
- mounts
- ssh-import-id
- locale
@@ -56,6 +61,7 @@ cloud_final_modules:
- scripts-per-boot
- scripts-per-instance
- scripts-user
+ - ssh-authkey-fingerprints
- keys-to-console
- phone-home
- final-message
@@ -70,6 +76,18 @@ system_info:
cloud_dir: /var/lib/cloud/
templates_dir: /etc/cloud/templates/
upstart_dir: /etc/init/
- package_mirror: http://archive.ubuntu.com/ubuntu
- package_mirror_ec2_template: http://%(region)s.ec2.archive.ubuntu.com/ubuntu/
+ package_mirrors:
+ - arches: [i386, amd64]
+ failsafe:
+ primary: http://archive.ubuntu.com/ubuntu
+ security: http://security.ubuntu.com/ubuntu
+ search:
+ primary:
+ - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/
+ - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/
+ security: []
+ - arches: [armhf, armel, default]
+ failsafe:
+ primary: http://ports.ubuntu.com/ubuntu
+ security: http://ports.ubuntu.com/ubuntu
ssh_svcname: ssh
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
new file mode 100644
index 00000000..04f01719
--- /dev/null
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -0,0 +1,88 @@
+# add groups to the system
+# The following example adds the ubuntu group with members foo and bar and
+# the group cloud-users.
+groups:
+ - ubuntu: [foo,bar]
+ - cloud-users
+
+# add users to the system. Users are added after groups are added.
+users:
+ - name: foobar
+ gecos: Foo B. Bar
+ primary-group: foobar
+ groups: users
+ expiredate: 2012-09-01
+ ssh-import-id: foobar
+ lock-passwd: false
+ passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
+ - name: barfoo
+ gecos: Bar B. Foo
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ groups: users, admin
+ ssh-import-id: None
+ lock-passwd: true
+ ssh-authorized-keys:
+ - <ssh pub key 1>
+ - <ssh pub key 2>
+ cloudy:
+ gecos: Magic Cloud App Daemon User
+ inactive: true
+ system: true
+
+# Valid Values:
+# gecos: The user name's real name, i.e. "Bob B. Smith"
+# homedir: Optional. Set to the local path you want to use. Defaults to
+# /home/<username>
+# primary-group: define the primary group. Defaults to a new group created
+# named after the user.
+# groups: Optional. Additional groups to add the user to. Defaults to none
+# lock-passwd: Defaults to true. Lock the password to disable password login
+# inactive: Create the user as inactive
+# passwd: The hash -- not the password itself -- of the password you want
+# to use for this user. You can generate a safe hash via:
+# mkpasswd -m SHA-512 -s 4096
+# (the above command would create a password SHA512 password hash
+# with 4096 salt rounds)
+#
+# Please note: while the use of a hashed password is better than
+# plain text, the use of this feature is not ideal. Also,
+# using a high number of salting rounds will help, but it should
+# not be relied upon.
+#
+# To highlight this risk, running John the Ripper against the
+# example hash above, with a readily available wordlist, revealed
+# the true password in 12 seconds on a i7-2620QM.
+#
+# In other words, this feature is a potential security risk and is
+# provided for your convenience only. If you do not fully trust the
+# medium over which your cloud-config will be transmitted, then you
+# should use SSH authentication only.
+#
+# You have thus been warned.
+# no-create-home: When set to true, do not create home directory.
+# no-user-group: When set to true, do not create a group named after the user.
+# no-log-init: When set to true, do not initialize lastlog and faillog database.
+# ssh-import-id: Optional. Import SSH ids
+# ssh-authorized-key: Optional. Add key to user's ssh authorized keys file
+# sudo: Defaults to none. Set to the sudo string you want to use, i.e.
+# ALL=(ALL) NOPASSWD:ALL. To add multiple rules, use the following
+# format.
+# sudo:
+# - ALL=(ALL) NOPASSWD:/bin/mysql
+# - ALL=(ALL) ALL
+# Note: Please double check your syntax and make sure it is valid.
+# cloud-init does not parse/check the syntax of the sudo
+# directive.
+# system: Create the user as a system user. This means no home directory.
+#
+# Default user creation: Ubuntu Only
+# Unless you define users, you will get an Ubuntu user on Ubuntu systems with the
+# legacy permission (no password sudo, locked user, etc). If however, you want
+# to have the ubuntu user in addition to other users, you need to instruct
+# cloud-init that you also want the default user. To do this use the following
+# syntax:
+# users:
+# default: True
+# foobar: ...
+#
+# users[0] (the first user in users) overrides the user directive.
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index 1e6628d2..56a6c35a 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -167,7 +167,8 @@ mounts:
# complete. This must be an array, and must have 7 fields.
mount_default_fields: [ None, None, "auto", "defaults,nobootwait", "0", "2" ]
-# add each entry to ~/.ssh/authorized_keys for the configured user
+# add each entry to ~/.ssh/authorized_keys for the configured user or the
+# first user defined in the user definition directive.
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host
- ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies
diff --git a/doc/sources/altcloud/README b/doc/sources/altcloud/README
new file mode 100644
index 00000000..87d7949a
--- /dev/null
+++ b/doc/sources/altcloud/README
@@ -0,0 +1,65 @@
+Data source AltCloud will be used to pick up user data on
+RHEVm and vSphere.
+
+RHEVm:
+======
+For RHEVm v3.0 the userdata is injected into the VM using floppy
+injection via the RHEVm dashboard "Custom Properties". The format
+of the Custom Properties entry must be:
+"floppyinject=user-data.txt:<base64 encoded data>"
+
+e.g.: To pass a simple bash script
+
+% cat simple_script.bash
+#!/bin/bash
+echo "Hello Joe!" >> /tmp/JJV_Joe_out.txt
+
+% base64 < simple_script.bash
+IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK
+
+To pass this example script to cloud-init running in a RHEVm v3.0 VM
+set the "Custom Properties" when creating the RHEVm v3.0 VM to:
+floppyinject=user-data.txt:IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK
+
+NOTE: The prefix with file name must be: "floppyinject=user-data.txt:"
+
+It is also possible to launch a RHEVm v3.0 VM and pass optional user
+data to it using the Delta Cloud.
+For more information on Delta Cloud see: http://deltacloud.apache.org
+
+vSphere:
+========
+For VMWare's vSphere the userdata is injected into the VM as an ISO
+via the CD-ROM. This can be done using the vSphere dashboard
+by connecting an ISO image to the CD/DVD drive.
+
+To pass this example script to cloud-init running in a vSphere VM
+set the CD/DVD drive when creating the vSphere VM to point to an
+ISO on the data store.
+
+The ISO must contain the user data:
+
+For example, to pass the same simple_script.bash to vSphere:
+
+Create the ISO:
+===============
+% mkdir my-iso
+
+NOTE: The file name on the ISO must be: "user-data.txt"
+% cp simple_script.bash my-iso/user-data.txt
+
+% genisoimage -o user-data.iso -r my-iso
+
+Verify the ISO:
+===============
+% sudo mkdir /media/vsphere_iso
+% sudo mount -o loop JoeV_CI_02.iso /media/vsphere_iso
+% cat /media/vsphere_iso/user-data.txt
+% sudo umount /media/vsphere_iso
+
+Then, launch the vSphere VM with the ISO user-data.iso attached as a CD-ROM.
+
+It is also possible to launch a vSphere VM and pass optional user
+data to it using the Delta Cloud.
+
+For more information on Delta Cloud see: http://deltacloud.apache.org
diff --git a/doc/configdrive/README b/doc/sources/configdrive/README
index ed9033c9..ed9033c9 100644
--- a/doc/configdrive/README
+++ b/doc/sources/configdrive/README
diff --git a/doc/kernel-cmdline.txt b/doc/sources/kernel-cmdline.txt
index 0b77a9af..0b77a9af 100644
--- a/doc/kernel-cmdline.txt
+++ b/doc/sources/kernel-cmdline.txt
diff --git a/doc/nocloud/README b/doc/sources/nocloud/README
index c94b206a..c94b206a 100644
--- a/doc/nocloud/README
+++ b/doc/sources/nocloud/README
diff --git a/doc/ovf/README b/doc/sources/ovf/README
index e3ef12e0..e3ef12e0 100644
--- a/doc/ovf/README
+++ b/doc/sources/ovf/README
diff --git a/doc/ovf/example/ovf-env.xml b/doc/sources/ovf/example/ovf-env.xml
index 13e8f104..13e8f104 100644
--- a/doc/ovf/example/ovf-env.xml
+++ b/doc/sources/ovf/example/ovf-env.xml
diff --git a/doc/ovf/example/ubuntu-server.ovf b/doc/sources/ovf/example/ubuntu-server.ovf
index 846483a1..846483a1 100644
--- a/doc/ovf/example/ubuntu-server.ovf
+++ b/doc/sources/ovf/example/ubuntu-server.ovf
diff --git a/doc/ovf/make-iso b/doc/sources/ovf/make-iso
index 91d0e2e5..91d0e2e5 100755
--- a/doc/ovf/make-iso
+++ b/doc/sources/ovf/make-iso
diff --git a/doc/ovf/ovf-env.xml.tmpl b/doc/sources/ovf/ovf-env.xml.tmpl
index 8e255d43..8e255d43 100644
--- a/doc/ovf/ovf-env.xml.tmpl
+++ b/doc/sources/ovf/ovf-env.xml.tmpl
diff --git a/doc/ovf/ovfdemo.pem b/doc/sources/ovf/ovfdemo.pem
index 5bc629c8..5bc629c8 100644
--- a/doc/ovf/ovfdemo.pem
+++ b/doc/sources/ovf/ovfdemo.pem
diff --git a/doc/ovf/user-data b/doc/sources/ovf/user-data
index bfac51fd..bfac51fd 100644
--- a/doc/ovf/user-data
+++ b/doc/sources/ovf/user-data
diff --git a/setup.py b/setup.py
index 06b897a5..24476681 100755
--- a/setup.py
+++ b/setup.py
@@ -23,12 +23,10 @@
from glob import glob
import os
-import re
import setuptools
from setuptools.command.install import install
-from distutils.command.install_data import install_data
from distutils.errors import DistutilsArgError
import subprocess
@@ -39,9 +37,9 @@ def is_f(p):
INITSYS_FILES = {
- 'sysvinit': filter((lambda x: is_f(x)), glob('sysvinit/*')),
- 'systemd': filter((lambda x: is_f(x)), glob('systemd/*')),
- 'upstart': filter((lambda x: is_f(x)), glob('upstart/*')),
+ 'sysvinit': [f for f in glob('sysvinit/*') if is_f(f)],
+ 'systemd': [f for f in glob('systemd/*') if is_f(f)],
+ 'upstart': [f for f in glob('upstart/*') if is_f(f)],
}
INITSYS_ROOTS = {
'sysvinit': '/etc/rc.d/init.d',
@@ -70,17 +68,18 @@ def tiny_p(cmd, capture=True):
def get_version():
cmd = ['tools/read-version']
(ver, _e) = tiny_p(cmd)
- return ver.strip()
+ return str(ver).strip()
def read_requires():
cmd = ['tools/read-dependencies']
(deps, _e) = tiny_p(cmd)
- return deps.splitlines()
+ return str(deps).splitlines()
# TODO: Is there a better way to do this??
class InitsysInstallData(install):
+ init_system = None
user_options = install.user_options + [
# This will magically show up in member variable 'init_sys'
('init-system=', None,
@@ -96,13 +95,12 @@ class InitsysInstallData(install):
def finalize_options(self):
install.finalize_options(self)
if self.init_system and self.init_system not in INITSYS_TYPES:
- raise DistutilsArgError(
- ("You must specify one of (%s) when"
- " specifying a init system!") % (", ".join(INITSYS_TYPES))
- )
+ raise DistutilsArgError(("You must specify one of (%s) when"
+ " specifying a init system!") % (", ".join(INITSYS_TYPES)))
elif self.init_system:
- self.distribution.data_files.append((INITSYS_ROOTS[self.init_system],
- INITSYS_FILES[self.init_system]))
+ self.distribution.data_files.append(
+ (INITSYS_ROOTS[self.init_system],
+ INITSYS_FILES[self.init_system]))
# Force that command to reinitalize (with new file list)
self.distribution.reinitialize_command('install_data', True)
@@ -123,11 +121,15 @@ setuptools.setup(name='cloud-init',
('/etc/cloud/templates', glob('templates/*')),
('/usr/share/cloud-init', []),
('/usr/lib/cloud-init',
- ['tools/uncloud-init', 'tools/write-ssh-key-fingerprints']),
- ('/usr/share/doc/cloud-init', filter(is_f, glob('doc/*'))),
- ('/usr/share/doc/cloud-init/examples', filter(is_f, glob('doc/examples/*'))),
- ('/usr/share/doc/cloud-init/examples/seed', filter(is_f, glob('doc/examples/seed/*'))),
- ],
+ ['tools/uncloud-init',
+ 'tools/write-ssh-key-fingerprints']),
+ ('/usr/share/doc/cloud-init',
+ [f for f in glob('doc/*') if is_f(f)]),
+ ('/usr/share/doc/cloud-init/examples',
+ [f for f in glob('doc/examples/*') if is_f(f)]),
+ ('/usr/share/doc/cloud-init/examples/seed',
+ [f for f in glob('doc/examples/seed/*') if is_f(f)]),
+ ],
install_requires=read_requires(),
cmdclass = {
# Use a subclass for install that handles
diff --git a/templates/sources.list.tmpl b/templates/sources.list.tmpl
index f702025f..ce395b3d 100644
--- a/templates/sources.list.tmpl
+++ b/templates/sources.list.tmpl
@@ -52,9 +52,9 @@ deb-src $mirror $codename-updates universe
# deb http://archive.canonical.com/ubuntu $codename partner
# deb-src http://archive.canonical.com/ubuntu $codename partner
-deb http://security.ubuntu.com/ubuntu $codename-security main
-deb-src http://security.ubuntu.com/ubuntu $codename-security main
-deb http://security.ubuntu.com/ubuntu $codename-security universe
-deb-src http://security.ubuntu.com/ubuntu $codename-security universe
-# deb http://security.ubuntu.com/ubuntu $codename-security multiverse
-# deb-src http://security.ubuntu.com/ubuntu $codename-security multiverse
+deb $security $codename-security main
+deb-src $security $codename-security main
+deb $security $codename-security universe
+deb-src $security $codename-security universe
+# deb $security $codename-security multiverse
+# deb-src $security $codename-security multiverse
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index af18955d..ac082076 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -1,6 +1,6 @@
-import StringIO
import logging
import os
+import StringIO
import sys
from mocker import MockerTestCase, ANY, ARGS, KWARGS
@@ -50,24 +50,27 @@ class TestWalkerHandleHandler(MockerTestCase):
self.payload = "dummy payload"
# Mock the write_file function
- write_file_mock = self.mocker.replace(util.write_file, passthrough=False)
+ write_file_mock = self.mocker.replace(util.write_file,
+ passthrough=False)
write_file_mock(expected_file_fullname, self.payload, 0600)
def test_no_errors(self):
"""Payload gets written to file and added to C{pdata}."""
- import_mock = self.mocker.replace(importer.import_module, passthrough=False)
+ import_mock = self.mocker.replace(importer.import_module,
+ passthrough=False)
import_mock(self.expected_module_name)
self.mocker.result(self.module_fake)
self.mocker.replay()
-
+
handlers.walker_handle_handler(self.data, self.ctype, self.filename,
self.payload)
-
+
self.assertEqual(1, self.data["handlercount"])
-
+
def test_import_error(self):
- """Module import errors are logged. No handler added to C{pdata}"""
- import_mock = self.mocker.replace(importer.import_module, passthrough=False)
+ """Module import errors are logged. No handler added to C{pdata}."""
+ import_mock = self.mocker.replace(importer.import_module,
+ passthrough=False)
import_mock(self.expected_module_name)
self.mocker.throw(ImportError())
self.mocker.replay()
@@ -78,8 +81,9 @@ class TestWalkerHandleHandler(MockerTestCase):
self.assertEqual(0, self.data["handlercount"])
def test_attribute_error(self):
- """Attribute errors are logged. No handler added to C{pdata}"""
- import_mock = self.mocker.replace(importer.import_module, passthrough=False)
+ """Attribute errors are logged. No handler added to C{pdata}."""
+ import_mock = self.mocker.replace(importer.import_module,
+ passthrough=False)
import_mock(self.expected_module_name)
self.mocker.result(self.module_fake)
self.mocker.throw(AttributeError())
@@ -152,7 +156,7 @@ class TestHandlerHandlePart(MockerTestCase):
self.payload, self.frequency)
def test_no_handle_when_modfreq_once(self):
- """C{handle_part} is not called if frequency is once"""
+ """C{handle_part} is not called if frequency is once."""
self.frequency = "once"
mod_mock = self.mocker.mock()
getattr(mod_mock, "frequency")
@@ -185,13 +189,15 @@ class TestCmdlineUrl(MockerTestCase):
payload = "0"
cmdline = "ro %s=%s bar=1" % (key, url)
- mock_readurl = self.mocker.replace(url_helper.readurl, passthrough=False)
+ mock_readurl = self.mocker.replace(url_helper.readurl,
+ passthrough=False)
mock_readurl(url)
self.mocker.result(url_helper.UrlResponse(200, payload))
self.mocker.replay()
self.assertEqual((key, url, None),
- util.get_cmdline_url(names=[key], starts="xxxxxx", cmdline=cmdline))
+ util.get_cmdline_url(names=[key], starts="xxxxxx",
+ cmdline=cmdline))
def test_valid_content(self):
url = "http://example.com/foo"
@@ -199,7 +205,8 @@ class TestCmdlineUrl(MockerTestCase):
payload = "xcloud-config\nmydata: foo\nbar: wark\n"
cmdline = "ro %s=%s bar=1" % (key, url)
- mock_readurl = self.mocker.replace(url_helper.readurl, passthrough=False)
+ mock_readurl = self.mocker.replace(url_helper.readurl,
+ passthrough=False)
mock_readurl(url)
self.mocker.result(url_helper.UrlResponse(200, payload))
self.mocker.replay()
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index 84d85d4d..ebc0bd51 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -1,4 +1,4 @@
-"""Tests of the built-in user data handlers"""
+"""Tests of the built-in user data handlers."""
import os
@@ -6,7 +6,6 @@ from mocker import MockerTestCase
from cloudinit import handlers
from cloudinit import helpers
-from cloudinit import util
from cloudinit.handlers import upstart_job
@@ -34,7 +33,7 @@ class TestBuiltins(MockerTestCase):
None, None, None)
self.assertEquals(0, len(os.listdir(up_root)))
- def test_upstart_frequency_single(self):
+ def test_upstart_frequency_single(self):
c_root = self.makeDir()
up_root = self.makeDir()
paths = helpers.Paths({
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
new file mode 100644
index 00000000..bda61c7e
--- /dev/null
+++ b/tests/unittests/test_datasource/test_altcloud.py
@@ -0,0 +1,445 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joe VLcek <JVLcek@RedHat.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+'''
+This test file exercises the code in sources DataSourceAltCloud.py
+'''
+
+import os
+import shutil
+import tempfile
+
+from cloudinit import helpers
+from unittest import TestCase
+
+# Get the cloudinit.sources.DataSourceAltCloud import items needed.
+import cloudinit.sources.DataSourceAltCloud
+from cloudinit.sources.DataSourceAltCloud import DataSourceAltCloud
+from cloudinit.sources.DataSourceAltCloud import read_user_data_callback
+
+
+def _write_cloud_info_file(value):
+ '''
+ Populate the CLOUD_INFO_FILE which would be populated
+ with a cloud backend identifier ImageFactory when building
+ an image with ImageFactory.
+ '''
+ cifile = open(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 'w')
+ cifile.write(value)
+ cifile.close()
+ os.chmod(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 0664)
+
+
+def _remove_cloud_info_file():
+ '''
+ Remove the test CLOUD_INFO_FILE
+ '''
+ os.remove(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE)
+
+
+def _write_user_data_files(mount_dir, value):
+ '''
+ Populate the deltacloud_user_data_file the user_data_file
+ which would be populated with user data.
+ '''
+ deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
+ user_data_file = mount_dir + '/user-data.txt'
+
+ udfile = open(deltacloud_user_data_file, 'w')
+ udfile.write(value)
+ udfile.close()
+ os.chmod(deltacloud_user_data_file, 0664)
+
+ udfile = open(user_data_file, 'w')
+ udfile.write(value)
+ udfile.close()
+ os.chmod(user_data_file, 0664)
+
+
+def _remove_user_data_files(mount_dir,
+ dc_file=True,
+ non_dc_file=True):
+ '''
+ Remove the test files: deltacloud_user_data_file and
+ user_data_file
+ '''
+ deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
+ user_data_file = mount_dir + '/user-data.txt'
+
+    # Ignore any failures removing files that are already gone.
+ if dc_file:
+ try:
+ os.remove(deltacloud_user_data_file)
+ except OSError:
+ pass
+
+ if non_dc_file:
+ try:
+ os.remove(user_data_file)
+ except OSError:
+ pass
+
+
+class TestGetCloudType(TestCase):
+ '''
+ Test to exercise method: DataSourceAltCloud.get_cloud_type()
+ '''
+
+ def setUp(self):
+ '''Set up.'''
+ self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+
+ def tearDown(self):
+ # Reset
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['dmidecode', '--string', 'system-product-name']
+
+ def test_rhev(self):
+ '''
+ Test method get_cloud_type() for RHEVm systems.
+ Forcing dmidecode return to match a RHEVm system: RHEV Hypervisor
+ '''
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['echo', 'RHEV Hypervisor']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals('RHEV', \
+ dsrc.get_cloud_type())
+
+ def test_vsphere(self):
+ '''
+        Test method get_cloud_type() for vSphere systems.
+        Forcing dmidecode return to match a vSphere system: VMware Virtual Platform
+ '''
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['echo', 'VMware Virtual Platform']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals('VSPHERE', \
+ dsrc.get_cloud_type())
+
+ def test_unknown(self):
+ '''
+ Test method get_cloud_type() for unknown systems.
+ Forcing dmidecode return to match an unrecognized return.
+ '''
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['echo', 'Unrecognized Platform']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals('UNKNOWN', \
+ dsrc.get_cloud_type())
+
+ def test_exception1(self):
+ '''
+ Test method get_cloud_type() where command dmidecode fails.
+ '''
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['ls', 'bad command']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals('UNKNOWN', \
+ dsrc.get_cloud_type())
+
+ def test_exception2(self):
+ '''
+ Test method get_cloud_type() where command dmidecode is not available.
+ '''
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['bad command']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals('UNKNOWN', \
+ dsrc.get_cloud_type())
+
+
+class TestGetDataCloudInfoFile(TestCase):
+ '''
+ Test to exercise method: DataSourceAltCloud.get_data()
+ With a contrived CLOUD_INFO_FILE
+ '''
+ def setUp(self):
+ '''Set up.'''
+ self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ self.cloud_info_file = tempfile.mkstemp()[1]
+ cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ self.cloud_info_file
+
+ def tearDown(self):
+ # Reset
+
+ # Attempt to remove the temp file ignoring errors
+ try:
+ os.remove(self.cloud_info_file)
+ except OSError:
+ pass
+
+ cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ '/etc/sysconfig/cloud-info'
+
+ def test_rhev(self):
+ '''Success Test module get_data() forcing RHEV.'''
+
+ _write_cloud_info_file('RHEV')
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_rhevm = lambda: True
+ self.assertEquals(True, dsrc.get_data())
+
+ def test_vsphere(self):
+ '''Success Test module get_data() forcing VSPHERE.'''
+
+ _write_cloud_info_file('VSPHERE')
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_vsphere = lambda: True
+ self.assertEquals(True, dsrc.get_data())
+
+ def test_fail_rhev(self):
+ '''Failure Test module get_data() forcing RHEV.'''
+
+ _write_cloud_info_file('RHEV')
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_rhevm = lambda: False
+ self.assertEquals(False, dsrc.get_data())
+
+ def test_fail_vsphere(self):
+ '''Failure Test module get_data() forcing VSPHERE.'''
+
+ _write_cloud_info_file('VSPHERE')
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_vsphere = lambda: False
+ self.assertEquals(False, dsrc.get_data())
+
+ def test_unrecognized(self):
+ '''Failure Test module get_data() forcing unrecognized.'''
+
+ _write_cloud_info_file('unrecognized')
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals(False, dsrc.get_data())
+
+
+class TestGetDataNoCloudInfoFile(TestCase):
+ '''
+ Test to exercise method: DataSourceAltCloud.get_data()
+ Without a CLOUD_INFO_FILE
+ '''
+ def setUp(self):
+ '''Set up.'''
+ self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ 'no such file'
+
+ def tearDown(self):
+ # Reset
+ cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ '/etc/sysconfig/cloud-info'
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['dmidecode', '--string', 'system-product-name']
+
+ def test_rhev_no_cloud_file(self):
+ '''Test No cloud info file module get_data() forcing RHEV.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['echo', 'RHEV Hypervisor']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_rhevm = lambda: True
+ self.assertEquals(True, dsrc.get_data())
+
+ def test_vsphere_no_cloud_file(self):
+ '''Test No cloud info file module get_data() forcing VSPHERE.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['echo', 'VMware Virtual Platform']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_vsphere = lambda: True
+ self.assertEquals(True, dsrc.get_data())
+
+ def test_failure_no_cloud_file(self):
+ '''Test No cloud info file module get_data() forcing unrecognized.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
+ ['echo', 'Unrecognized Platform']
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+ self.assertEquals(False, dsrc.get_data())
+
+
+class TestUserDataRhevm(TestCase):
+ '''
+ Test to exercise method: DataSourceAltCloud.user_data_rhevm()
+ '''
+ def setUp(self):
+ '''Set up.'''
+ self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ self.mount_dir = tempfile.mkdtemp()
+
+ _write_user_data_files(self.mount_dir, 'test user data')
+
+ def tearDown(self):
+ # Reset
+
+ _remove_user_data_files(self.mount_dir)
+
+ # Attempt to remove the temp dir ignoring errors
+ try:
+ shutil.rmtree(self.mount_dir)
+ except OSError:
+ pass
+
+ cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ '/etc/sysconfig/cloud-info'
+ cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
+ ['/sbin/modprobe', 'floppy']
+ cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \
+ ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5']
+
+ def test_mount_cb_fails(self):
+ '''Test user_data_rhevm() where mount_cb fails.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
+ ['echo', 'modprobe floppy']
+
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+
+ self.assertEquals(False, dsrc.user_data_rhevm())
+
+ def test_modprobe_fails(self):
+ '''Test user_data_rhevm() where modprobe fails.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
+ ['ls', 'modprobe floppy']
+
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+
+ self.assertEquals(False, dsrc.user_data_rhevm())
+
+ def test_no_modprobe_cmd(self):
+ '''Test user_data_rhevm() with no modprobe command.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
+ ['bad command', 'modprobe floppy']
+
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+
+ self.assertEquals(False, dsrc.user_data_rhevm())
+
+ def test_udevadm_fails(self):
+ '''Test user_data_rhevm() where udevadm fails.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \
+ ['ls', 'udevadm floppy']
+
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+
+ self.assertEquals(False, dsrc.user_data_rhevm())
+
+ def test_no_udevadm_cmd(self):
+ '''Test user_data_rhevm() with no udevadm command.'''
+
+ cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \
+ ['bad command', 'udevadm floppy']
+
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+
+ self.assertEquals(False, dsrc.user_data_rhevm())
+
+
+class TestUserDataVsphere(TestCase):
+ '''
+ Test to exercise method: DataSourceAltCloud.user_data_vsphere()
+ '''
+ def setUp(self):
+ '''Set up.'''
+ self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ self.mount_dir = tempfile.mkdtemp()
+
+ _write_user_data_files(self.mount_dir, 'test user data')
+
+ def tearDown(self):
+ # Reset
+
+ _remove_user_data_files(self.mount_dir)
+
+ # Attempt to remove the temp dir ignoring errors
+ try:
+ shutil.rmtree(self.mount_dir)
+ except OSError:
+ pass
+
+ cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ '/etc/sysconfig/cloud-info'
+
+ def test_user_data_vsphere(self):
+ '''Test user_data_vsphere() where mount_cb fails.'''
+
+ cloudinit.sources.DataSourceAltCloud.MEDIA_DIR = self.mount_dir
+
+ dsrc = DataSourceAltCloud({}, None, self.paths)
+
+ self.assertEquals(False, dsrc.user_data_vsphere())
+
+
+class TestReadUserDataCallback(TestCase):
+ '''
+ Test to exercise method: DataSourceAltCloud.read_user_data_callback()
+ '''
+ def setUp(self):
+ '''Set up.'''
+ self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ self.mount_dir = tempfile.mkdtemp()
+
+ _write_user_data_files(self.mount_dir, 'test user data')
+
+ def tearDown(self):
+ # Reset
+
+ _remove_user_data_files(self.mount_dir)
+
+ # Attempt to remove the temp dir ignoring errors
+ try:
+ shutil.rmtree(self.mount_dir)
+ except OSError:
+ pass
+
+ def test_callback_both(self):
+ '''Test read_user_data_callback() with both files.'''
+
+ self.assertEquals('test user data',
+ read_user_data_callback(self.mount_dir))
+
+ def test_callback_dc(self):
+ '''Test read_user_data_callback() with only DC file.'''
+
+ _remove_user_data_files(self.mount_dir,
+ dc_file=False,
+ non_dc_file=True)
+
+ self.assertEquals('test user data',
+ read_user_data_callback(self.mount_dir))
+
+ def test_callback_non_dc(self):
+ '''Test read_user_data_callback() with only non-DC file.'''
+
+ _remove_user_data_files(self.mount_dir,
+ dc_file=True,
+ non_dc_file=False)
+
+ self.assertEquals('test user data',
+ read_user_data_callback(self.mount_dir))
+
+ def test_callback_none(self):
+        '''Test read_user_data_callback() when no files are found.'''
+
+ _remove_user_data_files(self.mount_dir)
+ self.assertEquals(None, read_user_data_callback(self.mount_dir))
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
new file mode 100644
index 00000000..55573114
--- /dev/null
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -0,0 +1,177 @@
+from copy import copy
+import json
+import os
+import os.path
+import shutil
+import tempfile
+from unittest import TestCase
+
+from cloudinit.sources import DataSourceConfigDrive as ds
+from cloudinit import util
+
+
+PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
+EC2_META = {
+ 'ami-id': 'ami-00000001',
+ 'ami-launch-index': 0,
+ 'ami-manifest-path': 'FIXME',
+ 'block-device-mapping': {
+ 'ami': 'sda1',
+ 'ephemeral0': 'sda2',
+ 'root': '/dev/sda1',
+ 'swap': 'sda3'},
+ 'hostname': 'sm-foo-test.novalocal',
+ 'instance-action': 'none',
+ 'instance-id': 'i-00000001',
+ 'instance-type': 'm1.tiny',
+ 'local-hostname': 'sm-foo-test.novalocal',
+ 'local-ipv4': None,
+ 'placement': {'availability-zone': 'nova'},
+ 'public-hostname': 'sm-foo-test.novalocal',
+ 'public-ipv4': '',
+ 'public-keys': {'0': {'openssh-key': PUBKEY}},
+ 'reservation-id': 'r-iru5qm4m',
+ 'security-groups': ['default']
+}
+USER_DATA = '#!/bin/sh\necho This is user data\n'
+OSTACK_META = {
+ 'availability_zone': 'nova',
+ 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
+ {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
+ 'hostname': 'sm-foo-test.novalocal',
+ 'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
+ 'name': 'sm-foo-test',
+ 'public_keys': {'mykey': PUBKEY},
+ 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
+
+CONTENT_0 = 'This is contents of /etc/foo.cfg\n'
+CONTENT_1 = '# this is /etc/bar/bar.cfg\n'
+
+CFG_DRIVE_FILES_V2 = {
+ 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
+ 'ec2/2009-04-04/user-data': USER_DATA,
+ 'ec2/latest/meta-data.json': json.dumps(EC2_META),
+ 'ec2/latest/user-data': USER_DATA,
+ 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META),
+ 'openstack/2012-08-10/user_data': USER_DATA,
+ 'openstack/content/0000': CONTENT_0,
+ 'openstack/content/0001': CONTENT_1,
+ 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
+ 'openstack/latest/user_data': USER_DATA}
+
+
+class TestConfigDriveDataSource(TestCase):
+
+ def setUp(self):
+ super(TestConfigDriveDataSource, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+
+ def tearDown(self):
+ try:
+ shutil.rmtree(self.tmp)
+ except OSError:
+ pass
+
+ def test_dir_valid(self):
+ """Verify a dir is read as such."""
+
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+
+ found = ds.read_config_drive_dir(self.tmp)
+
+ expected_md = copy(OSTACK_META)
+ expected_md['instance-id'] = expected_md['uuid']
+
+ self.assertEqual(USER_DATA, found['userdata'])
+ self.assertEqual(expected_md, found['metadata'])
+ self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0)
+ self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1)
+
+ def test_seed_dir_valid_extra(self):
+ """Verify extra files do not affect datasource validity."""
+
+ data = copy(CFG_DRIVE_FILES_V2)
+ data["myfoofile.txt"] = "myfoocontent"
+ data["openstack/latest/random-file.txt"] = "random-content"
+
+ populate_dir(self.tmp, data)
+
+ found = ds.read_config_drive_dir(self.tmp)
+
+ expected_md = copy(OSTACK_META)
+ expected_md['instance-id'] = expected_md['uuid']
+
+ self.assertEqual(expected_md, found['metadata'])
+
+ def test_seed_dir_bad_json_metadata(self):
+ """Verify that bad json in metadata raises BrokenConfigDriveDir."""
+ data = copy(CFG_DRIVE_FILES_V2)
+
+ data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}"
+ data["openstack/latest/meta_data.json"] = "non-json garbage {}"
+
+ populate_dir(self.tmp, data)
+
+ self.assertRaises(ds.BrokenConfigDriveDir,
+ ds.read_config_drive_dir, self.tmp)
+
+ def test_seed_dir_no_configdrive(self):
+ """Verify that no metadata raises NonConfigDriveDir."""
+
+ my_d = os.path.join(self.tmp, "non-configdrive")
+ data = copy(CFG_DRIVE_FILES_V2)
+ data["myfoofile.txt"] = "myfoocontent"
+ data["openstack/latest/random-file.txt"] = "random-content"
+ data["content/foo"] = "foocontent"
+
+ self.assertRaises(ds.NonConfigDriveDir,
+ ds.read_config_drive_dir, my_d)
+
+ def test_seed_dir_missing(self):
+ """Verify that missing seed_dir raises NonConfigDriveDir."""
+ my_d = os.path.join(self.tmp, "nonexistantdirectory")
+ self.assertRaises(ds.NonConfigDriveDir,
+ ds.read_config_drive_dir, my_d)
+
+ def test_find_candidates(self):
+ devs_with_answers = {
+ "TYPE=vfat": [],
+ "TYPE=iso9660": ["/dev/vdb"],
+ "LABEL=config-2": ["/dev/vdb"],
+ }
+
+ def my_devs_with(criteria):
+ return devs_with_answers[criteria]
+
+ try:
+ orig_find_devs_with = util.find_devs_with
+ util.find_devs_with = my_devs_with
+
+ self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
+
+ # add a vfat item
+ # zdd reverse sorts after vdb, but config-2 label is preferred
+ devs_with_answers['TYPE=vfat'] = ["/dev/zdd"]
+ self.assertEqual(["/dev/vdb", "/dev/zdd"],
+ ds.find_candidate_devs())
+
+ # verify that partitions are not considered
+ devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
+ "TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]}
+ self.assertEqual([], ds.find_candidate_devs())
+
+ finally:
+ util.find_devs_with = orig_find_devs_with
+
+
+def populate_dir(seed_dir, files):
+ for (name, content) in files.iteritems():
+ path = os.path.join(seed_dir, name)
+ dirname = os.path.dirname(path)
+ if not os.path.isdir(dirname):
+ os.makedirs(dirname)
+ with open(path, "w") as fp:
+ fp.write(content)
+ fp.close()
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index 261c410a..85e6add0 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -1,10 +1,8 @@
-import os
-from StringIO import StringIO
from copy import copy
+import os
-from cloudinit import util
-from cloudinit import url_helper
from cloudinit.sources import DataSourceMAAS
+from cloudinit import url_helper
from mocker import MockerTestCase
@@ -17,7 +15,7 @@ class TestMAASDataSource(MockerTestCase):
self.tmp = self.makeDir()
def test_seed_dir_valid(self):
- """Verify a valid seeddir is read as such"""
+ """Verify a valid seeddir is read as such."""
data = {'instance-id': 'i-valid01',
'local-hostname': 'valid01-hostname',
@@ -37,7 +35,7 @@ class TestMAASDataSource(MockerTestCase):
self.assertFalse(('user-data' in metadata))
def test_seed_dir_valid_extra(self):
- """Verify extra files do not affect seed_dir validity """
+ """Verify extra files do not affect seed_dir validity."""
data = {'instance-id': 'i-valid-extra',
'local-hostname': 'valid-extra-hostname',
@@ -56,7 +54,7 @@ class TestMAASDataSource(MockerTestCase):
self.assertFalse(('foo' in metadata))
def test_seed_dir_invalid(self):
- """Verify that invalid seed_dir raises MAASSeedDirMalformed"""
+ """Verify that invalid seed_dir raises MAASSeedDirMalformed."""
valid = {'instance-id': 'i-instanceid',
'local-hostname': 'test-hostname', 'user-data': ''}
@@ -80,20 +78,20 @@ class TestMAASDataSource(MockerTestCase):
DataSourceMAAS.read_maas_seed_dir, my_d)
def test_seed_dir_none(self):
- """Verify that empty seed_dir raises MAASSeedDirNone"""
+ """Verify that empty seed_dir raises MAASSeedDirNone."""
my_d = os.path.join(self.tmp, "valid_empty")
self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
DataSourceMAAS.read_maas_seed_dir, my_d)
def test_seed_dir_missing(self):
- """Verify that missing seed_dir raises MAASSeedDirNone"""
- self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
+ """Verify that missing seed_dir raises MAASSeedDirNone."""
+ self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
DataSourceMAAS.read_maas_seed_dir,
os.path.join(self.tmp, "nonexistantdirectory"))
def test_seed_url_valid(self):
- """Verify that valid seed_url is read as such"""
+ """Verify that valid seed_url is read as such."""
valid = {'meta-data/instance-id': 'i-instanceid',
'meta-data/local-hostname': 'test-hostname',
'meta-data/public-keys': 'test-hostname',
@@ -131,11 +129,11 @@ class TestMAASDataSource(MockerTestCase):
valid['meta-data/local-hostname'])
def test_seed_url_invalid(self):
- """Verify that invalid seed_url raises MAASSeedDirMalformed"""
+ """Verify that invalid seed_url raises MAASSeedDirMalformed."""
pass
def test_seed_url_missing(self):
- """Verify seed_url with no found entries raises MAASSeedDirNone"""
+ """Verify seed_url with no found entries raises MAASSeedDirNone."""
pass
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
new file mode 100644
index 00000000..2df4c2f0
--- /dev/null
+++ b/tests/unittests/test_distros/test_generic.py
@@ -0,0 +1,121 @@
+from mocker import MockerTestCase
+
+from cloudinit import distros
+
+unknown_arch_info = {
+ 'arches': ['default'],
+ 'failsafe': {'primary': 'http://fs-primary-default',
+ 'security': 'http://fs-security-default'}
+}
+
+package_mirrors = [
+ {'arches': ['i386', 'amd64'],
+ 'failsafe': {'primary': 'http://fs-primary-intel',
+ 'security': 'http://fs-security-intel'},
+ 'search': {
+ 'primary': ['http://%(ec2_region)s.ec2/',
+ 'http://%(availability_zone)s.clouds/'],
+ 'security': ['http://security-mirror1-intel',
+ 'http://security-mirror2-intel']}},
+ {'arches': ['armhf', 'armel'],
+ 'failsafe': {'primary': 'http://fs-primary-arm',
+ 'security': 'http://fs-security-arm'}},
+ unknown_arch_info
+]
+
+gpmi = distros._get_package_mirror_info # pylint: disable=W0212
+gapmi = distros._get_arch_package_mirror_info # pylint: disable=W0212
+
+
+class TestGenericDistro(MockerTestCase):
+
+ def return_first(self, mlist):
+ if not mlist:
+ return None
+ return mlist[0]
+
+ def return_second(self, mlist):
+ if not mlist:
+ return None
+ return mlist[1]
+
+ def return_none(self, _mlist):
+ return None
+
+ def return_last(self, mlist):
+ if not mlist:
+ return None
+ return(mlist[-1])
+
+ def setUp(self):
+ super(TestGenericDistro, self).setUp()
+        # Make a temp directory for tests to use.
+ self.tmp = self.makeDir()
+
+ def test_arch_package_mirror_info_unknown(self):
+        """For an unknown arch, we should get back the entry with arch 'default'."""
+ arch_mirrors = gapmi(package_mirrors, arch="unknown")
+ self.assertEqual(unknown_arch_info, arch_mirrors)
+
+ def test_arch_package_mirror_info_known(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ self.assertEqual(package_mirrors[0], arch_mirrors)
+
+ def test_get_package_mirror_info_az_ec2(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+
+ results = gpmi(arch_mirrors, availability_zone="us-east-1a",
+ mirror_filter=self.return_first)
+ self.assertEqual(results,
+ {'primary': 'http://us-east-1.ec2/',
+ 'security': 'http://security-mirror1-intel'})
+
+ results = gpmi(arch_mirrors, availability_zone="us-east-1a",
+ mirror_filter=self.return_second)
+ self.assertEqual(results,
+ {'primary': 'http://us-east-1a.clouds/',
+ 'security': 'http://security-mirror2-intel'})
+
+ results = gpmi(arch_mirrors, availability_zone="us-east-1a",
+ mirror_filter=self.return_none)
+ self.assertEqual(results, package_mirrors[0]['failsafe'])
+
+ def test_get_package_mirror_info_az_non_ec2(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+
+ results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor",
+ mirror_filter=self.return_first)
+ self.assertEqual(results,
+ {'primary': 'http://nova.cloudvendor.clouds/',
+ 'security': 'http://security-mirror1-intel'})
+
+ results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor",
+ mirror_filter=self.return_last)
+ self.assertEqual(results,
+ {'primary': 'http://nova.cloudvendor.clouds/',
+ 'security': 'http://security-mirror2-intel'})
+
+ def test_get_package_mirror_info_none(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+
+        # because both search entries here use replacement based on
+        # availability-zone, the filter will be called with an empty list and
+        # the failsafe mirror should be taken.
+ results = gpmi(arch_mirrors, availability_zone=None,
+ mirror_filter=self.return_first)
+ self.assertEqual(results,
+ {'primary': 'http://fs-primary-intel',
+ 'security': 'http://security-mirror1-intel'})
+
+ results = gpmi(arch_mirrors, availability_zone=None,
+ mirror_filter=self.return_last)
+ self.assertEqual(results,
+ {'primary': 'http://fs-primary-intel',
+ 'security': 'http://security-mirror2-intel'})
+
+
+#def _get_package_mirror_info(mirror_info, availability_zone=None,
+# mirror_filter=util.search_for_mirror):
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
index 1f96e992..d3df5c50 100644
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler/test_handler_ca_certs.py
@@ -1,8 +1,8 @@
from mocker import MockerTestCase
-from cloudinit import util
from cloudinit import cloud
from cloudinit import helpers
+from cloudinit import util
from cloudinit.config import cc_ca_certs
@@ -26,7 +26,8 @@ class TestNoConfig(MockerTestCase):
self.mocker.replace(cc_ca_certs.update_ca_certs, passthrough=False)
self.mocker.replay()
- cc_ca_certs.handle(self.name, config, self.cloud_init, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, self.cloud_init, self.log,
+ self.args)
class TestConfig(MockerTestCase):
@@ -39,11 +40,12 @@ class TestConfig(MockerTestCase):
self.args = []
# Mock out the functions that actually modify the system
- self.mock_add = self.mocker.replace(cc_ca_certs.add_ca_certs, passthrough=False)
+ self.mock_add = self.mocker.replace(cc_ca_certs.add_ca_certs,
+ passthrough=False)
self.mock_update = self.mocker.replace(cc_ca_certs.update_ca_certs,
passthrough=False)
- self.mock_remove = self.mocker.replace(cc_ca_certs.remove_default_ca_certs,
- passthrough=False)
+ self.mock_remove = self.mocker.replace(
+ cc_ca_certs.remove_default_ca_certs, passthrough=False)
# Order must be correct
self.mocker.order()
@@ -62,7 +64,7 @@ class TestConfig(MockerTestCase):
cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_empty_trusted_list(self):
- """Test that no certificate are written if 'trusted' list is empty"""
+        """Test that no certificates are written if 'trusted' list is empty."""
config = {"ca-certs": {"trusted": []}}
# No functions should be called
@@ -72,7 +74,7 @@ class TestConfig(MockerTestCase):
cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_single_trusted(self):
- """Test that a single cert gets passed to add_ca_certs"""
+ """Test that a single cert gets passed to add_ca_certs."""
config = {"ca-certs": {"trusted": ["CERT1"]}}
self.mock_add(self.paths, ["CERT1"])
@@ -82,7 +84,7 @@ class TestConfig(MockerTestCase):
cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_multiple_trusted(self):
- """Test that multiple certs get passed to add_ca_certs"""
+ """Test that multiple certs get passed to add_ca_certs."""
config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}}
self.mock_add(self.paths, ["CERT1", "CERT2"])
@@ -92,7 +94,7 @@ class TestConfig(MockerTestCase):
cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_remove_default_ca_certs(self):
- """Test remove_defaults works as expected"""
+ """Test remove_defaults works as expected."""
config = {"ca-certs": {"remove-defaults": True}}
self.mock_remove(self.paths)
@@ -102,7 +104,7 @@ class TestConfig(MockerTestCase):
cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_no_remove_defaults_if_false(self):
- """Test remove_defaults is not called when config value is False"""
+ """Test remove_defaults is not called when config value is False."""
config = {"ca-certs": {"remove-defaults": False}}
self.mock_update()
@@ -111,7 +113,7 @@ class TestConfig(MockerTestCase):
cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_correct_order_for_remove_then_add(self):
- """Test remove_defaults is not called when config value is False"""
+        """Test remove_defaults is called before add_ca_certs."""
config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}}
self.mock_remove(self.paths)
@@ -137,7 +139,7 @@ class TestAddCaCerts(MockerTestCase):
cc_ca_certs.add_ca_certs(self.paths, [])
def test_single_cert(self):
- """Test adding a single certificate to the trusted CAs"""
+ """Test adding a single certificate to the trusted CAs."""
cert = "CERT1\nLINE2\nLINE3"
mock_write = self.mocker.replace(util.write_file, passthrough=False)
@@ -150,7 +152,7 @@ class TestAddCaCerts(MockerTestCase):
cc_ca_certs.add_ca_certs(self.paths, [cert])
def test_multiple_certs(self):
- """Test adding multiple certificates to the trusted CAs"""
+ """Test adding multiple certificates to the trusted CAs."""
certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"]
expected_cert_file = "\n".join(certs)
@@ -183,8 +185,8 @@ class TestRemoveDefaultCaCerts(MockerTestCase):
})
def test_commands(self):
- mock_delete_dir_contents = self.mocker.replace(util.delete_dir_contents,
- passthrough=False)
+ mock_delete_dir_contents = self.mocker.replace(
+ util.delete_dir_contents, passthrough=False)
mock_write = self.mocker.replace(util.write_file, passthrough=False)
mock_subp = self.mocker.replace(util.subp,
passthrough=False)
diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py
index 861642b6..82a4c555 100644
--- a/tests/unittests/test_userdata.py
+++ b/tests/unittests/test_userdata.py
@@ -1,21 +1,17 @@
-"""Tests for handling of userdata within cloud init"""
+"""Tests for handling of userdata within cloud init."""
import StringIO
import logging
import os
-import shutil
-import tempfile
from email.mime.base import MIMEBase
from mocker import MockerTestCase
-from cloudinit import helpers
from cloudinit import log
from cloudinit import sources
from cloudinit import stages
-from cloudinit import util
INSTANCE_ID = "i-testing"
@@ -58,7 +54,7 @@ class TestConsumeUserData(MockerTestCase):
return log_file
def test_unhandled_type_warning(self):
- """Raw text without magic is ignored but shows warning"""
+ """Raw text without magic is ignored but shows warning."""
ci = stages.Init()
data = "arbitrary text\n"
ci.datasource = FakeDataSource(data)
@@ -74,7 +70,7 @@ class TestConsumeUserData(MockerTestCase):
log_file.getvalue())
def test_mime_text_plain(self):
- """Mime message of type text/plain is ignored but shows warning"""
+ """Mime message of type text/plain is ignored but shows warning."""
ci = stages.Init()
message = MIMEBase("text", "plain")
message.set_payload("Just text")
@@ -90,9 +86,8 @@ class TestConsumeUserData(MockerTestCase):
"Unhandled unknown content-type (text/plain)",
log_file.getvalue())
-
def test_shellscript(self):
- """Raw text starting #!/bin/sh is treated as script"""
+ """Raw text starting #!/bin/sh is treated as script."""
ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
ci.datasource = FakeDataSource(script)
@@ -108,7 +103,7 @@ class TestConsumeUserData(MockerTestCase):
self.assertEqual("", log_file.getvalue())
def test_mime_text_x_shellscript(self):
- """Mime message of type text/x-shellscript is treated as script"""
+ """Mime message of type text/x-shellscript is treated as script."""
ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
message = MIMEBase("text", "x-shellscript")
@@ -126,7 +121,7 @@ class TestConsumeUserData(MockerTestCase):
self.assertEqual("", log_file.getvalue())
def test_mime_text_plain_shell(self):
- """Mime type text/plain starting #!/bin/sh is treated as script"""
+ """Mime type text/plain starting #!/bin/sh is treated as script."""
ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
message = MIMEBase("text", "plain")
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 93979f06..15fcbd26 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,11 +1,11 @@
import os
import stat
-from unittest import TestCase
from mocker import MockerTestCase
+from unittest import TestCase
-from cloudinit import util
from cloudinit import importer
+from cloudinit import util
class FakeSelinux(object):
@@ -14,7 +14,7 @@ class FakeSelinux(object):
self.match_what = match_what
self.restored = []
- def matchpathcon(self, path, mode):
+ def matchpathcon(self, path, mode): # pylint: disable=W0613
if path == self.match_what:
return
else:
@@ -23,7 +23,7 @@ class FakeSelinux(object):
def is_selinux_enabled(self):
return True
- def restorecon(self, path, recursive):
+ def restorecon(self, path, recursive): # pylint: disable=W0613
self.restored.append(path)
diff --git a/tools/hacking.py b/tools/hacking.py
index d0c27d25..11163df3 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -23,11 +23,8 @@ built on top of pep8.py
import inspect
import logging
-import os
import re
import sys
-import tokenize
-import warnings
import pep8
@@ -103,7 +100,7 @@ def cloud_todo_format(physical_line):
"""
pos = physical_line.find('TODO')
pos1 = physical_line.find('TODO(')
- pos2 = physical_line.find('#') # make sure it's a comment
+ pos2 = physical_line.find('#') # make sure it's a comment
if (pos != pos1 and pos2 >= 0 and pos2 < pos):
return pos, "N101: Use TODO(NAME)"
@@ -136,7 +133,6 @@ def cloud_docstring_multiline_end(physical_line):
return (pos, "N403: multi line docstring end on new line")
-
current_file = ""
@@ -158,7 +154,7 @@ def add_cloud():
if not inspect.isfunction(function):
continue
if name.startswith("cloud_"):
- exec("pep8.%s = %s" % (name, name))
+ exec("pep8.%s = %s" % (name, name)) # pylint: disable=W0122
if __name__ == "__main__":
# NOVA based 'hacking.py' error codes start with an N
@@ -167,9 +163,8 @@ if __name__ == "__main__":
pep8.current_file = current_file
pep8.readlines = readlines
try:
- pep8._main()
+ pep8._main() # pylint: disable=W0212
finally:
if len(_missingImport) > 0:
print >> sys.stderr, ("%i imports missing in this test environment"
% len(_missingImport))
-
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index 4548e4ae..c79f0598 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -1,15 +1,15 @@
#!/usr/bin/python
# Provides a somewhat random, somewhat compat, somewhat useful mock version of
-#
-# http://docs.amazonwebservices.com/AWSEC2/2007-08-29/DeveloperGuide/AESDG-chapter-instancedata.html
+# http://docs.amazonwebservices.com
+# /AWSEC2/2007-08-29/DeveloperGuide/AESDG-chapter-instancedata.html
"""
To use this to mimic the EC2 metadata service entirely, run it like:
- # Where 'eth0' is *some* interface.
+ # Where 'eth0' is *some* interface.
sudo ifconfig eth0:0 169.254.169.254 netmask 255.255.255.255
- sudo ./mock-meta -a 169.254.169.254 -p 80
+ sudo ./mock-meta.py -a 169.254.169.254 -p 80
Then:
wget -q http://169.254.169.254/latest/meta-data/instance-id -O -; echo
@@ -23,7 +23,7 @@ import json
import logging
import os
import random
-import string
+import string # pylint: disable=W0402
import sys
import yaml
@@ -84,12 +84,12 @@ META_CAPABILITIES = [
PUB_KEYS = {
'brickies': [
('ssh-rsa '
- 'AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T'
- '7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78'
- 'hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtv'
- 'EONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz'
- '3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SC'
- 'mXp5Kt5/82cD/VN3NtHw== brickies'),
+ 'AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemN'
+ 'Sj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxz'
+ 'xtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJ'
+ 'tO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7'
+ 'u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN'
+ '+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== brickies'),
'',
],
}
@@ -156,6 +156,8 @@ def traverse(keys, mp):
ID_CHARS = [c for c in (string.ascii_uppercase + string.digits)]
+
+
def id_generator(size=6, lower=False):
txt = ''.join(random.choice(ID_CHARS) for x in range(size))
if lower:
@@ -234,12 +236,12 @@ class MetaDataHandler(object):
elif action == 'public-keys':
nparams = params[1:]
# This is a weird kludge, why amazon why!!!
- # public-keys is messed up, a list of /latest/meta-data/public-keys/
- # shows something like: '0=brickies'
- # but a GET to /latest/meta-data/public-keys/0=brickies will fail
- # you have to know to get '/latest/meta-data/public-keys/0', then
- # from there you get a 'openssh-key', which you can get.
- # this hunk of code just re-works the object for that.
+ # public-keys is messed up, list of /latest/meta-data/public-keys/
+ # shows something like: '0=brickies'
+ # but a GET to /latest/meta-data/public-keys/0=brickies will fail
+ # you have to know to get '/latest/meta-data/public-keys/0', then
+ # from there you get a 'openssh-key', which you can get.
+ # this hunk of code just re-works the object for that.
avail_keys = get_ssh_keys()
key_ids = sorted(list(avail_keys.keys()))
if nparams:
@@ -248,13 +250,14 @@ class MetaDataHandler(object):
key_id = int(mybe_key)
key_name = key_ids[key_id]
except:
- raise WebException(httplib.BAD_REQUEST, "Unknown key id %r" % mybe_key)
+ raise WebException(httplib.BAD_REQUEST,
+ "Unknown key id %r" % mybe_key)
# Extract the possible sub-params
result = traverse(nparams[1:], {
"openssh-key": "\n".join(avail_keys[key_name]),
})
if isinstance(result, (dict)):
- # TODO: This might not be right??
+ # TODO(harlowja): This might not be right??
result = "\n".join(sorted(result.keys()))
if not result:
result = ''
@@ -303,13 +306,13 @@ class UserDataHandler(object):
blob = "\n".join(lines)
return blob.strip()
- def get_data(self, params, who, **kwargs):
+ def get_data(self, params, who, **kwargs): # pylint: disable=W0613
if not params:
return self._get_user_blob(who=who)
return NOT_IMPL_RESPONSE
-# Seem to need to use globals since can't pass
+# Seem to need to use globals since can't pass
# data into the request handlers instances...
# Puke!
meta_fetcher = None
@@ -323,14 +326,12 @@ class Ec2Handler(BaseHTTPRequestHandler):
versions = sorted(versions)
return "\n".join(versions)
- def log_message(self, format, *args):
- msg = "%s - %s" % (self.address_string(), format % (args))
+ def log_message(self, fmt, *args):
+ msg = "%s - %s" % (self.address_string(), fmt % (args))
log.info(msg)
def _find_method(self, path):
# Puke! (globals)
- global meta_fetcher
- global user_fetcher
func_mapping = {
'user-data': user_fetcher.get_data,
'meta-data': meta_fetcher.get_data,
@@ -341,12 +342,14 @@ class Ec2Handler(BaseHTTPRequestHandler):
return self._get_versions
date = segments[0].strip().lower()
if date not in self._get_versions():
- raise WebException(httplib.BAD_REQUEST, "Unknown version format %r" % date)
+ raise WebException(httplib.BAD_REQUEST,
+ "Unknown version format %r" % date)
if len(segments) < 2:
raise WebException(httplib.BAD_REQUEST, "No action provided")
look_name = segments[1].lower()
if look_name not in func_mapping:
- raise WebException(httplib.BAD_REQUEST, "Unknown requested data %r" % look_name)
+ raise WebException(httplib.BAD_REQUEST,
+ "Unknown requested data %r" % look_name)
base_func = func_mapping[look_name]
who = self.address_string()
ip_from = self.client_address[0]
@@ -371,7 +374,8 @@ class Ec2Handler(BaseHTTPRequestHandler):
self.send_response(httplib.OK)
self.send_header("Content-Type", "binary/octet-stream")
self.send_header("Content-Length", len(data))
- log.info("Sending data (len=%s):\n%s", len(data), format_text(data))
+ log.info("Sending data (len=%s):\n%s", len(data),
+ format_text(data))
self.end_headers()
self.wfile.write(data)
except RuntimeError as e:
@@ -389,22 +393,25 @@ class Ec2Handler(BaseHTTPRequestHandler):
self._do_response()
-def setup_logging(log_level, format='%(levelname)s: @%(name)s : %(message)s'):
+def setup_logging(log_level, fmt='%(levelname)s: @%(name)s : %(message)s'):
root_logger = logging.getLogger()
console_logger = logging.StreamHandler(sys.stdout)
- console_logger.setFormatter(logging.Formatter(format))
+ console_logger.setFormatter(logging.Formatter(fmt))
root_logger.addHandler(console_logger)
root_logger.setLevel(log_level)
def extract_opts():
parser = OptionParser()
- parser.add_option("-p", "--port", dest="port", action="store", type=int, default=80,
- help="port from which to serve traffic (default: %default)", metavar="PORT")
- parser.add_option("-a", "--addr", dest="address", action="store", type=str, default='0.0.0.0',
- help="address from which to serve traffic (default: %default)", metavar="ADDRESS")
- parser.add_option("-f", '--user-data-file', dest='user_data_file', action='store',
- help="user data filename to serve back to incoming requests", metavar='FILE')
+ parser.add_option("-p", "--port", dest="port", action="store", type=int,
+ default=80, metavar="PORT",
+ help="port from which to serve traffic (default: %default)")
+ parser.add_option("-a", "--addr", dest="address", action="store", type=str,
+ default='0.0.0.0', metavar="ADDRESS",
+ help="address from which to serve traffic (default: %default)")
+ parser.add_option("-f", '--user-data-file', dest='user_data_file',
+ action='store', metavar='FILE',
+ help="user data filename to serve back to incoming requests")
(options, args) = parser.parse_args()
out = dict()
out['extra'] = args
@@ -420,14 +427,14 @@ def extract_opts():
def setup_fetchers(opts):
- global meta_fetcher
- global user_fetcher
+ global meta_fetcher # pylint: disable=W0603
+ global user_fetcher # pylint: disable=W0603
meta_fetcher = MetaDataHandler(opts)
user_fetcher = UserDataHandler(opts)
def run_server():
- # Using global here since it doesn't seem like we
+ # Using global here since it doesn't seem like we
# can pass opts into a request handler constructor...
opts = extract_opts()
setup_logging(logging.DEBUG)