Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/cloud.py                                11
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py              4
-rw-r--r--  cloudinit/config/cc_apt_update_upgrade.py        136
-rw-r--r--  cloudinit/config/cc_bootcmd.py                     2
-rw-r--r--  cloudinit/config/cc_emit_upstart.py               48
-rw-r--r--  cloudinit/config/cc_final_message.py               6
-rw-r--r--  cloudinit/config/cc_landscape.py                   4
-rw-r--r--  cloudinit/config/cc_mounts.py                      2
-rw-r--r--  cloudinit/config/cc_puppet.py                      5
-rw-r--r--  cloudinit/config/cc_resizefs.py                    8
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py         4
-rw-r--r--  cloudinit/config/cc_set_passwords.py              16
-rw-r--r--  cloudinit/config/cc_ssh.py                        23
-rw-r--r--  cloudinit/config/cc_ssh_authkey_fingerprints.py   96
-rw-r--r--  cloudinit/config/cc_ssh_import_id.py              68
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py            2
-rw-r--r--  cloudinit/config/cc_update_hostname.py             2
-rw-r--r--  cloudinit/config/cc_users_groups.py               78
-rw-r--r--  cloudinit/config/cc_write_files.py                 4
-rw-r--r--  cloudinit/distros/__init__.py                    285
-rw-r--r--  cloudinit/distros/debian.py                       27
-rw-r--r--  cloudinit/distros/fedora.py                        3
-rw-r--r--  cloudinit/distros/rhel.py                          4
-rw-r--r--  cloudinit/distros/ubuntu.py                        8
-rw-r--r--  cloudinit/filters/__init__.py                     21
-rw-r--r--  cloudinit/filters/launch_index.py                 75
-rw-r--r--  cloudinit/handlers/__init__.py                     2
-rw-r--r--  cloudinit/handlers/shell_script.py                 2
-rw-r--r--  cloudinit/log.py                                   2
-rw-r--r--  cloudinit/settings.py                              5
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py          299
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py          6
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py       326
-rw-r--r--  cloudinit/sources/DataSourceEc2.py                48
-rw-r--r--  cloudinit/sources/DataSourceNone.py               61
-rw-r--r--  cloudinit/sources/__init__.py                     44
-rw-r--r--  cloudinit/ssh_util.py                             94
-rw-r--r--  cloudinit/stages.py                                6
-rw-r--r--  cloudinit/user_data.py                            86
-rw-r--r--  cloudinit/util.py                                 41
40 files changed, 1649 insertions, 315 deletions
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 22d9167e..95e0cfb2 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -70,21 +70,22 @@ class Cloud(object):
return fn
# The rest of these are just useful proxies
- def get_userdata(self):
- return self.datasource.get_userdata()
+ def get_userdata(self, apply_filter=True):
+ return self.datasource.get_userdata(apply_filter)
def get_instance_id(self):
return self.datasource.get_instance_id()
+ @property
+ def launch_index(self):
+ return self.datasource.launch_index
+
def get_public_ssh_keys(self):
return self.datasource.get_public_ssh_keys()
def get_locale(self):
return self.datasource.get_locale()
- def get_local_mirror(self):
- return self.datasource.get_local_mirror()
-
def get_hostname(self, fqdn=False):
return self.datasource.get_hostname(fqdn=fqdn)
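The Cloud proxy changes above expose launch-index filtering to consumers. A minimal sketch of how a config module might use the new surface (the `cloud` wiring is assumed, not shown here):

    def describe_instance(cloud):
        # Filtered by the instance's launch index by default; pass
        # apply_filter=False to get the raw, unfiltered user data.
        filtered = cloud.get_userdata()
        raw = cloud.get_userdata(apply_filter=False)
        print("instance %s at launch index %s" % (
            cloud.get_instance_id(), cloud.launch_index))
        return filtered, raw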
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 3426099e..02056ee0 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -16,8 +16,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit import util
from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
frequency = PER_INSTANCE
@@ -50,7 +50,7 @@ def handle(_name, cfg, cloud, log, _args):
def write_apt_snippet(cloud, setting, log, f_name):
- """ Writes f_name with apt pipeline depth 'setting' """
+ """Writes f_name with apt pipeline depth 'setting'."""
file_contents = APT_PIPE_TPL % (setting)
diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py
index 1bffa47d..356bb98d 100644
--- a/cloudinit/config/cc_apt_update_upgrade.py
+++ b/cloudinit/config/cc_apt_update_upgrade.py
@@ -20,6 +20,7 @@
import glob
import os
+import time
from cloudinit import templater
from cloudinit import util
@@ -50,20 +51,25 @@ def handle(name, cfg, cloud, log, _args):
upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False)
release = get_release()
- mirror = find_apt_mirror(cloud, cfg)
- if not mirror:
+ mirrors = find_apt_mirror_info(cloud, cfg)
+ if not mirrors or "primary" not in mirrors:
log.debug(("Skipping module named %s,"
" no package 'mirror' located"), name)
return
- log.debug("Selected mirror at: %s" % mirror)
+ # backwards compatibility
+ mirror = mirrors["primary"]
+ mirrors["mirror"] = mirror
+
+ log.debug("mirror info: %s" % mirrors)
if not util.get_cfg_option_bool(cfg,
'apt_preserve_sources_list', False):
- generate_sources_list(release, mirror, cloud, log)
- old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror',
- "archive.ubuntu.com/ubuntu")
- rename_apt_lists(old_mir, mirror)
+ generate_sources_list(release, mirrors, cloud, log)
+ old_mirrors = cfg.get('apt_old_mirrors',
+ {"primary": "archive.ubuntu.com/ubuntu",
+ "security": "security.ubuntu.com/ubuntu"})
+ rename_apt_lists(old_mirrors, mirrors)
# Set up any apt proxy
proxy = cfg.get("apt_proxy", None)
@@ -81,8 +87,10 @@ def handle(name, cfg, cloud, log, _args):
# Process 'apt_sources'
if 'apt_sources' in cfg:
- errors = add_sources(cloud, cfg['apt_sources'],
- {'MIRROR': mirror, 'RELEASE': release})
+ params = mirrors
+ params['RELEASE'] = release
+ params['MIRROR'] = mirror
+ errors = add_sources(cloud, cfg['apt_sources'], params)
for e in errors:
log.warn("Source Error: %s", ':'.join(e))
@@ -118,6 +126,20 @@ def handle(name, cfg, cloud, log, _args):
util.logexc(log, "Failed to install packages: %s ", pkglist)
errors.append(e)
+ # kernel and openssl (possibly some other packages)
+ # write a file /var/run/reboot-required after upgrading.
+ # if that file exists and apt_reboot_if_required is set, then just
+ # stop right now and reboot
+ # TODO(smoser): handle this less violently
+ reboot_file = "/var/run/reboot-required"
+ if ((upgrade or pkglist) and cfg.get("apt_reboot_if_required", False) and
+ os.path.isfile(reboot_file)):
+ log.warn("rebooting after upgrade or install per %s" % reboot_file)
+ time.sleep(1) # give the warning time to get out
+ util.subp(["/sbin/reboot"])
+ time.sleep(60)
+ log.warn("requested reboot did not happen!")
+ errors.append(Exception("requested reboot did not happen!"))
+
if len(errors):
log.warn("%s failed with exceptions, re-raising the last one",
len(errors))
@@ -146,15 +168,18 @@ def mirror2lists_fileprefix(mirror):
return string
-def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"):
- oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror))
- nprefix = os.path.join(lists_d, mirror2lists_fileprefix(new_mirror))
- if oprefix == nprefix:
- return
- olen = len(oprefix)
- for filename in glob.glob("%s_*" % oprefix):
- # TODO use the cloud.paths.join...
- util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
+def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"):
+ for (name, omirror) in old_mirrors.iteritems():
+ nmirror = new_mirrors.get(name)
+ if not nmirror:
+ continue
+ oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror))
+ nprefix = os.path.join(lists_d, mirror2lists_fileprefix(nmirror))
+ if oprefix == nprefix:
+ continue
+ olen = len(oprefix)
+ for filename in glob.glob("%s_*" % oprefix):
+ util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
def get_release():
@@ -162,14 +187,17 @@ def get_release():
return stdout.strip()
-def generate_sources_list(codename, mirror, cloud, log):
+def generate_sources_list(codename, mirrors, cloud, log):
template_fn = cloud.get_template_filename('sources.list')
- if template_fn:
- params = {'mirror': mirror, 'codename': codename}
- out_fn = cloud.paths.join(False, '/etc/apt/sources.list')
- templater.render_to_file(template_fn, out_fn, params)
- else:
+ if not template_fn:
log.warn("No template found, not rendering /etc/apt/sources.list")
+ return
+
+ params = {'codename': codename}
+ for k in mirrors:
+ params[k] = mirrors[k]
+ out_fn = cloud.paths.join(False, '/etc/apt/sources.list')
+ templater.render_to_file(template_fn, out_fn, params)
def add_sources(cloud, srclist, template_params=None):
@@ -231,43 +259,47 @@ def add_sources(cloud, srclist, template_params=None):
return errorlist
-def find_apt_mirror(cloud, cfg):
- """ find an apt_mirror given the cloud and cfg provided """
+def find_apt_mirror_info(cloud, cfg):
+ """find an apt_mirror given the cloud and cfg provided."""
mirror = None
- cfg_mirror = cfg.get("apt_mirror", None)
- if cfg_mirror:
- mirror = cfg["apt_mirror"]
- elif "apt_mirror_search" in cfg:
- mirror = util.search_for_mirror(cfg['apt_mirror_search'])
- else:
- mirror = cloud.get_local_mirror()
+ # this is the less preferred way of specifying a mirror; preferred
+ # would be to use the distro's search or package_mirror.
+ mirror = cfg.get("apt_mirror", None)
- mydom = ""
+ search = cfg.get("apt_mirror_search", None)
+ if not mirror and search:
+ mirror = util.search_for_mirror(search)
+ if (not mirror and
+ util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
+ mydom = ""
doms = []
- if not mirror:
- # if we have a fqdn, then search its domain portion first
- (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- mydom = ".".join(fqdn.split(".")[1:])
- if mydom:
- doms.append(".%s" % mydom)
+ # if we have a fqdn, then search its domain portion first
+ (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ mydom = ".".join(fqdn.split(".")[1:])
+ if mydom:
+ doms.append(".%s" % mydom)
+
+ doms.extend((".localdomain", "",))
- if (not mirror and
- util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
- doms.extend((".localdomain", "",))
+ mirror_list = []
+ distro = cloud.distro.name
+ mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
+ for post in doms:
+ mirror_list.append(mirrorfmt % (post))
- mirror_list = []
- distro = cloud.distro.name
- mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
- for post in doms:
- mirror_list.append(mirrorfmt % (post))
+ mirror = util.search_for_mirror(mirror_list)
- mirror = util.search_for_mirror(mirror_list)
+ mirror_info = cloud.datasource.get_package_mirror_info()
- if not mirror:
- mirror = cloud.distro.get_package_mirror()
+ # this is a bit strange.
+ # if mirror is set, then one of the legacy options above set it
+ # but they do not cover security. so we need to get that from
+ # get_package_mirror_info
+ if mirror:
+ mirror_info.update({'primary': mirror})
- return mirror
+ return mirror_info
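find_apt_mirror_info() now returns a dict of named mirrors instead of a single URL. A sketch of the shapes involved (URLs illustrative):

    mirror_info = {
        'primary': 'http://archive.ubuntu.com/ubuntu',
        'security': 'http://security.ubuntu.com/ubuntu',
    }
    # A legacy apt_mirror setting only overrides the primary entry; the
    # security mirror still comes from get_package_mirror_info().
    legacy_mirror = 'http://mirror.example.com/ubuntu'  # hypothetical
    if legacy_mirror:
        mirror_info.update({'primary': legacy_mirror})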
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index bae1ea54..896cb4d0 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -20,8 +20,8 @@
import os
-from cloudinit import util
from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
frequency = PER_ALWAYS
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
new file mode 100644
index 00000000..6d376184
--- /dev/null
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -0,0 +1,48 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
+
+frequency = PER_ALWAYS
+
+distros = ['ubuntu', 'debian']
+
+
+def handle(name, _cfg, cloud, log, args):
+ event_names = args
+ if not event_names:
+ # Default to the 'cloud-config'
+ # event for backwards compat.
+ event_names = ['cloud-config']
+ if not os.path.isfile("/sbin/initctl"):
+ log.debug(("Skipping module named %s,"
+ " no /sbin/initctl located"), name)
+ return
+ cfgpath = cloud.paths.get_ipath_cur("cloud_config")
+ for n in event_names:
+ cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
+ try:
+ util.subp(cmd)
+ except Exception as e:
+ # TODO(harlowja), use log exception from utils??
+ log.warn("Emission of upstart event %s failed due to: %s", n, e)
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index aff03c4e..6b864fda 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -28,7 +28,7 @@ frequency = PER_ALWAYS
# Cheetah formated default message
FINAL_MESSAGE_DEF = ("Cloud-init v. ${version} finished at ${timestamp}."
- " Up ${uptime} seconds.")
+ " Datasource ${datasource}. Up ${uptime} seconds")
def handle(_name, cfg, cloud, log, args):
@@ -51,6 +51,7 @@ def handle(_name, cfg, cloud, log, args):
'uptime': uptime,
'timestamp': ts,
'version': cver,
+ 'datasource': str(cloud.datasource),
}
util.multi_log("%s\n" % (templater.render_string(msg_in, subs)),
console=False, stderr=True)
@@ -63,3 +64,6 @@ def handle(_name, cfg, cloud, log, args):
util.write_file(boot_fin_fn, contents)
except:
util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
+
+ if cloud.datasource.is_disconnected:
+ log.warn("Used fallback datasource")
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 906a6ff7..7cfb8296 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -31,6 +31,7 @@ from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
+LS_DEFAULT_FILE = "/etc/default/landscape-client"
distros = ['ubuntu']
@@ -78,6 +79,9 @@ def handle(_name, cfg, cloud, log, _args):
util.write_file(lsc_client_fn, contents.getvalue())
log.debug("Wrote landscape config file to %s", lsc_client_fn)
+ if ls_cloudcfg:
+ util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
+
def merge_together(objs):
"""
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index d3dcf7af..14c965bb 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -92,7 +92,7 @@ def handle(_name, cfg, cloud, log, _args):
# in case the user did not quote a field (likely fs-freq, fs_passno)
# but do not convert None to 'None' (LP: #898365)
for j in range(len(cfgmnt[i])):
- if j is None:
+ if cfgmnt[i][j] is None:
continue
else:
cfgmnt[i][j] = str(cfgmnt[i][j])
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 467c1496..74ee18e1 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -48,7 +48,8 @@ def handle(name, cfg, cloud, log, _args):
# Create object for reading puppet.conf values
puppet_config = helpers.DefaultingConfigParser()
# Read puppet.conf values from original file in order to be able to
- # mix the rest up. First clean them up (TODO is this really needed??)
+ # mix the rest up. First clean them up
+ # (TODO(harlowja) is this really needed??)
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
cleaned_contents = '\n'.join(cleaned_lines)
puppet_config.readfp(StringIO(cleaned_contents),
@@ -80,7 +81,7 @@ def handle(name, cfg, cloud, log, _args):
for (o, v) in cfg.iteritems():
if o == 'certname':
# Expand %f as the fqdn
- # TODO should this use the cloud fqdn??
+ # TODO(harlowja) should this use the cloud fqdn??
v = v.replace("%f", socket.getfqdn())
# Expand %i as the instance id
v = v.replace("%i", cloud.get_instance_id())
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 256a194f..e7f27944 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -22,8 +22,8 @@ import os
import stat
import time
-from cloudinit import util
from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
frequency = PER_ALWAYS
@@ -72,12 +72,12 @@ def handle(name, cfg, cloud, log, args):
log.debug("Skipping module named %s, resizing disabled", name)
return
- # TODO is the directory ok to be used??
+ # TODO(harlowja) is the directory ok to be used??
resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
resize_root_d = cloud.paths.join(False, resize_root_d)
util.ensure_dir(resize_root_d)
- # TODO: allow what is to be resized to be configurable??
+ # TODO(harlowja): allow what is to be resized to be configurable??
resize_what = cloud.paths.join(False, "/")
with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.",
dir=resize_root_d, delete=True) as tfh:
@@ -136,5 +136,5 @@ def do_resize(resize_cmd, log):
raise
tot_time = time.time() - start
log.debug("Resizing took %.3f seconds", tot_time)
- # TODO: Should we add a fsck check after this to make
+ # TODO(harlowja): Should we add a fsck check after this to make
# sure we didn't corrupt anything?
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 45d41b3f..4bf18516 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -37,9 +37,9 @@
import os
+from cloudinit.settings import PER_INSTANCE
from cloudinit import url_helper as uhelp
from cloudinit import util
-from cloudinit.settings import PER_INSTANCE
from urlparse import parse_qs
@@ -72,7 +72,7 @@ def handle(name, _cfg, cloud, log, _args):
captured_excps = []
# These will eventually be then ran by the cc_scripts_user
- # TODO: maybe this should just be a new user data handler??
+ # TODO(harlowja): maybe this should just be a new user data handler??
# Instead of a late module that acts like a user data handler?
scripts_d = cloud.get_ipath_cur('scripts')
urls = mdict[MY_HOOKNAME]
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index ab266741..a017e6b6 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -50,8 +50,20 @@ def handle(_name, cfg, cloud, log, args):
expire = util.get_cfg_option_bool(chfg, 'expire', expire)
if not plist and password:
- user = util.get_cfg_option_str(cfg, "user", "ubuntu")
- plist = "%s:%s" % (user, password)
+ user = cloud.distro.get_default_user()
+
+ if 'users' in cfg:
+
+ user_zero = cfg['users'][0]
+
+ if isinstance(user_zero, dict) and 'name' in user_zero:
+ user = user_zero['name']
+
+ if user:
+ plist = "%s:%s" % (user, password)
+
+ else:
+ log.warn("No default or defined user to change password for.")
errors = []
if plist:
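With the new stanza, the user for the "user:password" pair comes from cfg['users'][0]['name'] when present, falling back to the distro's default user. Sketched with illustrative values:

    cfg = {'users': [{'name': 'bob'}]}        # hypothetical config
    user = cfg['users'][0].get('name')        # 'bob'; else cloud.distro.get_default_user()
    plist = "%s:%s" % (user, 'passw0rd')      # later fed to chpasswd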
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 4019ae90..0ded62ba 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -18,11 +18,11 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import os
import glob
+import os
-from cloudinit import util
from cloudinit import ssh_util
+from cloudinit import util
DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding,"
"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" "
@@ -76,7 +76,7 @@ def handle(_name, cfg, cloud, log, _args):
pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0])
cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
try:
- # TODO: Is this guard needed?
+ # TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
util.subp(cmd, capture=False)
log.debug("Generated a key for %s from %s", pair[0], pair[1])
@@ -94,7 +94,7 @@ def handle(_name, cfg, cloud, log, _args):
if not os.path.exists(keyfile):
cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
try:
- # TODO: Is this guard needed?
+ # TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
util.subp(cmd, capture=False)
except:
@@ -102,7 +102,16 @@ def handle(_name, cfg, cloud, log, _args):
" %s to file %s"), keytype, keyfile)
try:
- user = util.get_cfg_option_str(cfg, 'user')
+ # TODO(utlemming): consolidate this stanza that occurs in:
+ # cc_ssh_import_id, cc_set_passwords, maybe cc_users_groups.py
+ user = cloud.distro.get_default_user()
+
+ if 'users' in cfg:
+ user_zero = cfg['users'][0]
+
+ if user_zero != "default":
+ user = user_zero
+
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
DISABLE_ROOT_OPTS)
@@ -124,7 +133,9 @@ def apply_credentials(keys, user, paths, disable_root, disable_root_opts):
if user:
ssh_util.setup_user_keys(keys, user, '', paths)
- if disable_root and user:
+ if disable_root:
+ if not user:
+ user = "NONE"
key_prefix = disable_root_opts.replace('$USER', user)
else:
key_prefix = ''
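What apply_credentials() now prepends to root's keys when disable_root is set, sketched with an assumed default user of 'ubuntu' (key material elided):

    key_prefix = DISABLE_ROOT_OPTS.replace('$USER', 'ubuntu')
    # Each of root's authorized keys then becomes:
    #   no-port-forwarding,no-agent-forwarding,no-X11-forwarding,
    #   command="echo 'Please login as the user \"ubuntu\" rather than
    #   the user \"root\".';echo;sleep 10" ssh-rsa AAAA... comment
    # With no resolvable user, '$USER' is replaced by "NONE".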
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
new file mode 100644
index 00000000..23f5755a
--- /dev/null
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -0,0 +1,96 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import base64
+import hashlib
+
+from prettytable import PrettyTable
+
+from cloudinit import ssh_util
+from cloudinit import util
+
+
+def _split_hash(bin_hash):
+ split_up = []
+ for i in xrange(0, len(bin_hash), 2):
+ split_up.append(bin_hash[i:i + 2])
+ return split_up
+
+
+def _gen_fingerprint(b64_text, hash_meth='md5'):
+ if not b64_text:
+ return ''
+ # TBD(harlowja): Maybe we should feed this into 'ssh -lf'?
+ try:
+ hasher = hashlib.new(hash_meth)
+ hasher.update(base64.b64decode(b64_text))
+ return ":".join(_split_hash(hasher.hexdigest()))
+ except TypeError:
+ # Raised when b64 not really b64...
+ return '?'
+
+
+def _is_printable_key(entry):
+ if any([entry.keytype, entry.base64, entry.comment, entry.options]):
+ if (entry.keytype and
+ entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']):
+ return True
+ return False
+
+
+def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
+ prefix='ci-info: '):
+ if not key_entries:
+ message = ("%sno authorized ssh keys fingerprints found for user %s."
+ % (prefix, user))
+ util.multi_log(message)
+ return
+ tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
+ 'Comment']
+ tbl = PrettyTable(tbl_fields)
+ for entry in key_entries:
+ if _is_printable_key(entry):
+ row = []
+ row.append(entry.keytype or '-')
+ row.append(_gen_fingerprint(entry.base64, hash_meth) or '-')
+ row.append(entry.options or '-')
+ row.append(entry.comment or '-')
+ tbl.add_row(row)
+ authtbl_s = tbl.get_string()
+ authtbl_lines = authtbl_s.splitlines()
+ max_len = len(max(authtbl_lines, key=len))
+ lines = [
+ util.center("Authorized keys from %s for user %s" %
+ (key_fn, user), "+", max_len),
+ ]
+ lines.extend(authtbl_lines)
+ for line in lines:
+ util.multi_log(text="%s%s\n" % (prefix, line),
+ stderr=False, console=True)
+
+
+def handle(name, cfg, cloud, log, _args):
+ if 'no_ssh_fingerprints' in cfg:
+ log.debug(("Skipping module named %s, "
+ "logging of ssh fingerprints disabled"), name)
+ return
+
+ user_name = util.get_cfg_option_str(cfg, "user", "ubuntu")
+ hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
+ extract = ssh_util.extract_authorized_keys
+ (auth_key_fn, auth_key_entries) = extract(user_name, cloud.paths)
+ _pprint_key_entries(user_name, auth_key_fn, auth_key_entries, hash_meth)
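A standalone sketch of the fingerprint scheme used by _gen_fingerprint() and _split_hash() above; the sample value is the MD5 of an empty key body:

    import base64
    import hashlib

    def fingerprint(b64_key, hash_meth='md5'):
        hasher = hashlib.new(hash_meth)
        hasher.update(base64.b64decode(b64_key))
        digest = hasher.hexdigest()
        # Pair up hex digits: 'd41d8cd9...' -> 'd4:1d:8c:d9:...'
        return ":".join(digest[i:i + 2] for i in range(0, len(digest), 2))

    # fingerprint('') -> 'd4:1d:8c:d9:8f:00:b2:04:e9:80:09:98:ec:f8:42:7e'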
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index c58b28ec..08fb63c6 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -19,35 +19,83 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit import util
+import pwd
# The ssh-import-id only seems to exist on ubuntu (for now)
# https://launchpad.net/ssh-import-id
distros = ['ubuntu']
-def handle(name, cfg, _cloud, log, args):
+def handle(_name, cfg, cloud, log, args):
+
+ # import for "user: XXXXX"
if len(args) != 0:
user = args[0]
ids = []
if len(args) > 1:
ids = args[1:]
- else:
- user = util.get_cfg_option_str(cfg, "user", "ubuntu")
- ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
- if len(ids) == 0:
- log.debug("Skipping module named %s, no ids found to import", name)
+ import_ssh_ids(ids, user, log)
return
- if not user:
- log.debug("Skipping module named %s, no user found to import", name)
+ # import for cloudinit created users
+ elist = []
+ for user_cfg in cfg['users']:
+ user = None
+ import_ids = []
+
+ if isinstance(user_cfg, str) and user_cfg == "default":
+ user = cloud.distro.get_default_user()
+ if not user:
+ continue
+
+ import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
+
+ elif isinstance(user_cfg, dict):
+ user = None
+ import_ids = []
+
+ try:
+ user = user_cfg['name']
+ import_ids = user_cfg['ssh_import_id']
+
+ if import_ids and isinstance(import_ids, str):
+ import_ids = str(import_ids).split(',')
+
+ except:
+ log.debug("user %s is not configured for ssh_import" % user)
+ continue
+
+ if not len(import_ids):
+ continue
+
+ try:
+ import_ssh_ids(import_ids, user, log)
+ except Exception as exc:
+ util.logexc(log, "ssh-import-id failed for: %s %s" %
+ (user, import_ids), exc)
+ elist.append(exc)
+
+ if len(elist):
+ raise elist[0]
+
+
+def import_ssh_ids(ids, user, log):
+
+ if not (user and ids):
+ log.debug("empty user(%s) or ids(%s). not importing", user, ids)
return
+ try:
+ _check = pwd.getpwnam(user)
+ except KeyError as exc:
+ raise exc
+
cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
log.debug("Importing ssh ids for user %s.", user)
try:
util.subp(cmd, capture=False)
- except util.ProcessExecutionError as e:
+ except util.ProcessExecutionError as exc:
util.logexc(log, "Failed to run command to import %s ssh ids", user)
- raise e
+ raise exc
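The two per-user shapes the new loop accepts, shown as the dict the handler receives (ids hypothetical):

    cfg = {
        'users': [
            'default',  # resolved via cloud.distro.get_default_user()
            {'name': 'bob', 'ssh_import_id': 'bob-lp-id'},
        ],
        # applies only to the 'default' user entry:
        'ssh_import_id': ['alice-lp-id'],
    }
    # A string ssh_import_id such as 'id1,id2' is split on commas.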
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index 38108da7..4d75000f 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -18,8 +18,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit import util
from cloudinit import templater
+from cloudinit import util
from cloudinit.settings import PER_ALWAYS
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index b84a1a06..1d6679ea 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -20,8 +20,8 @@
import os
-from cloudinit import util
from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
frequency = PER_ALWAYS
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
new file mode 100644
index 00000000..418f3330
--- /dev/null
+++ b/cloudinit/config/cc_users_groups.py
@@ -0,0 +1,78 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+
+
+def handle(name, cfg, cloud, log, _args):
+ user_zero = None
+
+ if 'groups' in cfg:
+ for group in cfg['groups']:
+ if isinstance(group, dict):
+ for name, values in group.iteritems():
+ if isinstance(values, list):
+ cloud.distro.create_group(name, values)
+ elif isinstance(values, str):
+ cloud.distro.create_group(name, values.split(','))
+ else:
+ cloud.distro.create_group(group, [])
+
+ if 'users' in cfg:
+ user_zero = None
+
+ for user_config in cfg['users']:
+
+ # Handle the default user creation
+ if 'default' in user_config:
+ log.info("Creating default user")
+
+ # Create the default user if so defined
+ try:
+ cloud.distro.add_default_user()
+
+ if not user_zero:
+ user_zero = cloud.distro.get_default_user()
+
+ except NotImplementedError:
+
+ if user_zero == name:
+ user_zero = None
+
+ log.warn("Distro has not implemented default user "
+ "creation. No default user will be created")
+
+ elif isinstance(user_config, dict) and 'name' in user_config:
+
+ name = user_config['name']
+ if not user_zero:
+ user_zero = name
+
+ # Make options friendly for distro.create_user
+ new_opts = {}
+ if isinstance(user_config, dict):
+ for opt in user_config:
+ new_opts[opt.replace('-', '_')] = user_config[opt]
+
+ cloud.distro.create_user(**new_opts)
+
+ else:
+ # create user with no configuration
+ cloud.distro.create_user(user_config)
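The entry shapes the handler above walks, shown as the dict it receives (names illustrative):

    cfg = {
        'groups': [
            'admins',                    # bare name, no members
            {'devs': ['alice', 'bob']},  # dict: group -> member list
            {'ops': 'carol,dave'},       # dict: group -> comma string
        ],
        'users': [
            'default',                   # distro default user, if any
            {'name': 'alice', 'ssh-import-id': 'alice-lp-id'},
        ],
    }
    # Dashed option keys such as 'ssh-import-id' are rewritten to
    # underscores before being passed to distro.create_user().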
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 1bfa4c25..a73d6f4e 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -19,8 +19,8 @@
import base64
import os
-from cloudinit import util
from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
frequency = PER_INSTANCE
@@ -46,7 +46,7 @@ def canonicalize_extraction(encoding_type, log):
return ['application/x-gzip']
if encoding_type in ['gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64']:
return ['application/base64', 'application/x-gzip']
- # Yaml already encodes binary data as base64 if it is given to the
+ # Yaml already encodes binary data as base64 if it is given to the
# yaml file as binary, so those will be automatically decoded for you.
# But the above b64 is just for people that are more 'comfortable'
# specifying it manually (which might be a possibility)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index da4d0180..3e9d934d 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -7,6 +7,7 @@
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Ben Howard <ben.howard@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -23,12 +24,17 @@
from StringIO import StringIO
import abc
+import grp
+import os
+import pwd
+import re
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import ssh_util
from cloudinit import util
-# TODO: Make this via config??
+# TODO(harlowja): Make this via config??
IFACE_ACTIONS = {
'up': ['ifup', '--all'],
'down': ['ifdown', '--all'],
@@ -40,12 +46,42 @@ LOG = logging.getLogger(__name__)
class Distro(object):
__metaclass__ = abc.ABCMeta
+ default_user = None
+ default_user_groups = None
def __init__(self, name, cfg, paths):
self._paths = paths
self._cfg = cfg
self.name = name
+ def add_default_user(self):
+ # Adds the distro user using the rules:
+ # - Password is same as username but is locked
+ # - nopasswd sudo access
+
+ user = self.get_default_user()
+ groups = self.get_default_user_groups()
+
+ if not user:
+ raise NotImplementedError("No Default user")
+
+ user_dict = {
+ 'name': user,
+ 'plain_text_passwd': user,
+ 'home': "/home/%s" % user,
+ 'shell': "/bin/bash",
+ 'lock_passwd': True,
+ 'gecos': "%s%s" % (user[0:1].upper(), user[1:]),
+ 'sudo': "ALL=(ALL) NOPASSWD:ALL",
+ }
+
+ if groups:
+ user_dict['groups'] = groups
+
+ self.create_user(**user_dict)
+
+ LOG.info("Added default '%s' user with passwordless sudo", user)
+
@abc.abstractmethod
def install_packages(self, pkglist):
raise NotImplementedError()
@@ -75,8 +111,26 @@ class Distro(object):
def update_package_sources(self):
raise NotImplementedError()
- def get_package_mirror(self):
- return self.get_option('package_mirror')
+ def get_primary_arch(self):
+ arch = os.uname()[4]
+ if arch in ("i386", "i486", "i586", "i686"):
+ return "i386"
+ return arch
+
+ def _get_arch_package_mirror_info(self, arch=None):
+ mirror_info = self.get_option("package_mirrors", None)
+ if arch is None:
+ arch = self.get_primary_arch()
+ return _get_arch_package_mirror_info(mirror_info, arch)
+
+ def get_package_mirror_info(self, arch=None,
+ availability_zone=None):
+ # this resolves the package_mirrors config option
+ # down to a single dict of {mirror_name: mirror_url}
+ arch_info = self._get_arch_package_mirror_info(arch)
+
+ return _get_package_mirror_info(availability_zone=availability_zone,
+ mirror_info=arch_info)
def apply_network(self, settings, bring_up=True):
# Write it out
@@ -150,6 +204,231 @@ class Distro(object):
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
+ def isuser(self, name):
+ try:
+ if pwd.getpwnam(name):
+ return True
+ except KeyError:
+ return False
+
+ def get_default_user(self):
+ return self.default_user
+
+ def get_default_user_groups(self):
+ return self.default_user_groups
+
+ def create_user(self, name, **kwargs):
+ """
+ Creates users for the system using the GNU passwd tools. This
+ will work on an GNU system. This should be overriden on
+ distros where useradd is not desirable or not available.
+ """
+
+ adduser_cmd = ['useradd', name]
+ x_adduser_cmd = ['useradd', name]
+
+ # Since we are creating users, we want to carefully validate the
+ # inputs. If something goes wrong, we can end up with a system
+ # that nobody can login to.
+ adduser_opts = {
+ "gecos": '--comment',
+ "homedir": '--home',
+ "primary_group": '--gid',
+ "groups": '--groups',
+ "passwd": '--password',
+ "shell": '--shell',
+ "expiredate": '--expiredate',
+ "inactive": '--inactive',
+ "selinux_user": '--selinux-user',
+ }
+
+ adduser_opts_flags = {
+ "no_user_group": '--no-user-group',
+ "system": '--system',
+ "no_log_init": '--no-log-init',
+ "no_create_home": "-M",
+ }
+
+ # Now check the value and create the command
+ for option in kwargs:
+ value = kwargs[option]
+ if option in adduser_opts and value \
+ and isinstance(value, str):
+ adduser_cmd.extend([adduser_opts[option], value])
+
+ # Redact the password field from the logs
+ if option != "password":
+ x_adduser_cmd.extend([adduser_opts[option], value])
+ else:
+ x_adduser_cmd.extend([adduser_opts[option], 'REDACTED'])
+
+ elif option in adduser_opts_flags and value:
+ adduser_cmd.append(adduser_opts_flags[option])
+ x_adduser_cmd.append(adduser_opts_flags[option])
+
+ # Default to creating home directory unless otherwise directed
+ # Also, we do not create home directories for system users.
+ if "no_create_home" not in kwargs and "system" not in kwargs:
+ adduser_cmd.append('-m')
+
+ # Create the user
+ if self.isuser(name):
+ LOG.warn("User %s already exists, skipping." % name)
+ else:
+ LOG.debug("Creating name %s" % name)
+ try:
+ util.subp(adduser_cmd, logstring=x_adduser_cmd)
+ except Exception as e:
+ util.logexc(LOG, "Failed to create user %s due to error.", e)
+ raise e
+
+ # Set password if plain-text password provided
+ if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
+ self.set_passwd(name, kwargs['plain_text_passwd'])
+
+ # Default to locking down the account unless lock_passwd is
+ # explicitly set to False.
+ if kwargs.get('lock_passwd', True):
+ try:
+ util.subp(['passwd', '--lock', name])
+ except Exception as e:
+ util.logexc(LOG, ("Failed to disable password logins for"
+ "user %s" % name), e)
+ raise e
+
+ # Configure sudo access
+ if 'sudo' in kwargs:
+ self.write_sudo_rules(name, kwargs['sudo'])
+
+ # Import SSH keys
+ if 'ssh_authorized_keys' in kwargs:
+ keys = set(kwargs['ssh_authorized_keys']) or []
+ ssh_util.setup_user_keys(keys, name, None, self._paths)
+
+ return True
+
+ def set_passwd(self, user, passwd, hashed=False):
+ pass_string = '%s:%s' % (user, passwd)
+ cmd = ['chpasswd']
+
+ if hashed:
+ cmd.append('--encrypted')
+
+ try:
+ util.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
+ except Exception as e:
+ util.logexc(LOG, "Failed to set password for %s" % user)
+ raise e
+
+ return True
+
+ def write_sudo_rules(self,
+ user,
+ rules,
+ sudo_file="/etc/sudoers.d/90-cloud-init-users",
+ ):
+
+ content_header = "# user rules for %s" % user
+ content = "%s\n%s %s\n\n" % (content_header, user, rules)
+
+ if isinstance(rules, list):
+ content = "%s\n" % content_header
+ for rule in rules:
+ content += "%s %s\n" % (user, rule)
+ content += "\n"
+
+ if not os.path.exists(sudo_file):
+ util.write_file(sudo_file, content, 0644)
+
+ else:
+ try:
+ with open(sudo_file, 'a') as f:
+ f.write(content)
+ except IOError as e:
+ util.logexc(LOG, "Failed to write %s" % sudo_file, e)
+ raise e
+
+ def isgroup(self, name):
+ try:
+ if grp.getgrnam(name):
+ return True
+ except:
+ return False
+
+ def create_group(self, name, members):
+ group_add_cmd = ['groupadd', name]
+
+ # Check if the group exists; create it if it doesn't
+ if self.isgroup(name):
+ LOG.warn("Skipping creation of existing group '%s'" % name)
+ else:
+ try:
+ util.subp(group_add_cmd)
+ LOG.info("Created new group %s" % name)
+ except Exception as e:
+ util.logexc("Failed to create group %s" % name, e)
+
+ # Add members to the group, if so defined
+ if len(members) > 0:
+ for member in members:
+ if not self.isuser(member):
+ LOG.warn("Unable to add group member '%s' to group '%s'"
+ "; user does not exist." % (member, name))
+ continue
+
+ util.subp(['usermod', '-a', '-G', name, member])
+ LOG.info("Added user '%s' to group '%s'" % (member, name))
+
+
+def _get_package_mirror_info(mirror_info, availability_zone=None,
+ mirror_filter=util.search_for_mirror):
+ # given a arch specific 'mirror_info' entry (from package_mirrors)
+ # search through the 'search' entries, and fallback appropriately
+ # return a dict with only {name: mirror} entries.
+
+ ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" %
+ "north|northeast|east|southeast|south|southwest|west|northwest")
+
+ subst = {}
+ if availability_zone:
+ subst['availability_zone'] = availability_zone
+
+ if availability_zone and re.match(ec2_az_re, availability_zone):
+ subst['ec2_region'] = "%s" % availability_zone[0:-1]
+
+ results = {}
+ for (name, mirror) in mirror_info.get('failsafe', {}).iteritems():
+ results[name] = mirror
+
+ for (name, searchlist) in mirror_info.get('search', {}).iteritems():
+ mirrors = []
+ for tmpl in searchlist:
+ try:
+ mirrors.append(tmpl % subst)
+ except KeyError:
+ pass
+
+ found = mirror_filter(mirrors)
+ if found:
+ results[name] = found
+
+ LOG.debug("filtered distro mirror info: %s" % results)
+
+ return results
+
+
+def _get_arch_package_mirror_info(package_mirrors, arch):
+ # pull out the specific arch from a 'package_mirrors' config option
+ default = None
+ for item in package_mirrors:
+ arches = item.get("arches")
+ if arch in arches:
+ return item
+ if "default" in arches:
+ default = item
+ return default
+
def fetch(name):
locs = importer.find_module(name,
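A worked example of the mirror resolution added above, with illustrative inputs:

    arch_info = {
        'failsafe': {'primary': 'http://archive.ubuntu.com/ubuntu'},
        'search': {'primary':
                   ['http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/']},
    }
    # With availability_zone='us-east-1b', ec2_az_re matches, so
    # subst['ec2_region'] = 'us-east-1' and the template expands to
    # 'http://us-east-1.ec2.archive.ubuntu.com/ubuntu/'. If no searched
    # mirror responds, the 'failsafe' entry remains in the results.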
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 3247d7ce..5b4aa9f8 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -46,11 +46,8 @@ class Distro(distros.Distro):
out_fn = self._paths.join(False, '/etc/default/locale')
util.subp(['locale-gen', locale], capture=False)
util.subp(['update-locale', locale], capture=False)
- contents = [
- "# Created by cloud-init",
- 'LANG="%s"' % (locale),
- ]
- util.write_file(out_fn, "\n".join(contents))
+ lines = ["# Created by cloud-init", 'LANG="%s"' % (locale), ""]
+ util.write_file(out_fn, "\n".join(lines))
def install_packages(self, pkglist):
self.update_package_sources()
@@ -69,11 +66,8 @@ class Distro(distros.Distro):
util.subp(['hostname', hostname])
def _write_hostname(self, hostname, out_fn):
- lines = []
- lines.append("# Created by cloud-init")
- lines.append(str(hostname))
- contents = "\n".join(lines)
- util.write_file(out_fn, contents, 0644)
+ # "" gives trailing newline.
+ util.write_file(out_fn, "%s\n" % str(hostname), 0644)
def update_hostname(self, hostname, prev_fn):
hostname_prev = self._read_hostname(prev_fn)
@@ -123,13 +117,10 @@ class Distro(distros.Distro):
if not os.path.isfile(tz_file):
raise RuntimeError(("Invalid timezone %s,"
" no file found at %s") % (tz, tz_file))
- tz_lines = [
- "# Created by cloud-init",
- str(tz),
- ]
- tz_contents = "\n".join(tz_lines)
+ # "" provides trailing newline during join
+ tz_lines = ["# Created by cloud-init", str(tz), ""]
tz_fn = self._paths.join(False, "/etc/timezone")
- util.write_file(tz_fn, tz_contents)
+ util.write_file(tz_fn, "\n".join(tz_lines))
util.copy(tz_file, self._paths.join(False, "/etc/localtime"))
def package_command(self, command, args=None):
@@ -147,3 +138,7 @@ class Distro(distros.Distro):
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
["update"], freq=PER_INSTANCE)
+
+ def get_primary_arch(self):
+ (arch, _err) = util.subp(['dpkg', '--print-architecture'])
+ return str(arch).strip()
diff --git a/cloudinit/distros/fedora.py b/cloudinit/distros/fedora.py
index c777845d..9f76a116 100644
--- a/cloudinit/distros/fedora.py
+++ b/cloudinit/distros/fedora.py
@@ -28,4 +28,5 @@ LOG = logging.getLogger(__name__)
class Distro(rhel.Distro):
- pass
+ distro_name = 'fedora'
+ default_user = 'ec2-user'
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 700a98a4..b77f1b70 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -81,7 +81,7 @@ class Distro(distros.Distro):
util.write_file(resolve_rw_fn, "\n".join(contents), 0644)
def _write_network(self, settings):
- # TODO fix this... since this is the ubuntu format
+ # TODO(harlowja) fix this... since this is the ubuntu format
entries = translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
@@ -278,7 +278,7 @@ class QuotingConfigObj(ConfigObj):
# This is a util function to translate a ubuntu /etc/network/interfaces 'blob'
# to a rhel equiv. that can then be written to /etc/sysconfig/network-scripts/
-# TODO remove when we have python-netcf active...
+# TODO(harlowja) remove when we have python-netcf active...
def translate_network(settings):
# Get the standard cmd, args from the ubuntu format
entries = []
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index 77c2aff4..22f8c2c5 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -7,6 +7,7 @@
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Ben Howard <ben.howard@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -21,11 +22,14 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit.distros import debian
-
from cloudinit import log as logging
LOG = logging.getLogger(__name__)
class Distro(debian.Distro):
- pass
+
+ distro_name = 'ubuntu'
+ default_user = 'ubuntu'
+ default_user_groups = ("adm,audio,cdrom,dialout,floppy,video,"
+ "plugdev,dip,netdev,sudo")
diff --git a/cloudinit/filters/__init__.py b/cloudinit/filters/__init__.py
new file mode 100644
index 00000000..da124641
--- /dev/null
+++ b/cloudinit/filters/__init__.py
@@ -0,0 +1,21 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py
new file mode 100644
index 00000000..5bebd318
--- /dev/null
+++ b/cloudinit/filters/launch_index.py
@@ -0,0 +1,75 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import copy
+
+from cloudinit import log as logging
+from cloudinit import user_data as ud
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class Filter(object):
+ def __init__(self, wanted_idx, allow_none=True):
+ self.wanted_idx = wanted_idx
+ self.allow_none = allow_none
+
+ def _select(self, message):
+ msg_idx = message.get('Launch-Index', None)
+ if self.allow_none and msg_idx is None:
+ return True
+ msg_idx = util.safe_int(msg_idx)
+ if msg_idx != self.wanted_idx:
+ return False
+ return True
+
+ def _do_filter(self, message):
+ # Don't use walk() here since we want to do the reforming of the
+ # messages ourselves and not flatten the message listings...
+ if not self._select(message):
+ return None
+ if message.is_multipart():
+ # Recreate it and its child messages
+ prev_msgs = message.get_payload(decode=False)
+ new_msgs = []
+ discarded = 0
+ for m in prev_msgs:
+ m = self._do_filter(m)
+ if m is not None:
+ new_msgs.append(m)
+ else:
+ discarded += 1
+ LOG.debug(("Discarding %s multipart messages "
+ "which do not match launch index %s"),
+ discarded, self.wanted_idx)
+ new_message = copy.copy(message)
+ new_message.set_payload(new_msgs)
+ new_message[ud.ATTACHMENT_FIELD] = str(len(new_msgs))
+ return new_message
+ else:
+ return copy.copy(message)
+
+ def apply(self, root_message):
+ if self.wanted_idx is None:
+ return root_message
+ return self._do_filter(root_message)
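A minimal sketch of applying the filter to a hypothetical multipart user-data message:

    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    root = MIMEMultipart()
    for idx in (0, 1):
        part = MIMEText("#cloud-config\n", 'cloud-config')
        part['Launch-Index'] = str(idx)
        root.attach(part)

    filtered = Filter(1).apply(root)
    # The Launch-Index 0 part is discarded; parts with no Launch-Index
    # header would survive, since allow_none defaults to True.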
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 6d1502f4..99caed1f 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -133,7 +133,7 @@ def walker_handle_handler(pdata, _ctype, _filename, payload):
modfname = os.path.join(pdata['handlerdir'], "%s" % (modname))
if not modfname.endswith(".py"):
modfname = "%s.py" % (modfname)
- # TODO: Check if path exists??
+ # TODO(harlowja): Check if path exists??
util.write_file(modfname, payload, 0600)
handlers = pdata['handlers']
try:
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index a9d8e544..6c5c11ca 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -43,7 +43,7 @@ class ShellScriptPartHandler(handlers.Handler):
def _handle_part(self, _data, ctype, filename, payload, _frequency):
if ctype in handlers.CONTENT_SIGNALS:
- # TODO: maybe delete existing things here
+ # TODO(harlowja): maybe delete existing things here
return
filename = util.clean_filename(filename)
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 819c85b6..2333e5ee 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -21,8 +21,8 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
-import logging.handlers
import logging.config
+import logging.handlers
import collections
import os
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 2083cf60..8cc9e3b4 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -31,10 +31,13 @@ CFG_BUILTIN = {
'datasource_list': [
'NoCloud',
'ConfigDrive',
+ 'AltCloud',
'OVF',
'MAAS',
'Ec2',
- 'CloudStack'
+ 'CloudStack',
+ # At the end to act as a 'catch' when none of the above work...
+ 'None',
],
'def_log_file': '/var/log/cloud-init.log',
'log_cfgs': [],
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
new file mode 100644
index 00000000..69c376a5
--- /dev/null
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -0,0 +1,299 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joe VLcek <JVLcek@RedHat.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''
+This file contains code used to gather the user data passed to an
+instance on RHEVm and vSphere.
+'''
+
+import errno
+import os
+import os.path
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit.util import ProcessExecutionError
+
+LOG = logging.getLogger(__name__)
+
+# Needed file paths
+CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
+
+# Shell command lists
+CMD_DMI_SYSTEM = ['/usr/sbin/dmidecode', '--string', 'system-product-name']
+CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
+CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5']
+
+META_DATA_NOT_SUPPORTED = {
+ 'block-device-mapping': {},
+ 'instance-id': 455,
+ 'local-hostname': 'localhost',
+ 'placement': {},
+ }
+
+
+def read_user_data_callback(mount_dir):
+ '''
+ Description:
+ This callback will be applied by util.mount_cb() on the mounted
+ file.
+
+ The Deltacloud file name contains 'deltacloud'. Those not using
+ Deltacloud but instead instrumenting the injection could drop
+ 'deltacloud' from the file name.
+
+ Input:
+ mount_dir - Mount directory
+
+ Returns:
+ User Data
+
+ '''
+
+ deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
+ user_data_file = mount_dir + '/user-data.txt'
+
+ # First try deltacloud_user_data_file. On failure try user_data_file.
+ try:
+ with open(deltacloud_user_data_file, 'r') as user_data_f:
+ user_data = user_data_f.read().strip()
+ except:
+ try:
+ with open(user_data_file, 'r') as user_data_f:
+ user_data = user_data_f.read().strip()
+ except:
+ util.logexc(LOG, ('Failed accessing user data file.'))
+ return None
+
+ return user_data
+
+
+class DataSourceAltCloud(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.seed = None
+ self.supported_seed_starts = ("/", "file://")
+
+ def __str__(self):
+ mstr = "%s [seed=%s]" % (util.obj_name(self), self.seed)
+ return mstr
+
+ def get_cloud_type(self):
+ '''
+ Description:
+ Get the type for the cloud back end this instance is running on
+ by examining the string returned by:
+ dmidecode --string system-product-name
+
+ On RHEV-M dmidecode returns: RHEV Hypervisor
+ On VMware/vSphere dmidecode returns: VMware Virtual Platform
+
+ Input:
+ None
+
+ Returns:
+ One of the following strings:
+ 'RHEV', 'VSPHERE' or 'UNKNOWN'
+
+ '''
+
+ cmd = CMD_DMI_SYSTEM
+ try:
+ (cmd_out, _err) = util.subp(cmd)
+ except ProcessExecutionError, _err:
+ LOG.debug(('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message))
+ return 'UNKNOWN'
+ except OSError, _err:
+ LOG.debug(('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message))
+ return 'UNKNOWN'
+
+ if cmd_out.upper().startswith('RHEV'):
+ return 'RHEV'
+
+ if cmd_out.upper().startswith('VMWARE'):
+ return 'VSPHERE'
+
+ return 'UNKNOWN'
+
+ def get_data(self):
+ '''
+ Description:
+ User Data is passed to the launching instance which
+ is used to perform instance configuration.
+
+ Cloud providers expose the user data differently.
+ It is necessary to determine which cloud provider
+ the current instance is running on to determine
+ how to access the user data. Images built with
+ image factory will contain a CLOUD_INFO_FILE which
+ contains a string identifying the cloud provider.
+
+ Images not built with Imagefactory will try to
+ determine what the cloud provider is based on system
+ information.
+ '''
+
+ LOG.debug('Invoked get_data()')
+
+ if os.path.exists(CLOUD_INFO_FILE):
+ try:
+ cloud_info = open(CLOUD_INFO_FILE)
+ cloud_type = cloud_info.read().strip().upper()
+ cloud_info.close()
+ except:
+ util.logexc(LOG, 'Unable to access cloud info file.')
+ return False
+ else:
+ cloud_type = self.get_cloud_type()
+
+ LOG.debug('cloud_type: ' + str(cloud_type))
+
+ if 'RHEV' in cloud_type:
+ if self.user_data_rhevm():
+ return True
+ elif 'VSPHERE' in cloud_type:
+ if self.user_data_vsphere():
+ return True
+ else:
+ # there was no recognized alternate cloud type
+ # indicating this handler should not be used.
+ return False
+
+ # No user data found
+ util.logexc(LOG, ('Failed accessing user data.'))
+ return False
+
+ def user_data_rhevm(self):
+ '''
+ RHEVM specific userdata read
+
+ If on RHEV-M the user data will be contained on the
+ floppy device in file <user_data_file>
+ To access it:
+ modprobe floppy
+
+ Leverage util.mount_cb to:
+ mkdir <tmp mount dir>
+ mount /dev/fd0 <tmp mount dir>
+ The call back passed to util.mount_cb will do:
+ read <tmp mount dir>/<user_data_file>
+ '''
+
+ return_str = None
+
+ # modprobe floppy
+ try:
+ cmd = CMD_PROBE_FLOPPY
+ (cmd_out, _err) = util.subp(cmd)
+ LOG.debug(('Command: %s\nOutput: %s') % (' '.join(cmd), cmd_out))
+ except ProcessExecutionError, _err:
+ util.logexc(LOG, (('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message)))
+ return False
+ except OSError, _err:
+ util.logexc(LOG, (('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message)))
+ return False
+
+ floppy_dev = '/dev/fd0'
+
+ # udevadm settle for floppy device
+ try:
+ cmd = CMD_UDEVADM_SETTLE
+ cmd.append('--exit-if-exists=' + floppy_dev)
+ (cmd_out, _err) = util.subp(cmd)
+ LOG.debug(('Command: %s\nOutput: %s') % (' '.join(cmd), cmd_out))
+ except ProcessExecutionError, _err:
+ util.logexc(LOG, (('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message)))
+ return False
+ except OSError, _err:
+ util.logexc(LOG, (('Failed command: %s\n%s') % \
+ (' '.join(cmd), _err.message)))
+ return False
+
+ try:
+ return_str = util.mount_cb(floppy_dev, read_user_data_callback)
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ except util.MountFailedError:
+ util.logexc(LOG, ("Failed to mount %s"
+ " when looking for user data"), floppy_dev)
+
+ self.userdata_raw = return_str
+ self.metadata = META_DATA_NOT_SUPPORTED
+
+ if return_str:
+ return True
+ else:
+ return False
+
+ def user_data_vsphere(self):
+ '''
+ vSphere specific userdata read
+
+ If on vSphere the user data will be contained on the
+ cdrom device in file <user_data_file>
+ To access it:
+ Leverage util.mount_cb to:
+ mkdir <tmp mount dir>
+ mount <cdrom device> <tmp mount dir>
+ The call back passed to util.mount_cb will do:
+ read <tmp mount dir>/<user_data_file>
+ '''
+
+ return_str = None
+ cdrom_list = util.find_devs_with('LABEL=CDROM')
+ for cdrom_dev in cdrom_list:
+ try:
+ return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
+ if return_str:
+ break
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ except util.MountFailedError:
+ util.logexc(LOG, ("Failed to mount %s"
+ " when looking for user data"), cdrom_dev)
+
+ self.userdata_raw = return_str
+ self.metadata = META_DATA_NOT_SUPPORTED
+
+ return bool(return_str)
+
+# Used to match classes to dependencies
+# Source DataSourceAltCloud does not really depend on networking.
+# In the future 'dsmode'-like behavior can be added to offer the user
+# the ability to run before networking.
+datasources = [
+ (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
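
Both readers above hand the actual file read off to read_user_data_callback via util.mount_cb, which mounts the device on a temporary directory, calls the callback with the mount point, and unmounts afterwards. A minimal sketch of such a callback (the real one is defined earlier in DataSourceAltCloud.py; the file names here are assumptions):

    import os

    def read_user_data_callback(mount_dir):
        # Try the candidate user-data file names on the mounted media and
        # return the first readable contents, or None if nothing is found.
        for fname in ('deltacloud-user-data.txt', 'user-data.txt'):
            path = os.path.join(mount_dir, fname)
            if os.path.isfile(path):
                with open(path) as fp:
                    return fp.read().strip()
        return None
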
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 751bef4f..f7ffa7cb 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -49,8 +49,7 @@ class DataSourceCloudStack(sources.DataSource):
self.metadata_address = "http://%s/" % (gw_addr)
def get_default_gateway(self):
- """ Returns the default gateway ip address in the dotted format
- """
+ """Returns the default gateway ip address in the dotted format."""
lines = util.load_file("/proc/net/route").splitlines()
for line in lines:
items = line.split("\t")
@@ -132,7 +131,8 @@ class DataSourceCloudStack(sources.DataSource):
def get_instance_id(self):
return self.metadata['instance-id']
- def get_availability_zone(self):
+ @property
+ def availability_zone(self):
return self.metadata['availability-zone']
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 320dd1d1..b8154367 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -30,88 +30,119 @@ LOG = logging.getLogger(__name__)
# Various defaults/constants...
DEFAULT_IID = "iid-dsconfigdrive"
DEFAULT_MODE = 'pass'
-CFG_DRIVE_FILES = [
+CFG_DRIVE_FILES_V1 = [
"etc/network/interfaces",
"root/.ssh/authorized_keys",
"meta.js",
]
DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
- "dsmode": DEFAULT_MODE,
}
-CFG_DRIVE_DEV_ENV = 'CLOUD_INIT_CONFIG_DRIVE_DEVICE'
+VALID_DSMODES = ("local", "net", "pass", "disabled")
class DataSourceConfigDrive(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed = None
- self.cfg = {}
+ self.source = None
self.dsmode = 'local'
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
+ self.version = None
def __str__(self):
- mstr = "%s [%s]" % (util.obj_name(self), self.dsmode)
- mstr += "[seed=%s]" % (self.seed)
+ mstr = "%s [%s,ver=%s]" % (util.obj_name(self), self.dsmode,
+ self.version)
+ mstr += "[source=%s]" % (self.source)
return mstr
def get_data(self):
found = None
md = {}
- ud = ""
+ results = {}
if os.path.isdir(self.seed_dir):
try:
- (md, ud) = read_config_drive_dir(self.seed_dir)
+ results = read_config_drive_dir(self.seed_dir)
found = self.seed_dir
except NonConfigDriveDir:
util.logexc(LOG, "Failed reading config drive from %s",
self.seed_dir)
if not found:
- dev = find_cfg_drive_device()
- if dev:
+ devlist = find_candidate_devs()
+ for dev in devlist:
try:
- (md, ud) = util.mount_cb(dev, read_config_drive_dir)
+ results = util.mount_cb(dev, read_config_drive_dir)
found = dev
+ break
except (NonConfigDriveDir, util.MountFailedError):
pass
+ except BrokenConfigDriveDir:
+ util.logexc(LOG, "broken config drive: %s", dev)
if not found:
return False
- if 'dsconfig' in md:
- self.cfg = md['dscfg']
-
+ md = results['metadata']
md = util.mergedict(md, DEFAULT_METADATA)
- # Update interfaces and ifup only on the local datasource
- # this way the DataSourceConfigDriveNet doesn't do it also.
- if 'network-interfaces' in md and self.dsmode == "local":
+ user_dsmode = results.get('dsmode', None)
+ if user_dsmode not in VALID_DSMODES + (None,):
+ LOG.warn("user specified invalid mode: %s" % user_dsmode)
+ user_dsmode = None
+
+ dsmode = get_ds_mode(cfgdrv_ver=results['cfgdrive_ver'],
+ ds_cfg=self.ds_cfg.get('dsmode'),
+ user=user_dsmode)
+
+ if dsmode == "disabled":
+ # most likely user specified
+ return False
+
+ # TODO(smoser): fix this, it's dirty.
+ # we want to do some things (writing files and network config)
+ # only on first boot, and even then, we want to do so in the
+ # local datasource (so they happen earlier) even if the configured
+ # dsmode is 'net' or 'pass'. To do this, we check the previous
+ # instance-id
+ prev_iid = get_previous_iid(self.paths)
+ cur_iid = md['instance-id']
+
+ if ('network_config' in results and self.dsmode == "local" and
+ prev_iid != cur_iid):
LOG.debug("Updating network interfaces from config drive (%s)",
- md['dsmode'])
- self.distro.apply_network(md['network-interfaces'])
+ dsmode)
+ self.distro.apply_network(results['network_config'])
- self.seed = found
- self.metadata = md
- self.userdata_raw = ud
+ # file writing occurs in local mode (to be as early as possible)
+ if self.dsmode == "local" and prev_iid != cur_iid and results['files']:
+ LOG.debug("writing injected files")
+ try:
+ write_files(results['files'])
+ except Exception:
+ util.logexc(LOG, "Failed writing files")
+
+ # dsmode != self.dsmode here if:
+ # * dsmode = "pass": it should only copy files and then pass
+ # control to another datasource
+ # * dsmode = "net" and self.dsmode = "local": user boothooks
+ # should be applied with networking up, so the local datasource
+ # just gets out of the way and lets the net one claim the source
+ if dsmode != self.dsmode:
+ LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
+ return False
- if md['dsmode'] == self.dsmode:
- return True
+ self.source = found
+ self.metadata = md
+ self.userdata_raw = results.get('userdata')
+ self.version = results['cfgdrive_ver']
- LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
- return False
+ return True
def get_public_ssh_keys(self):
if not 'public-keys' in self.metadata:
return []
return self.metadata['public-keys']
- # The data sources' config_obj is a cloud-config formated
- # object that came to it from ways other than cloud-config
- # because cloud-config content would be handled elsewhere
- def get_config_obj(self):
- return self.cfg
-
class DataSourceConfigDriveNet(DataSourceConfigDrive):
def __init__(self, sys_cfg, distro, paths):
@@ -123,48 +154,146 @@ class NonConfigDriveDir(Exception):
pass
-def find_cfg_drive_device():
- """ Get the config drive device. Return a string like '/dev/vdb'
- or None (if there is no non-root device attached). This does not
- check the contents, only reports that if there *were* a config_drive
- attached, it would be this device.
- Note: per config_drive documentation, this is
- "associated as the last available disk on the instance"
- """
+class BrokenConfigDriveDir(Exception):
+ pass
- # This seems to be for debugging??
- if CFG_DRIVE_DEV_ENV in os.environ:
- return os.environ[CFG_DRIVE_DEV_ENV]
- # We are looking for a raw block device (sda, not sda1) with a vfat
- # filesystem on it....
- letters = "abcdefghijklmnopqrstuvwxyz"
- devs = util.find_devs_with("TYPE=vfat")
+def find_candidate_devs():
+ """Return a list of devices that may contain the config drive.
- # Filter out anything not ending in a letter (ignore partitions)
- devs = [f for f in devs if f[-1] in letters]
+ The returned list is sorted by search order: the first item should
+ be searched first (highest priority).
+
+ config drive v1:
+ Per documentation, this is "associated as the last available disk on the
+ instance", and should be VFAT.
+ Currently, we do not restrict the search list to the "last available disk".
+
+ config drive v2:
+ Disk should be:
+ * either vfat or iso9660 formatted
+ * labeled with 'config-2'
+ """
- # Sort them in reverse so "last" device is first
- devs.sort(reverse=True)
+ by_fstype = (util.find_devs_with("TYPE=vfat") +
+ util.find_devs_with("TYPE=iso9660"))
+ by_label = util.find_devs_with("LABEL=config-2")
- if devs:
- return devs[0]
+ # give preference to "last available disk" (vdb over vda)
+ # note, this is not a perfect rendition of that.
+ by_fstype.sort(reverse=True)
+ by_label.sort(reverse=True)
- return None
+ # combine list of items by putting by-label items first
+ # followed by fstype items, but with dupes removed
+ combined = (by_label + [d for d in by_fstype if d not in by_label])
+
+ # We are looking for block device (sda, not sda1), ignore partitions
+ combined = [d for d in combined if d[-1] not in "0123456789"]
+
+ return combined
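
To make the ordering concrete, here is the combine/de-duplicate/partition-filter logic above run against made-up device lists (the real lists come from util.find_devs_with):

    by_label = ['/dev/vdb']                             # LABEL=config-2
    by_fstype = ['/dev/vda', '/dev/vdb', '/dev/vda1']   # vfat or iso9660

    by_fstype.sort(reverse=True)
    by_label.sort(reverse=True)
    combined = by_label + [d for d in by_fstype if d not in by_label]
    combined = [d for d in combined if d[-1] not in "0123456789"]
    print(combined)  # ['/dev/vdb', '/dev/vda']: label match first, no partitions
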
def read_config_drive_dir(source_dir):
+ last_e = NonConfigDriveDir("Not found")
+ for finder in (read_config_drive_dir_v2, read_config_drive_dir_v1):
+ try:
+ data = finder(source_dir)
+ return data
+ except NonConfigDriveDir as exc:
+ last_e = exc
+ raise last_e
+
+
+def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
+
+ if (not os.path.isdir(os.path.join(source_dir, "openstack", version)) and
+ os.path.isdir(os.path.join(source_dir, "openstack", "latest"))):
+ LOG.warn("version '%s' not available, attempting to use 'latest'" %
+ version)
+ version = "latest"
+
+ datafiles = (
+ ('metadata',
+ "openstack/%s/meta_data.json" % version, True, json.loads),
+ ('userdata', "openstack/%s/user_data" % version, False, None),
+ ('ec2-metadata', "ec2/latest/metadata.json", False, json.loads),
+ )
+
+ results = {'userdata': None}
+ for (name, path, required, process) in datafiles:
+ fpath = os.path.join(source_dir, path)
+ data = None
+ found = False
+ if os.path.isfile(fpath):
+ try:
+ with open(fpath) as fp:
+ data = fp.read()
+ except Exception as exc:
+ raise BrokenConfigDriveDir("failed to read: %s: %s"
+ % (fpath, exc))
+ found = True
+ elif required:
+ raise NonConfigDriveDir("missing mandatory %s" % fpath)
+
+ if found and process:
+ try:
+ data = process(data)
+ except Exception as exc:
+ raise BrokenConfigDriveDir("failed to process: %s: %s"
+ % (fpath, exc))
+
+ if found:
+ results[name] = data
+
+ # instance-id is 'uuid' for openstack. just copy it to instance-id.
+ if 'instance-id' not in results['metadata']:
+ try:
+ results['metadata']['instance-id'] = results['metadata']['uuid']
+ except KeyError:
+ raise BrokenConfigDriveDir("No uuid entry in metadata")
+
+ def read_content_path(item):
+ # do not use os.path.join here, as content_path starts with /
+ cpath = os.path.sep.join((source_dir, "openstack",
+ "./%s" % item['content_path']))
+ with open(cpath) as fp:
+ return fp.read()
+
+ files = {}
+ try:
+ for item in results['metadata'].get('files', {}):
+ files[item['path']] = read_content_path(item)
+
+ # the 'network_config' item in metadata is a content pointer
+ # to the network config that should be applied.
+ # in folsom, it is just a '/etc/network/interfaces' file.
+ item = results['metadata'].get("network_config", None)
+ if item:
+ results['network_config'] = read_content_path(item)
+ except Exception as exc:
+ raise BrokenConfigDriveDir("failed to read file %s: %s" % (item, exc))
+
+ # in openstack, a user can specify meta ('nova boot --meta=key=value'),
+ # and those values will appear under metadata['meta'].
+ # if they specify 'dsmode' they're indicating the mode that they intend
+ # for this datasource to operate in.
+ try:
+ results['dsmode'] = results['metadata']['meta']['dsmode']
+ except KeyError:
+ pass
+
+ results['files'] = files
+ results['cfgdrive_ver'] = 2
+ return results
+
+
+def read_config_drive_dir_v1(source_dir):
"""
- read_config_drive_dir(source_dir):
- read source_dir, and return a tuple with metadata dict and user-data
- string populated. If not a valid dir, raise a NonConfigDriveDir
+ read source_dir, and return a results dict containing the metadata,
+ user-data, files and config drive version (1). If not a valid dir,
+ raise a NonConfigDriveDir.
"""
- # TODO: fix this for other operating systems...
- # Ie: this is where https://fedorahosted.org/netcf/ or similar should
- # be hooked in... (or could be)
found = {}
- for af in CFG_DRIVE_FILES:
+ for af in CFG_DRIVE_FILES_V1:
fn = os.path.join(source_dir, af)
if os.path.isfile(fn):
found[af] = fn
@@ -173,11 +302,10 @@ def read_config_drive_dir(source_dir):
raise NonConfigDriveDir("%s: %s" % (source_dir, "no files found"))
md = {}
- ud = ""
keydata = ""
if "etc/network/interfaces" in found:
fn = found["etc/network/interfaces"]
- md['network-interfaces'] = util.load_file(fn)
+ md['network_config'] = util.load_file(fn)
if "root/.ssh/authorized_keys" in found:
fn = found["root/.ssh/authorized_keys"]
@@ -197,21 +325,77 @@ def read_config_drive_dir(source_dir):
(source_dir, "invalid json in meta.js", e))
md['meta_js'] = content
- # Key data override??
+ # keydata in meta_js is preferred over "injected"
keydata = meta_js.get('public-keys', keydata)
if keydata:
lines = keydata.splitlines()
md['public-keys'] = [l for l in lines
if len(l) and not l.startswith("#")]
- for copy in ('dsmode', 'instance-id', 'dscfg'):
- if copy in meta_js:
- md[copy] = meta_js[copy]
+ # config-drive-v1 has no way for openstack to provide the instance-id
+ # so we copy that into metadata from the user input
+ if 'instance-id' in meta_js:
+ md['instance-id'] = meta_js['instance-id']
+
+ results = {'cfgdrive_ver': 1, 'metadata': md}
+
+ # allow the user to specify 'dsmode' in a meta tag
+ if 'dsmode' in meta_js:
+ results['dsmode'] = meta_js['dsmode']
+
+ # config-drive-v1 has no way of specifying user-data, so the user has
+ # to cheat and stuff it in a meta tag also.
+ results['userdata'] = meta_js.get('user-data')
- if 'user-data' in meta_js:
- ud = meta_js['user-data']
+ # this implementation does not support files
+ # (other than network/interfaces and authorized_keys)
+ results['files'] = []
- return (md, ud)
+ return results
+
+
+def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
+ """Determine what mode should be used.
+ Valid values are 'pass', 'disabled', 'local', 'net'.
+ """
+ # user passed data trumps everything
+ if user is not None:
+ return user
+
+ if ds_cfg is not None:
+ return ds_cfg
+
+ # at config-drive version 1, the default behavior was 'pass'. That
+ # meant not to use it as the primary data source, but to expect an
+ # ec2 metadata source. for version 2, we default to 'net', which
+ # means DataSourceConfigDriveNet would be used.
+ #
+ # this could change in the future. If there was definitive metadata
+ # that indicated the presence of an openstack metadata service, then
+ # we could change to 'pass' by default also. The motivation for that
+ # would be 'cloud-init query', as the web service could be more dynamic.
+ if cfgdrv_ver == 1:
+ return "pass"
+ return "net"
+
+
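
The precedence is therefore: explicit user setting, then datasource config, then a per-version default. A quick check of that ordering (assuming the module is importable under its usual name):

    from cloudinit.sources.DataSourceConfigDrive import get_ds_mode

    assert get_ds_mode(cfgdrv_ver=2, ds_cfg='local', user='pass') == 'pass'
    assert get_ds_mode(cfgdrv_ver=2, ds_cfg='local') == 'local'
    assert get_ds_mode(cfgdrv_ver=1) == 'pass'  # v1 default: defer to ec2 md
    assert get_ds_mode(cfgdrv_ver=2) == 'net'   # v2 default: claim via net
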
+def get_previous_iid(paths):
+ # interestingly, for this purpose the "previous" instance-id is the current
+ # instance-id. cloud-init hasn't moved them over yet as this datasource
+ # hasn't declared itself found.
+ fname = os.path.join(paths.get_cpath('data'), 'instance-id')
+ try:
+ with open(fname) as fp:
+ return fp.read()
+ except IOError:
+ return None
+
+
+def write_files(files):
+ for (name, content) in files.iteritems():
+ if name[0] != os.sep:
+ name = os.sep + name
+ util.write_file(name, content, mode=0660)
# Used to match classes to dependencies
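
To see what read_config_drive_dir_v2 expects on disk, here is a self-contained sketch that builds a minimal '2012-08-10' tree in a scratch directory (all field values are invented for illustration):

    import json
    import os
    import tempfile

    root = tempfile.mkdtemp()
    md_dir = os.path.join(root, 'openstack', '2012-08-10')
    os.makedirs(md_dir)

    # meta_data.json is mandatory; 'uuid' gets copied to 'instance-id',
    # and meta/dsmode ('nova boot --meta dsmode=...') is honored.
    with open(os.path.join(md_dir, 'meta_data.json'), 'w') as fp:
        json.dump({'uuid': 'instance-00000001',
                   'meta': {'dsmode': 'local'}}, fp)

    # user_data is optional.
    with open(os.path.join(md_dir, 'user_data'), 'w') as fp:
        fp.write('#cloud-config\nhostname: example\n')

    # read_config_drive_dir(root) would now return a results dict with
    # 'metadata', 'userdata', 'dsmode' == 'local', 'cfgdrive_ver' == 2.
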
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index d9eb8f17..c7ad6d54 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -40,7 +40,7 @@ DEF_MD_VERSION = '2009-04-04'
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
-DEF_MD_URLS = [DEF_MD_URL, "http://instance-data:8773"]
+DEF_MD_URLS = [DEF_MD_URL, "http://instance-data.:8773"]
class DataSourceEc2(sources.DataSource):
@@ -77,46 +77,18 @@ class DataSourceEc2(sources.DataSource):
self.metadata_address)
return False
+ @property
+ def launch_index(self):
+ if not self.metadata:
+ return None
+ return self.metadata.get('ami-launch-index')
+
def get_instance_id(self):
return self.metadata['instance-id']
def get_availability_zone(self):
return self.metadata['placement']['availability-zone']
- def get_local_mirror(self):
- return self.get_mirror_from_availability_zone()
-
- def get_mirror_from_availability_zone(self, availability_zone=None):
- # Return type None indicates there is no cloud specific mirror
- # Availability is like 'us-west-1b' or 'eu-west-1a'
- if availability_zone is None:
- availability_zone = self.get_availability_zone()
-
- if self.is_vpc():
- return None
-
- if not availability_zone:
- return None
-
- mirror_tpl = self.distro.get_option('package_mirror_ec2_template',
- None)
-
- if mirror_tpl is None:
- return None
-
- # in EC2, the 'region' is 'us-east-1' if 'zone' is 'us-east-1a'
- tpl_params = {
- 'zone': availability_zone.strip(),
- 'region': availability_zone[:-1]
- }
- mirror_url = mirror_tpl % (tpl_params)
-
- found = util.search_for_mirror([mirror_url])
- if found is not None:
- return mirror_url
-
- return None
-
def _get_url_settings(self):
mcfg = self.ds_cfg
if not mcfg:
@@ -255,6 +227,12 @@ class DataSourceEc2(sources.DataSource):
return True
return False
+ @property
+ def availability_zone(self):
+ try:
+ return self.metadata['placement']['availability-zone']
+ except KeyError:
+ return None
# Used to match classes to dependencies
datasources = [
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
new file mode 100644
index 00000000..c2125bee
--- /dev/null
+++ b/cloudinit/sources/DataSourceNone.py
@@ -0,0 +1,61 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class DataSourceNone(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths, ud_proc=None):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
+ self.metadata = {}
+ self.userdata_raw = ''
+
+ def get_data(self):
+ # If the datasource config has any provided 'fallback'
+ # userdata or metadata, use it...
+ if 'userdata_raw' in self.ds_cfg:
+ self.userdata_raw = self.ds_cfg['userdata_raw']
+ if 'metadata' in self.ds_cfg:
+ self.metadata = self.ds_cfg['metadata']
+ return True
+
+ def get_instance_id(self):
+ return 'iid-datasource-none'
+
+ def __str__(self):
+ return util.obj_name(self)
+
+ @property
+ def is_disconnected(self):
+ return True
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceNone, []),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
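
A short sketch of supplying the fallback data above through system configuration, assuming (as elsewhere in this tree) that the base DataSource derives its ds_cfg from sys_cfg['datasource']['None']; the distro and paths collaborators are stubbed with None purely for illustration:

    from cloudinit.sources import DataSourceNone

    sys_cfg = {
        'datasource': {
            'None': {
                'userdata_raw': '#cloud-config\nhostname: fallback\n',
                'metadata': {'local-hostname': 'fallback'},
            },
        },
    }
    ds = DataSourceNone.DataSourceNone(sys_cfg, None, None)
    ds.get_data()                # always succeeds for this datasource
    print(ds.get_instance_id())  # iid-datasource-none
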
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index b25724a5..6f126091 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -20,6 +20,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from email.mime.multipart import MIMEMultipart
+
import abc
from cloudinit import importer
@@ -27,6 +29,8 @@ from cloudinit import log as logging
from cloudinit import user_data as ud
from cloudinit import util
+from cloudinit.filters import launch_index
+
DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
DS_PREFIX = 'DataSource'
@@ -59,12 +63,34 @@ class DataSource(object):
else:
self.ud_proc = ud_proc
- def get_userdata(self):
+ def get_userdata(self, apply_filter=False):
if self.userdata is None:
- raw_data = self.get_userdata_raw()
- self.userdata = self.ud_proc.process(raw_data)
+ self.userdata = self.ud_proc.process(self.get_userdata_raw())
+ if apply_filter:
+ return self._filter_userdata(self.userdata)
return self.userdata
+ @property
+ def launch_index(self):
+ if not self.metadata:
+ return None
+ if 'launch-index' in self.metadata:
+ return self.metadata['launch-index']
+ return None
+
+ def _filter_userdata(self, processed_ud):
+ filters = [
+ launch_index.Filter(util.safe_int(self.launch_index)),
+ ]
+ new_ud = processed_ud
+ for f in filters:
+ new_ud = f.apply(new_ud)
+ return new_ud
+
+ @property
+ def is_disconnected(self):
+ return False
+
def get_userdata_raw(self):
return self.userdata_raw
@@ -113,9 +139,9 @@ class DataSource(object):
def get_locale(self):
return 'en_US.UTF-8'
- def get_local_mirror(self):
- # ??
- return None
+ @property
+ def availability_zone(self):
+ return self.metadata.get('availability-zone')
def get_instance_id(self):
if not self.metadata or 'instance-id' not in self.metadata:
@@ -147,7 +173,7 @@ class DataSource(object):
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
lhost = self.metadata['local-hostname']
if util.is_ipv4(lhost):
- toks = "ip-%s" % lhost.replace(".", "-")
+ toks = [ "ip-%s" % lhost.replace(".", "-") ]
else:
toks = lhost.split(".")
@@ -162,6 +188,10 @@ class DataSource(object):
else:
return hostname
+ def get_package_mirror_info(self):
+ return self.distro.get_package_mirror_info(
+ availability_zone=self.availability_zone)
+
def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
ds_list = list_sources(cfg_list, ds_deps, pkg_list)
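
Conceptually, the launch_index filter applied above walks the processed multipart message and keeps only the parts whose 'Launch-Index' header matches this instance's index; parts without the header are kept. The real implementation lives in cloudinit/filters/launch_index.py and may differ in detail, but a rough sketch looks like:

    from email.mime.multipart import MIMEMultipart

    def filter_parts_by_launch_index(msg, wanted_idx):
        filtered = MIMEMultipart()
        for part in msg.walk():
            if part.get_content_maintype() == 'multipart':
                continue  # containers; walk() yields their children anyway
            header = part.get('Launch-Index')
            try:
                if header is None or int(header) == wanted_idx:
                    filtered.attach(part)
            except (ValueError, TypeError):
                filtered.attach(part)  # unparseable index: keep the part
        return filtered
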
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index e0a2f0ca..88a11a1a 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -181,12 +181,11 @@ def parse_authorized_keys(fname):
return contents
-def update_authorized_keys(fname, keys):
- entries = parse_authorized_keys(fname)
+def update_authorized_keys(old_entries, keys):
to_add = list(keys)
- for i in range(0, len(entries)):
- ent = entries[i]
+ for i in range(0, len(old_entries)):
+ ent = old_entries[i]
if ent.empty() or not ent.base64:
continue
# Replace those with the same base64
@@ -199,66 +198,81 @@ def update_authorized_keys(fname, keys):
# Don't add it later
if k in to_add:
to_add.remove(k)
- entries[i] = ent
+ old_entries[i] = ent
# Now append any entries we did not match above
for key in to_add:
- entries.append(key)
+ old_entries.append(key)
# Now format them back to strings...
- lines = [str(b) for b in entries]
+ lines = [str(b) for b in old_entries]
# Ensure it ends with a newline
lines.append('')
return '\n'.join(lines)
-def setup_user_keys(keys, user, key_prefix, paths):
- # Make sure the users .ssh dir is setup accordingly
- pwent = pwd.getpwnam(user)
- ssh_dir = os.path.join(pwent.pw_dir, '.ssh')
- ssh_dir = paths.join(False, ssh_dir)
- if not os.path.exists(ssh_dir):
- util.ensure_dir(ssh_dir, mode=0700)
- util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
+def users_ssh_info(username, paths):
+ pw_ent = pwd.getpwnam(username)
+ if not pw_ent:
+ raise RuntimeError("Unable to get ssh info for user %r" % (username))
+ ssh_dir = paths.join(False, os.path.join(pw_ent.pw_dir, '.ssh'))
+ return (ssh_dir, pw_ent)
- # Turn the keys given into actual entries
- parser = AuthKeyLineParser()
- key_entries = []
- for k in keys:
- key_entries.append(parser.parse(str(k), def_opt=key_prefix))
+def extract_authorized_keys(username, paths):
+ (ssh_dir, pw_ent) = users_ssh_info(username, paths)
sshd_conf_fn = paths.join(True, DEF_SSHD_CFG)
+ auth_key_fn = None
with util.SeLinuxGuard(ssh_dir, recursive=True):
try:
- # AuthorizedKeysFile may contain tokens
+ # The 'AuthorizedKeysFile' may contain tokens
# of the form %T which are substituted during connection set-up.
# The following tokens are defined: %% is replaced by a literal
# '%', %h is replaced by the home directory of the user being
# authenticated and %u is replaced by the username of that user.
ssh_cfg = parse_ssh_config_map(sshd_conf_fn)
- akeys = ssh_cfg.get("authorizedkeysfile", '')
- akeys = akeys.strip()
- if not akeys:
- akeys = "%h/.ssh/authorized_keys"
- akeys = akeys.replace("%h", pwent.pw_dir)
- akeys = akeys.replace("%u", user)
- akeys = akeys.replace("%%", '%')
- if not akeys.startswith('/'):
- akeys = os.path.join(pwent.pw_dir, akeys)
- authorized_keys = paths.join(False, akeys)
+ auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip()
+ if not auth_key_fn:
+ auth_key_fn = "%h/.ssh/authorized_keys"
+ auth_key_fn = auth_key_fn.replace("%h", pw_ent.pw_dir)
+ auth_key_fn = auth_key_fn.replace("%u", username)
+ auth_key_fn = auth_key_fn.replace("%%", '%')
+ if not auth_key_fn.startswith('/'):
+ auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn)
+ auth_key_fn = paths.join(False, auth_key_fn)
except (IOError, OSError):
- authorized_keys = os.path.join(ssh_dir, 'authorized_keys')
+ # Give up and use a default key filename
+ auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
util.logexc(LOG, ("Failed extracting 'AuthorizedKeysFile'"
" in ssh config"
- " from %s, using 'AuthorizedKeysFile' file"
- " %s instead"),
- sshd_conf_fn, authorized_keys)
-
- content = update_authorized_keys(authorized_keys, key_entries)
- util.ensure_dir(os.path.dirname(authorized_keys), mode=0700)
- util.write_file(authorized_keys, content, mode=0600)
- util.chownbyid(authorized_keys, pwent.pw_uid, pwent.pw_gid)
+ " from %r, using 'AuthorizedKeysFile' file"
+ " %r instead"),
+ sshd_conf_fn, auth_key_fn)
+ auth_key_entries = parse_authorized_keys(auth_key_fn)
+ return (auth_key_fn, auth_key_entries)
+
+
+def setup_user_keys(keys, username, key_prefix, paths):
+ # Make sure the users .ssh dir is setup accordingly
+ (ssh_dir, pwent) = users_ssh_info(username, paths)
+ if not os.path.isdir(ssh_dir):
+ util.ensure_dir(ssh_dir, mode=0700)
+ util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
+
+ # Turn the 'update' keys given into actual entries
+ parser = AuthKeyLineParser()
+ key_entries = []
+ for k in keys:
+ key_entries.append(parser.parse(str(k), def_opt=key_prefix))
+
+ # Extract the old and make the new
+ (auth_key_fn, auth_key_entries) = extract_authorized_keys(username, paths)
+ with util.SeLinuxGuard(ssh_dir, recursive=True):
+ content = update_authorized_keys(auth_key_entries, key_entries)
+ util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700)
+ util.write_file(auth_key_fn, content, mode=0600)
+ util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
class SshdConfigLine(object):
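
The '%h'/'%u'/'%%' expansion performed in extract_authorized_keys, reduced to a stand-alone helper for clarity (the substitution order mirrors the hunk above):

    import os

    def expand_auth_key_fn(value, home, user):
        value = value.replace('%h', home)
        value = value.replace('%u', user)
        value = value.replace('%%', '%')
        if not value.startswith('/'):
            value = os.path.join(home, value)
        return value

    print(expand_auth_key_fn('%h/.ssh/authorized_keys',
                             '/home/ubuntu', 'ubuntu'))
    # -> /home/ubuntu/.ssh/authorized_keys
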
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 2f6a566c..af902925 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -326,7 +326,7 @@ class Init(object):
'paths': self.paths,
'datasource': self.datasource,
}
- # TODO Hmmm, should we dynamically import these??
+ # TODO(harlowja) Hmmm, should we dynamically import these??
def_handlers = [
cc_part.CloudConfigPartHandler(**opts),
ss_part.ShellScriptPartHandler(**opts),
@@ -347,7 +347,7 @@ class Init(object):
sys.path.insert(0, idir)
# Ensure datasource fetched before activation (just incase)
- user_data_msg = self.datasource.get_userdata()
+ user_data_msg = self.datasource.get_userdata(True)
# This keeps track of all the active handlers
c_handlers = helpers.ContentHandlers()
@@ -519,7 +519,7 @@ class Modules(object):
" but not on %s distro. It may or may not work"
" correctly."), name, worked_distros, d_name)
# Use the configs logger and not our own
- # TODO: possibly check the module
+ # TODO(harlowja): possibly check the module
# for having a LOG attr and just give it back
# its own logger?
func_args = [name, self.cfg,
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index f5d01818..803ffc3a 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -23,9 +23,9 @@
import os
import email
+from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
-from email.mime.base import MIMEBase
from cloudinit import handlers
from cloudinit import log as logging
@@ -52,21 +52,23 @@ ARCHIVE_UNDEF_TYPE = "text/cloud-config"
# Msg header used to track attachments
ATTACHMENT_FIELD = 'Number-Attachments'
+# Only the following content types can have their launch index examined
+# in their payload; every other content type can still provide a header
+EXAMINE_FOR_LAUNCH_INDEX = ["text/cloud-config"]
+
class UserDataProcessor(object):
def __init__(self, paths):
self.paths = paths
def process(self, blob):
- base_msg = convert_string(blob)
- process_msg = MIMEMultipart()
- self._process_msg(base_msg, process_msg)
- return process_msg
+ accumulating_msg = MIMEMultipart()
+ self._process_msg(convert_string(blob), accumulating_msg)
+ return accumulating_msg
def _process_msg(self, base_msg, append_msg):
for part in base_msg.walk():
- # multipart/* are just containers
- if part.get_content_maintype() == 'multipart':
+ if is_skippable(part):
continue
ctype = None
@@ -82,6 +84,12 @@ class UserDataProcessor(object):
if ctype is None:
ctype = ctype_orig
+ if ctype != ctype_orig:
+ if CONTENT_TYPE in part:
+ part.replace_header(CONTENT_TYPE, ctype)
+ else:
+ part[CONTENT_TYPE] = ctype
+
if ctype in INCLUDE_TYPES:
self._do_include(payload, append_msg)
continue
@@ -90,6 +98,8 @@ class UserDataProcessor(object):
self._explode_archive(payload, append_msg)
continue
+ # Should this be happening? Shouldn't
+ # the part header be modified and not the base?
if CONTENT_TYPE in base_msg:
base_msg.replace_header(CONTENT_TYPE, ctype)
else:
@@ -97,11 +107,41 @@ class UserDataProcessor(object):
self._attach_part(append_msg, part)
+ def _attach_launch_index(self, msg):
+ header_idx = msg.get('Launch-Index', None)
+ payload_idx = None
+ if msg.get_content_type() in EXAMINE_FOR_LAUNCH_INDEX:
+ try:
+ # See if it has a launch-index field
+ # that might affect the final header
+ payload = util.load_yaml(msg.get_payload(decode=True))
+ if payload:
+ payload_idx = payload.get('launch-index')
+ except Exception:
+ pass
+ # For now a header value overrides one found in the payload
+ # (this precedence could plausibly be reversed)
+ if header_idx is not None:
+ payload_idx = header_idx
+ if payload_idx is not None:
+ try:
+ msg.add_header('Launch-Index', str(int(payload_idx)))
+ except (ValueError, TypeError):
+ pass
+
def _get_include_once_filename(self, entry):
entry_fn = util.hash_blob(entry, 'md5', 64)
return os.path.join(self.paths.get_ipath_cur('data'),
'urlcache', entry_fn)
+ def _process_before_attach(self, msg, attached_id):
+ if not msg.get_filename():
+ msg.add_header('Content-Disposition',
+ 'attachment', filename=PART_FN_TPL % (attached_id))
+ self._attach_launch_index(msg)
+
def _do_include(self, content, append_msg):
# Include a list of urls, one per line
# also support '#include <url here>'
@@ -148,7 +188,7 @@ class UserDataProcessor(object):
self._process_msg(new_msg, append_msg)
def _explode_archive(self, archive, append_msg):
- entries = util.load_yaml(archive, default=[], allowed=[list, set])
+ entries = util.load_yaml(archive, default=[], allowed=(list, set))
for ent in entries:
# ent can be one of:
# dict { 'filename' : 'value', 'content' :
@@ -159,7 +199,7 @@ class UserDataProcessor(object):
if isinstance(ent, (str, basestring)):
ent = {'content': ent}
if not isinstance(ent, (dict)):
- # TODO raise?
+ # TODO(harlowja) raise?
continue
content = ent.get('content', '')
@@ -178,9 +218,11 @@ class UserDataProcessor(object):
if 'filename' in ent:
msg.add_header('Content-Disposition',
'attachment', filename=ent['filename'])
+ if 'launch-index' in ent:
+ msg.add_header('Launch-Index', str(ent['launch-index']))
for header in list(ent.keys()):
- if header in ('content', 'filename', 'type'):
+ if header in ('content', 'filename', 'type', 'launch-index'):
continue
msg.add_header(header, ent[header])
@@ -204,21 +246,23 @@ class UserDataProcessor(object):
outer_msg.replace_header(ATTACHMENT_FIELD, str(fetched_count))
return fetched_count
- def _part_filename(self, _unnamed_part, count):
- return PART_FN_TPL % (count + 1)
-
def _attach_part(self, outer_msg, part):
"""
- Attach an part to an outer message. outermsg must be a MIMEMultipart.
- Modifies a header in the message to keep track of number of attachments.
+ Attach a message to an outer message. outer_msg must be a MIMEMultipart.
+ Modifies a header in the outer message to keep track of the number of
+ attachments.
"""
- cur_c = self._multi_part_count(outer_msg)
- if not part.get_filename():
- fn = self._part_filename(part, cur_c)
- part.add_header('Content-Disposition',
- 'attachment', filename=fn)
+ part_count = self._multi_part_count(outer_msg)
+ self._process_before_attach(part, part_count + 1)
outer_msg.attach(part)
- self._multi_part_count(outer_msg, cur_c + 1)
+ self._multi_part_count(outer_msg, part_count + 1)
+
+
+def is_skippable(part):
+ # multipart/* are just containers
+ part_maintype = part.get_content_maintype() or ''
+ return part_maintype.lower() == 'multipart'
# Converts a raw string into a mime message
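
Putting the launch-index pieces together: a cloud-archive like the one below yields one cloud-config part per instance index, since _explode_archive turns each entry's 'launch-index' key into a 'Launch-Index' MIME header that the datasource-side filter can then use to drop parts meant for other instances (hostnames invented for illustration):

    import textwrap

    USER_DATA = textwrap.dedent("""\
        #cloud-config-archive
        - type: text/cloud-config
          launch-index: 0
          content: |
            hostname: node-0
        - type: text/cloud-config
          launch-index: 1
          content: |
            hostname: node-1
        """)

    # UserDataProcessor(None).process(USER_DATA) returns a multipart
    # message whose two parts carry Launch-Index headers 0 and 1; paths
    # is only consulted for '#include-once' caching, so None suffices
    # for a quick local test.
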
diff --git a/cloudinit/util.py b/cloudinit/util.py
index a8c0cceb..33da73eb 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -24,8 +24,8 @@
from StringIO import StringIO
-import copy as obj_copy
import contextlib
+import copy as obj_copy
import errno
import glob
import grp
@@ -317,8 +317,9 @@ def multi_log(text, console=True, stderr=True,
else:
log.log(log_level, text)
+
def is_ipv4(instr):
- """ determine if input string is a ipv4 address. return boolean"""
+ """determine if input string is a ipv4 address. return boolean."""
toks = instr.split('.')
if len(toks) != 4:
return False
@@ -826,12 +827,12 @@ def get_cmdline_url(names=('cloud-config-url', 'url'),
def is_resolvable(name):
- """ determine if a url is resolvable, return a boolean
+ """determine if a url is resolvable, return a boolean
This also attempts to be resilient against dns redirection.
Note, that normal nsswitch resolution is used here. So in order
to avoid any utilization of 'search' entries in /etc/resolv.conf
- we have to append '.'.
+ we have to append '.'.
The top level 'invalid' domain is invalid per RFC. And example.com
should also not exist. The random entry will be resolved inside
@@ -847,7 +848,7 @@ def is_resolvable(name):
try:
result = socket.getaddrinfo(iname, None, 0, 0,
socket.SOCK_STREAM, socket.AI_CANONNAME)
- badresults[iname] = []
+ badresults[iname] = []
for (_fam, _stype, _proto, cname, sockaddr) in result:
badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
badips.add(sockaddr[0])
@@ -856,7 +857,7 @@ def is_resolvable(name):
_DNS_REDIRECT_IP = badips
if badresults:
LOG.debug("detected dns redirection: %s" % badresults)
-
+
try:
result = socket.getaddrinfo(name, None)
# check first result's sockaddr field
@@ -874,7 +875,7 @@ def get_hostname():
def is_resolvable_url(url):
- """ determine if this url is resolvable (existing or ip) """
+ """determine if this url is resolvable (existing or ip)."""
return (is_resolvable(urlparse.urlparse(url).hostname))
@@ -1105,7 +1106,7 @@ def hash_blob(blob, routine, mlen=None):
def rename(src, dest):
LOG.debug("Renaming %s to %s", src, dest)
- # TODO use a se guard here??
+ # TODO(harlowja) use a se guard here??
os.rename(src, dest)
@@ -1284,12 +1285,15 @@ def ensure_file(path, mode=0644):
write_file(path, content='', omode="ab", mode=mode)
-def chmod(path, mode):
- real_mode = None
+def safe_int(possible_int):
try:
- real_mode = int(mode)
+ return int(possible_int)
except (ValueError, TypeError):
- pass
+ return None
+
+
+def chmod(path, mode):
+ real_mode = safe_int(mode)
if path and real_mode:
with SeLinuxGuard(path):
os.chmod(path, real_mode)
@@ -1329,12 +1333,19 @@ def delete_dir_contents(dirname):
del_file(node_fullpath)
-def subp(args, data=None, rcs=None, env=None, capture=True, shell=False):
+def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
+ logstring=False):
if rcs is None:
rcs = [0]
try:
- LOG.debug(("Running command %s with allowed return codes %s"
- " (shell=%s, capture=%s)"), args, rcs, shell, capture)
+
+ if not logstring:
+ LOG.debug(("Running command %s with allowed return codes %s"
+ " (shell=%s, capture=%s)"), args, rcs, shell, capture)
+ else:
+ LOG.debug(("Running hidden command to protect sensitive "
+ "input/output logstring: %s"), logstring)
+
if not capture:
stdout = None
stderr = None
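
A usage sketch for the new logstring parameter: when a command's arguments or stdin are sensitive, the caller supplies a placeholder that is logged in place of the real command line. The chpasswd call below is illustrative of the kind of caller (such as cc_set_passwords) this was added for:

    from cloudinit import util

    # The debug log records only 'chpasswd for ubuntu', never the password.
    util.subp(['chpasswd'], data='ubuntu:s3cr3t\n',
              logstring='chpasswd for ubuntu')
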