author     Vlastimil Holer <vlastimil.holer@gmail.com>  2013-09-05 13:11:09 +0200
committer  Vlastimil Holer <vlastimil.holer@gmail.com>  2013-09-05 13:11:09 +0200
commit     744c779182cba32314f8435660a61c2711cb9f54 (patch)
tree       7871342bf0b122217b51493286bac982313b48da
parent     8a2a88e0bb4520eabe99b6686413a548f3d59652 (diff)
parent     1d27cd75eaaeef7b72f3be77de24da815c82a825 (diff)
Merged trunk lp:cloud-init
-rw-r--r--  ChangeLog | 36
-rw-r--r--  Makefile | 14
-rw-r--r--  Requires | 16
-rwxr-xr-x  bin/cloud-init | 4
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 47
-rw-r--r--  cloudinit/config/cc_bootcmd.py | 5
-rw-r--r--  cloudinit/config/cc_chef.py | 2
-rw-r--r--  cloudinit/config/cc_growpart.py | 277
-rw-r--r--  cloudinit/config/cc_landscape.py | 4
-rw-r--r--  cloudinit/config/cc_mounts.py | 3
-rw-r--r--  cloudinit/config/cc_phone_home.py | 19
-rw-r--r--  cloudinit/config/cc_power_state_change.py | 4
-rw-r--r--  cloudinit/config/cc_resizefs.py | 152
-rw-r--r--  cloudinit/config/cc_resolv_conf.py | 4
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py | 10
-rw-r--r--  cloudinit/config/cc_set_hostname.py | 6
-rw-r--r--  cloudinit/config/cc_set_passwords.py | 8
-rw-r--r--  cloudinit/config/cc_ssh.py | 14
-rw-r--r--  cloudinit/config/cc_ssh_import_id.py | 6
-rw-r--r--  cloudinit/config/cc_update_hostname.py | 6
-rw-r--r--  cloudinit/distros/__init__.py | 166
-rw-r--r--  cloudinit/distros/debian.py | 27
-rw-r--r--  cloudinit/distros/parsers/resolv_conf.py | 4
-rw-r--r--  cloudinit/distros/rhel.py | 244
-rw-r--r--  cloudinit/distros/rhel_util.py | 177
-rw-r--r--  cloudinit/distros/sles.py | 185
-rw-r--r--  cloudinit/ec2_utils.py | 4
-rw-r--r--  cloudinit/handlers/__init__.py | 94
-rw-r--r--  cloudinit/handlers/boot_hook.py | 20
-rw-r--r--  cloudinit/handlers/cloud_config.py | 136
-rw-r--r--  cloudinit/handlers/shell_script.py | 6
-rw-r--r--  cloudinit/handlers/upstart_job.py | 60
-rw-r--r--  cloudinit/helpers.py | 26
-rw-r--r--  cloudinit/log.py | 6
-rw-r--r--  cloudinit/mergers/__init__.py | 167
-rw-r--r--  cloudinit/mergers/m_dict.py | 86
-rw-r--r--  cloudinit/mergers/m_list.py | 87
-rw-r--r--  cloudinit/mergers/m_str.py | 44
-rw-r--r--  cloudinit/settings.py | 2
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py | 36
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 502
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py | 9
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 10
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 3
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py | 60
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 29
-rw-r--r--  cloudinit/sources/DataSourceNone.py | 4
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 14
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 244
-rw-r--r--  cloudinit/sources/__init__.py | 13
-rw-r--r--  cloudinit/ssh_util.py | 124
-rw-r--r--  cloudinit/stages.py | 148
-rw-r--r--  cloudinit/type_utils.py | 34
-rw-r--r--  cloudinit/url_helper.py | 268
-rw-r--r--  cloudinit/user_data.py | 105
-rw-r--r--  cloudinit/util.py | 321
-rw-r--r--  cloudinit/version.py | 2
-rw-r--r--  config/cloud.cfg | 1
-rw-r--r--  doc/examples/cloud-config-TODO.txt | 20
-rw-r--r--  doc/examples/cloud-config-datasources.txt | 23
-rw-r--r--  doc/examples/cloud-config-growpart.txt | 24
-rw-r--r--  doc/examples/cloud-config.txt | 21
-rw-r--r--  doc/merging.rst | 188
-rw-r--r--  doc/rtd/conf.py | 8
-rw-r--r--  doc/rtd/index.rst | 1
-rw-r--r-- [-rwxr-xr-x]  doc/rtd/static/logo.svg | 0
-rw-r--r--  doc/rtd/topics/merging.rst | 5
-rw-r--r--  doc/sources/azure/README.rst | 134
-rw-r--r--  doc/sources/smartos/README.rst | 72
-rwxr-xr-x  packages/bddeb | 28
-rwxr-xr-x  packages/brpm | 50
-rw-r--r--  packages/debian/control.in | 3
-rw-r--r--  packages/suse/cloud-init.spec.in | 162
-rwxr-xr-x  setup.py | 9
-rw-r--r--  sysvinit/debian/cloud-config | 64
-rw-r--r--  sysvinit/debian/cloud-final | 66
-rwxr-xr-x  sysvinit/debian/cloud-init | 64
-rw-r--r--  sysvinit/debian/cloud-init-local | 63
-rwxr-xr-x  sysvinit/redhat/cloud-config (renamed from sysvinit/cloud-config) | 11
-rwxr-xr-x  sysvinit/redhat/cloud-final (renamed from sysvinit/cloud-final) | 11
-rwxr-xr-x  sysvinit/redhat/cloud-init (renamed from sysvinit/cloud-init) | 11
-rwxr-xr-x  sysvinit/redhat/cloud-init-local (renamed from sysvinit/cloud-init-local) | 11
-rw-r--r--  templates/hosts.suse.tmpl | 24
-rw-r--r--  templates/sources.list.debian.tmpl | 28
-rw-r--r--  templates/sources.list.ubuntu.tmpl (renamed from templates/sources.list.tmpl) | 0
-rw-r--r--  tests/data/merge_sources/expected1.yaml | 1
-rw-r--r--  tests/data/merge_sources/expected10.yaml | 7
-rw-r--r--  tests/data/merge_sources/expected11.yaml | 5
-rw-r--r--  tests/data/merge_sources/expected12.yaml | 5
-rw-r--r--  tests/data/merge_sources/expected2.yaml | 3
-rw-r--r--  tests/data/merge_sources/expected3.yaml | 1
-rw-r--r--  tests/data/merge_sources/expected4.yaml | 2
-rw-r--r--  tests/data/merge_sources/expected5.yaml | 7
-rw-r--r--  tests/data/merge_sources/expected6.yaml | 9
-rw-r--r--  tests/data/merge_sources/expected7.yaml | 38
-rw-r--r--  tests/data/merge_sources/expected8.yaml | 7
-rw-r--r--  tests/data/merge_sources/expected9.yaml | 5
-rw-r--r--  tests/data/merge_sources/source1-1.yaml | 3
-rw-r--r--  tests/data/merge_sources/source1-2.yaml | 5
-rw-r--r--  tests/data/merge_sources/source10-1.yaml | 6
-rw-r--r--  tests/data/merge_sources/source10-2.yaml | 6
-rw-r--r--  tests/data/merge_sources/source11-1.yaml | 5
-rw-r--r--  tests/data/merge_sources/source11-2.yaml | 3
-rw-r--r--  tests/data/merge_sources/source11-3.yaml | 3
-rw-r--r--  tests/data/merge_sources/source12-1.yaml | 8
-rw-r--r--  tests/data/merge_sources/source12-2.yaml | 5
-rw-r--r--  tests/data/merge_sources/source2-1.yaml | 6
-rw-r--r--  tests/data/merge_sources/source2-2.yaml | 5
-rw-r--r--  tests/data/merge_sources/source3-1.yaml | 4
-rw-r--r--  tests/data/merge_sources/source3-2.yaml | 4
-rw-r--r--  tests/data/merge_sources/source4-1.yaml | 3
-rw-r--r--  tests/data/merge_sources/source4-2.yaml | 6
-rw-r--r--  tests/data/merge_sources/source5-1.yaml | 6
-rw-r--r--  tests/data/merge_sources/source5-2.yaml | 8
-rw-r--r--  tests/data/merge_sources/source6-1.yaml | 5
-rw-r--r--  tests/data/merge_sources/source6-2.yaml | 8
-rw-r--r--  tests/data/merge_sources/source7-1.yaml | 27
-rw-r--r--  tests/data/merge_sources/source7-2.yaml | 17
-rw-r--r--  tests/data/merge_sources/source8-1.yaml | 7
-rw-r--r--  tests/data/merge_sources/source8-2.yaml | 6
-rw-r--r--  tests/data/merge_sources/source9-1.yaml | 5
-rw-r--r--  tests/data/merge_sources/source9-2.yaml | 6
-rw-r--r--  tests/data/mountinfo_precise_ext4.txt | 24
-rw-r--r--  tests/data/mountinfo_raring_btrfs.txt | 13
-rw-r--r--  tests/unittests/helpers.py | 4
-rw-r--r--  tests/unittests/test__init__.py | 36
-rw-r--r--  tests/unittests/test_builtin_handlers.py | 22
-rw-r--r--  tests/unittests/test_datasource/test_azure.py | 331
-rw-r--r--  tests/unittests/test_datasource/test_configdrive.py | 5
-rw-r--r--  tests/unittests/test_datasource/test_maas.py | 12
-rw-r--r--  tests/unittests/test_datasource/test_nocloud.py | 4
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py | 273
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure.py | 106
-rw-r--r--  tests/unittests/test_handler/test_handler_growpart.py | 258
-rw-r--r--  tests/unittests/test_handler/test_handler_locale.py | 64
-rw-r--r--  tests/unittests/test_handler/test_handler_set_hostname.py | 14
-rw-r--r--  tests/unittests/test_handler/test_handler_timezone.py | 75
-rw-r--r--  tests/unittests/test_merging.py | 309
-rw-r--r--  tests/unittests/test_sshutil.py | 101
-rw-r--r--  tests/unittests/test_userdata.py | 207
-rw-r--r--  tests/unittests/test_util.py | 59
-rwxr-xr-x  tools/ccfg-merge-debug | 89
-rwxr-xr-x  tools/make-dist-tarball | 14
-rwxr-xr-x  tools/make-tarball | 12
-rwxr-xr-x  tools/read-dependencies | 8
-rwxr-xr-x  tools/read-version | 4
-rw-r--r--  upstart/cloud-init-nonet.conf | 63
147 files changed, 6477 insertions(+), 1093 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 7d1503bf..4b2770a4 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,20 @@
+0.7.3:
+ - fix omnibus chef installer (LP: #1182265) [Chris Wing]
+ - small fix for OVF datasource for iso transport on non-iso9660 filesystem
+ - determine if upstart version is suitable for
+ 'initctl reload-configuration' (LP: #1124384). If so, then invoke it.
+ - add Azure datasource.
+ - add support for SuSE / SLES [Juerg Haefliger]
+ - add a trailing newline to chpasswd input, which reportedly
+ caused a problem on rhel5 if missing.
+ - support individual MIME segments to be gzip compressed (LP: #1203203)
+ - always finalize handlers even if processing failed (LP: #1203368)
+ - support merging into cloud-config via jsonp. (LP: #1200476)
+ - add datasource 'SmartOS' for Joyent Cloud. Adds a dependency on serial.
+ - add 'log_time' helper to util for timing how long things take
+ which also reads from uptime. uptime is useful as clock may change during
+ boot due to ntp.
+ - prefer growpart resizer to 'parted resizepart' (LP: #1212492)
0.7.2:
- add a debian watch file
- add 'sudo' entry to ubuntu's default user (LP: #1080717)
@@ -40,6 +57,25 @@
- cloud-init-container.conf: ensure /run/network before running ifquery
- DataSourceNoCloud: allow user-data and meta-data to be specified
in config (LP: #1115833).
+ - improve debian support in sysvinit scripts, package build scripts, and
+ split sources.list template to be distro specific.
+ - support for resizing btrfs root filesystems [Blair Zajac]
+ - fix issue when writing ssh keys to .ssh/authorized_keys (LP: #1136343)
+ - upstart: trap the TERM signal in cloud-init-nonet.conf, so that dmesg or other
+ output does not get a 'killed by TERM signal' message.
+ - support resizing partitions via growpart or parted (LP: #1136936)
+ - allow specifying apt-get command in distro config ('apt_get_command')
+ - support different and user-suppliable merging algorithms for cloud-config
+ (LP: #1023179)
+ - use python-requests rather than urllib2. By using recent versions of
+ python-requests, we get https support (LP: #1067888).
+ - make apt-get invoke 'dist-upgrade' rather than 'upgrade' for
+ package_upgrade. (LP: #1164147)
+ - improvements for systemd with Fedora 18
+ - workaround 2.6 kernel issue that stopped blkid from showing /dev/sr0
+ - add new, backwards compatible merging syntax so merging of cloud-config
+ can be more useful.
+
0.7.1:
- sysvinit: fix missing dependency in cloud-init job for RHEL 5.6
- config-drive: map hostname to local-hostname (LP: #1061964)
diff --git a/Makefile b/Makefile
index b659836f..8cf1659a 100644
--- a/Makefile
+++ b/Makefile
@@ -8,6 +8,10 @@ YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )
CHANGELOG_VERSION=$(shell $(CWD)/tools/read-version)
CODE_VERSION=$(shell python -c "from cloudinit import version; print version.version_string()")
+ifeq ($(distro),)
+ distro = redhat
+endif
+
all: test check_version
pep8:
@@ -24,9 +28,9 @@ test:
check_version:
@if [ "$(CHANGELOG_VERSION)" != "$(CODE_VERSION)" ]; then \
- echo "Error: ChangeLog version $(CHANGELOG_VERSION)" \
- "not equal to code version $(CODE_VERSION)"; exit 2; \
- else true; fi
+ echo "Error: ChangeLog version $(CHANGELOG_VERSION)" \
+ "not equal to code version $(CODE_VERSION)"; exit 2; \
+ else true; fi
2to3:
2to3 $(PY_FILES)
@@ -37,9 +41,9 @@ clean:
yaml:
@$(CWD)/tools/validate-yaml.py $(YAML_FILES)
-
+
rpm:
- ./packages/brpm
+ ./packages/brpm --distro $(distro)
deb:
./packages/bddeb
diff --git a/Requires b/Requires
index 4f9311d5..f19c9691 100644
--- a/Requires
+++ b/Requires
@@ -10,10 +10,9 @@ PrettyTable
# datasource is removed, this is no longer needed
oauth
-# This is used to fetch the ec2 metadata into a easily
-# parseable format, instead of having to have cloud-init perform
-# those same fetchs and decodes and signing (...) that ec2 requires.
-boto
+# This one is currently used only by the SmartOS datasource. If that
+# datasource is removed, this is no longer needed
+pyserial
# This is only needed for places where we need to support configs in a manner
# that the built-in config parser is not sufficent (ie
@@ -26,3 +25,12 @@ pyyaml
# The new main entrypoint uses argparse instead of optparse
argparse
+
+# Requests handles ssl correctly!
+requests
+
+# Boto for ec2
+boto
+
+# For patching pieces of cloud-config together
+jsonpatch
diff --git a/bin/cloud-init b/bin/cloud-init
index c5a5b949..b4f9fd07 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -502,7 +502,9 @@ def main():
signal_handler.attach_handlers()
(name, functor) = args.action
- return functor(name, args)
+
+ return util.log_time(logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
+ get_uptime=True, func=functor, args=(name, args))
if __name__ == '__main__':
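
For context on the util.log_time() call above: a minimal Python sketch of the
contract it provides, inferred from the keyword arguments used in this patch
(logfunc, msg, get_uptime, func, args); the _uptime helper below is a
hypothetical stand-in, not code from this commit.

    import time

    def log_time(logfunc, msg, func, args=(), kwargs=None, get_uptime=False):
        # Run func(*args) and report the elapsed time through logfunc.
        # Uptime is tracked as well because the wall clock may be stepped
        # by ntp during boot (see the 'log_time' ChangeLog entry above).
        start = time.time()
        ustart = _uptime() if get_uptime else None
        try:
            return func(*args, **(kwargs or {}))
        finally:
            delta = time.time() - start
            if get_uptime:
                logfunc("%s took %.3f seconds (%.2f uptime)" %
                        (msg, delta, _uptime() - ustart))
            else:
                logfunc("%s took %.3f seconds" % (msg, delta))

    def _uptime():
        # The first field of /proc/uptime is seconds since boot (Linux-only).
        with open('/proc/uptime') as fh:
            return float(fh.read().split()[0])
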
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index f8664160..5a407016 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -27,7 +27,8 @@ from cloudinit import util
distros = ['ubuntu', 'debian']
PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n"
-PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy"
+APT_CONFIG_FN = "/etc/apt/apt.conf.d/94cloud-init-config"
+APT_PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy"
# A temporary shell program to get a given gpg key
# from a given keyserver
@@ -67,18 +68,10 @@ def handle(name, cfg, cloud, log, _args):
"security": "security.ubuntu.com/ubuntu"})
rename_apt_lists(old_mirrors, mirrors)
- # Set up any apt proxy
- proxy = cfg.get("apt_proxy", None)
- proxy_filename = PROXY_FN
- if proxy:
- try:
- # See man 'apt.conf'
- contents = PROXY_TPL % (proxy)
- util.write_file(proxy_filename, contents)
- except Exception as e:
- util.logexc(log, "Failed to write proxy to %s", proxy_filename)
- elif os.path.isfile(proxy_filename):
- util.del_file(proxy_filename)
+ try:
+ apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
+ except Exception as e:
+ log.warn("failed to proxy or apt config info: %s", e)
# Process 'apt_sources'
if 'apt_sources' in cfg:
@@ -140,10 +133,13 @@ def get_release():
def generate_sources_list(codename, mirrors, cloud, log):
- template_fn = cloud.get_template_filename('sources.list')
+ template_fn = cloud.get_template_filename('sources.list.%s' %
+ (cloud.distro.name))
if not template_fn:
- log.warn("No template found, not rendering /etc/apt/sources.list")
- return
+ template_fn = cloud.get_template_filename('sources.list')
+ if not template_fn:
+ log.warn("No template found, not rendering /etc/apt/sources.list")
+ return
params = {'codename': codename}
for k in mirrors:
@@ -253,3 +249,22 @@ def find_apt_mirror_info(cloud, cfg):
mirror_info.update({'primary': mirror})
return mirror_info
+
+
+def apply_apt_config(cfg, proxy_fname, config_fname):
+ # Set up any apt proxy
+ cfgs = (('apt_proxy', 'Acquire::HTTP::Proxy "%s";'),
+ ('apt_http_proxy', 'Acquire::HTTP::Proxy "%s";'),
+ ('apt_ftp_proxy', 'Acquire::FTP::Proxy "%s";'),
+ ('apt_https_proxy', 'Acquire::HTTPS::Proxy "%s";'))
+
+ proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
+ if len(proxies):
+ util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
+ elif os.path.isfile(proxy_fname):
+ util.del_file(proxy_fname)
+
+ if cfg.get('apt_config', None):
+ util.write_file(config_fname, cfg.get('apt_config'))
+ elif os.path.isfile(config_fname):
+ util.del_file(config_fname)
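
A worked example of the new apply_apt_config() (values hypothetical): each
configured proxy key becomes one Acquire line in the proxy file, and
'apt_config' is written verbatim to the config file.

    cfg = {
        'apt_proxy': 'http://proxy.example.com:3128/',
        'apt_https_proxy': 'https://proxy.example.com:3129/',
    }
    # apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN) would write to
    # /etc/apt/apt.conf.d/95cloud-init-proxy:
    #   Acquire::HTTP::Proxy "http://proxy.example.com:3128/";
    #   Acquire::HTTPS::Proxy "https://proxy.example.com:3129/";
    # With no proxy keys set, an existing 95cloud-init-proxy is deleted.
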
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 896cb4d0..3ac22967 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
@@ -50,6 +50,5 @@ def handle(name, cfg, cloud, log, _args):
cmd = ['/bin/sh', tmpf.name]
util.subp(cmd, env=env, capture=False)
except:
- util.logexc(log,
- ("Failed to run bootcmd module %s"), name)
+ util.logexc(log, "Failed to run bootcmd module %s", name)
raise
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 607f789e..727769cd 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -110,7 +110,7 @@ def handle(name, cfg, cloud, log, _args):
with util.tempdir() as tmpd:
# use tmpd over tmpfile to avoid 'Text file busy' on execute
tmpf = "%s/chef-omnibus-install" % tmpd
- util.write_file(tmpf, content, mode=0700)
+ util.write_file(tmpf, str(content), mode=0700)
util.subp([tmpf], capture=False)
else:
log.warn("Unknown chef install type %s", install_type)
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
new file mode 100644
index 00000000..2d54aabf
--- /dev/null
+++ b/cloudinit/config/cc_growpart.py
@@ -0,0 +1,277 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import os.path
+import re
+import stat
+
+from cloudinit import log as logging
+from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
+
+frequency = PER_ALWAYS
+
+DEFAULT_CONFIG = {
+ 'mode': 'auto',
+ 'devices': ['/'],
+}
+
+
+def enum(**enums):
+ return type('Enum', (), enums)
+
+
+RESIZE = enum(SKIPPED="SKIPPED", CHANGED="CHANGED", NOCHANGE="NOCHANGE",
+ FAILED="FAILED")
+
+LOG = logging.getLogger(__name__)
+
+
+def resizer_factory(mode):
+ resize_class = None
+ if mode == "auto":
+ for (_name, resizer) in RESIZERS:
+ cur = resizer()
+ if cur.available():
+ resize_class = cur
+ break
+
+ if not resize_class:
+ raise ValueError("No resizers available")
+
+ else:
+ mmap = {}
+ for (k, v) in RESIZERS:
+ mmap[k] = v
+
+ if mode not in mmap:
+ raise TypeError("unknown resize mode %s" % mode)
+
+ mclass = mmap[mode]()
+ if mclass.available():
+ resize_class = mclass
+
+ if not resize_class:
+ raise ValueError("mode %s not available" % mode)
+
+ return resize_class
+
+
+class ResizeFailedException(Exception):
+ pass
+
+
+class ResizeParted(object):
+ def available(self):
+ myenv = os.environ.copy()
+ myenv['LANG'] = 'C'
+
+ try:
+ (out, _err) = util.subp(["parted", "--help"], env=myenv)
+ if re.search(r"COMMAND.*resizepart\s+", out, re.DOTALL):
+ return True
+
+ except util.ProcessExecutionError:
+ pass
+ return False
+
+ def resize(self, diskdev, partnum, partdev):
+ before = get_size(partdev)
+ try:
+ util.subp(["parted", diskdev, "resizepart", partnum])
+ except util.ProcessExecutionError as e:
+ raise ResizeFailedException(e)
+
+ return (before, get_size(partdev))
+
+
+class ResizeGrowPart(object):
+ def available(self):
+ myenv = os.environ.copy()
+ myenv['LANG'] = 'C'
+
+ try:
+ (out, _err) = util.subp(["growpart", "--help"], env=myenv)
+ if re.search(r"--update\s+", out, re.DOTALL):
+ return True
+
+ except util.ProcessExecutionError:
+ pass
+ return False
+
+ def resize(self, diskdev, partnum, partdev):
+ before = get_size(partdev)
+ try:
+ util.subp(["growpart", '--dry-run', diskdev, partnum])
+ except util.ProcessExecutionError as e:
+ if e.exit_code != 1:
+ util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)",
+ diskdev, partnum)
+ raise ResizeFailedException(e)
+ return (before, before)
+
+ try:
+ util.subp(["growpart", diskdev, partnum])
+ except util.ProcessExecutionError as e:
+ util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
+ raise ResizeFailedException(e)
+
+ return (before, get_size(partdev))
+
+
+def get_size(filename):
+ fd = os.open(filename, os.O_RDONLY)
+ try:
+ return os.lseek(fd, 0, os.SEEK_END)
+ finally:
+ os.close(fd)
+
+
+def device_part_info(devpath):
+ # convert an entry in /dev/ to parent disk and partition number
+
+ # input of /dev/vdb or /dev/disk/by-label/foo
+ # rpath is hopefully a real-ish path in /dev (vda, sdb..)
+ rpath = os.path.realpath(devpath)
+
+ bname = os.path.basename(rpath)
+ syspath = "/sys/class/block/%s" % bname
+
+ if not os.path.exists(syspath):
+ raise ValueError("%s had no syspath (%s)" % (devpath, syspath))
+
+ ptpath = os.path.join(syspath, "partition")
+ if not os.path.exists(ptpath):
+ raise TypeError("%s not a partition" % devpath)
+
+ ptnum = util.load_file(ptpath).rstrip()
+
+ # for a partition, real syspath is something like:
+ # /sys/devices/pci0000:00/0000:00:04.0/virtio1/block/vda/vda1
+ rsyspath = os.path.realpath(syspath)
+ disksyspath = os.path.dirname(rsyspath)
+
+ diskmajmin = util.load_file(os.path.join(disksyspath, "dev")).rstrip()
+ diskdevpath = os.path.realpath("/dev/block/%s" % diskmajmin)
+
+ # diskdevpath has something like 253:0
+ # and udev has put links in /dev/block/253:0 to the device name in /dev/
+ return (diskdevpath, ptnum)
+
+
+def devent2dev(devent):
+ if devent.startswith("/dev/"):
+ return devent
+ else:
+ result = util.get_mount_info(devent)
+ if not result:
+ raise ValueError("Could not determine device of '%s' % dev_ent")
+ return result[0]
+
+
+def resize_devices(resizer, devices):
+ # returns a tuple of tuples containing (entry-in-devices, action, message)
+ info = []
+ for devent in devices:
+ try:
+ blockdev = devent2dev(devent)
+ except ValueError as e:
+ info.append((devent, RESIZE.SKIPPED,
+ "unable to convert to device: %s" % e,))
+ continue
+
+ try:
+ statret = os.stat(blockdev)
+ except OSError as e:
+ info.append((devent, RESIZE.SKIPPED,
+ "stat of '%s' failed: %s" % (blockdev, e),))
+ continue
+
+ if not stat.S_ISBLK(statret.st_mode):
+ info.append((devent, RESIZE.SKIPPED,
+ "device '%s' not a block device" % blockdev,))
+ continue
+
+ try:
+ (disk, ptnum) = device_part_info(blockdev)
+ except (TypeError, ValueError) as e:
+ info.append((devent, RESIZE.SKIPPED,
+ "device_part_info(%s) failed: %s" % (blockdev, e),))
+ continue
+
+ try:
+ (old, new) = resizer.resize(disk, ptnum, blockdev)
+ if old == new:
+ info.append((devent, RESIZE.NOCHANGE,
+ "no change necessary (%s, %s)" % (disk, ptnum),))
+ else:
+ info.append((devent, RESIZE.CHANGED,
+ "changed (%s, %s) from %s to %s" %
+ (disk, ptnum, old, new),))
+
+ except ResizeFailedException as e:
+ info.append((devent, RESIZE.FAILED,
+ "failed to resize: disk=%s, ptnum=%s: %s" %
+ (disk, ptnum, e),))
+
+ return info
+
+
+def handle(_name, cfg, _cloud, log, _args):
+ if 'growpart' not in cfg:
+ log.debug("No 'growpart' entry in cfg. Using default: %s" %
+ DEFAULT_CONFIG)
+ cfg['growpart'] = DEFAULT_CONFIG
+
+ mycfg = cfg.get('growpart')
+ if not isinstance(mycfg, dict):
+ log.warn("'growpart' in config was not a dict")
+ return
+
+ mode = mycfg.get('mode', "auto")
+ if util.is_false(mode):
+ log.debug("growpart disabled: mode=%s" % mode)
+ return
+
+    devices = util.get_cfg_option_list(mycfg, "devices", ["/"])
+ if not len(devices):
+ log.debug("growpart: empty device list")
+ return
+
+ try:
+ resizer = resizer_factory(mode)
+ except (ValueError, TypeError) as e:
+ log.debug("growpart unable to find resizer for '%s': %s" % (mode, e))
+ if mode != "auto":
+ raise e
+ return
+
+ resized = util.log_time(logfunc=log.debug, msg="resize_devices",
+ func=resize_devices, args=(resizer, devices))
+ for (entry, action, msg) in resized:
+ if action == RESIZE.CHANGED:
+ log.info("'%s' resized: %s" % (entry, msg))
+ else:
+ log.debug("'%s' %s: %s" % (entry, action, msg))
+
+# LP: 1212444 FIXME re-order and favor ResizeParted
+#RESIZERS = (('growpart', ResizeGrowPart),)
+RESIZERS = (('growpart', ResizeGrowPart), ('parted', ResizeParted))
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 2efdff79..8a709677 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -24,6 +24,7 @@ from StringIO import StringIO
from configobj import ConfigObj
+from cloudinit import type_utils
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -58,7 +59,8 @@ def handle(_name, cfg, cloud, log, _args):
if not isinstance(ls_cloudcfg, (dict)):
raise RuntimeError(("'landscape' key existed in config,"
" but not a dictionary type,"
- " is a %s instead"), util.obj_name(ls_cloudcfg))
+ " is a %s instead"),
+ type_utils.obj_name(ls_cloudcfg))
if not ls_cloudcfg:
return
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 9010d97f..390ba711 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -22,6 +22,7 @@ from string import whitespace # pylint: disable=W0402
import re
+from cloudinit import type_utils
from cloudinit import util
# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
@@ -60,7 +61,7 @@ def handle(_name, cfg, cloud, log, _args):
# skip something that wasn't a list
if not isinstance(cfgmnt[i], list):
log.warn("Mount option %s not a list, got a %s instead",
- (i + 1), util.obj_name(cfgmnt[i]))
+ (i + 1), type_utils.obj_name(cfgmnt[i]))
continue
startname = str(cfgmnt[i][0])
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 886487f8..2e058ccd 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
@@ -19,7 +19,6 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit import templater
-from cloudinit import url_helper as uhelp
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -66,8 +65,8 @@ def handle(name, cfg, cloud, log, args):
tries = int(tries)
except:
tries = 10
- util.logexc(log, ("Configuration entry 'tries'"
- " is not an integer, using %s instead"), tries)
+ util.logexc(log, "Configuration entry 'tries' is not an integer, "
+ "using %s instead", tries)
if post_list == "all":
post_list = POST_LIST_ALL
@@ -86,8 +85,8 @@ def handle(name, cfg, cloud, log, args):
try:
all_keys[n] = util.load_file(path)
except:
- util.logexc(log, ("%s: failed to open, can not"
- " phone home that data!"), path)
+ util.logexc(log, "%s: failed to open, can not phone home that "
+ "data!", path)
submit_keys = {}
for k in post_list:
@@ -112,7 +111,9 @@ def handle(name, cfg, cloud, log, args):
}
url = templater.render_string(url, url_params)
try:
- uhelp.readurl(url, data=real_submit_keys, retries=tries, sec_between=3)
+ util.read_file_or_url(url, data=real_submit_keys,
+ retries=tries, sec_between=3,
+ ssl_details=util.fetch_ssl_details(cloud.paths))
except:
- util.logexc(log, ("Failed to post phone home data to"
- " %s in %s tries"), url, tries)
+ util.logexc(log, "Failed to post phone home data to %s in %s tries",
+ url, tries)
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index aefa3aff..188047e5 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -75,7 +75,7 @@ def load_power_state(cfg):
','.join(opt_map.keys()))
delay = pstate.get("delay", "now")
- if delay != "now" and not re.match("\+[0-9]+", delay):
+ if delay != "now" and not re.match(r"\+[0-9]+", delay):
raise TypeError("power_state[delay] must be 'now' or '+m' (minutes).")
args = ["shutdown", opt_map[mode], delay]
@@ -100,7 +100,7 @@ def execmd(exe_args, output=None, data_in=None):
proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,
stdout=output, stderr=subprocess.STDOUT)
proc.communicate(data_in)
- ret = proc.returncode
+ ret = proc.returncode # pylint: disable=E1101
except Exception:
doexit(EXIT_FAIL)
doexit(ret)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 70294eda..56040fdd 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -18,50 +18,37 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import errno
import os
import stat
-import time
from cloudinit.settings import PER_ALWAYS
from cloudinit import util
frequency = PER_ALWAYS
-RESIZE_FS_PREFIXES_CMDS = [
- ('ext', 'resize2fs'),
- ('xfs', 'xfs_growfs'),
-]
-NOBLOCK = "noblock"
+def _resize_btrfs(mount_point, devpth): # pylint: disable=W0613
+ return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
-def nodeify_path(devpth, where, log):
- try:
- st_dev = os.stat(where).st_dev
- dev = os.makedev(os.major(st_dev), os.minor(st_dev))
- os.mknod(devpth, 0400 | stat.S_IFBLK, dev)
- return st_dev
- except:
- if util.is_container():
- log.debug("Inside container, ignoring mknod failure in resizefs")
- return
- log.warn("Failed to make device node to resize %s at %s",
- where, devpth)
- raise
+def _resize_ext(mount_point, devpth): # pylint: disable=W0613
+ return ('resize2fs', devpth)
-def get_fs_type(st_dev, path, log):
- try:
- dev_entries = util.find_devs_with(tag='TYPE', oformat='value',
- no_cache=True, path=path)
- if not dev_entries:
- return None
- return dev_entries[0].strip()
- except util.ProcessExecutionError:
- util.logexc(log, ("Failed to get filesystem type"
- " of maj=%s, min=%s for path %s"),
- os.major(st_dev), os.minor(st_dev), path)
- raise
+def _resize_xfs(mount_point, devpth): # pylint: disable=W0613
+ return ('xfs_growfs', devpth)
+
+# Do not use a dictionary as these commands should be able to be used
+# for multiple filesystem types if possible, e.g. one command for
+# ext2, ext3 and ext4.
+RESIZE_FS_PREFIXES_CMDS = [
+ ('btrfs', _resize_btrfs),
+ ('ext', _resize_ext),
+ ('xfs', _resize_xfs),
+]
+
+NOBLOCK = "noblock"
def handle(name, cfg, _cloud, log, args):
@@ -80,62 +67,77 @@ def handle(name, cfg, _cloud, log, args):
# TODO(harlowja): allow what is to be resized to be configurable??
resize_what = "/"
- with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.",
- dir=resize_root_d, delete=True) as tfh:
- devpth = tfh.name
-
- # Delete the file so that mknod will work
- # but don't change the file handle to know that its
- # removed so that when a later call that recreates
- # occurs this temporary file will still benefit from
- # auto deletion
- tfh.unlink_now()
-
- st_dev = nodeify_path(devpth, resize_what, log)
- fs_type = get_fs_type(st_dev, devpth, log)
- if not fs_type:
- log.warn("Could not determine filesystem type of %s", resize_what)
- return
-
- resizer = None
- fstype_lc = fs_type.lower()
- for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
- if fstype_lc.startswith(pfix):
- resizer = root_cmd
- break
-
- if not resizer:
- log.warn("Not resizing unknown filesystem type %s for %s",
- fs_type, resize_what)
- return
-
- log.debug("Resizing %s (%s) using %s", resize_what, fs_type, resizer)
- resize_cmd = [resizer, devpth]
-
- if resize_root == NOBLOCK:
- # Fork to a child that will run
- # the resize command
- util.fork_cb(do_resize, resize_cmd, log)
- # Don't delete the file now in the parent
- tfh.delete = False
+ result = util.get_mount_info(resize_what, log)
+ if not result:
+ log.warn("Could not determine filesystem type of %s", resize_what)
+ return
+
+ (devpth, fs_type, mount_point) = result
+
+ # Ensure the path is a block device.
+ info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
+ log.debug("resize_info: %s" % info)
+
+ try:
+ statret = os.stat(devpth)
+ except OSError as exc:
+ if util.is_container() and exc.errno == errno.ENOENT:
+ log.debug("Device '%s' did not exist in container. "
+ "cannot resize: %s" % (devpth, info))
+ elif exc.errno == errno.ENOENT:
+ log.warn("Device '%s' did not exist. cannot resize: %s" %
+ (devpth, info))
+ else:
+ raise exc
+ return
+
+ if not stat.S_ISBLK(statret.st_mode):
+ if util.is_container():
+ log.debug("device '%s' not a block device in container."
+ " cannot resize: %s" % (devpth, info))
else:
- do_resize(resize_cmd, log)
+ log.warn("device '%s' not a block device. cannot resize: %s" %
+ (devpth, info))
+ return
+
+ resizer = None
+ fstype_lc = fs_type.lower()
+ for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
+ if fstype_lc.startswith(pfix):
+ resizer = root_cmd
+ break
+
+ if not resizer:
+ log.warn("Not resizing unknown filesystem type %s for %s",
+ fs_type, resize_what)
+ return
+
+ resize_cmd = resizer(resize_what, devpth)
+ log.debug("Resizing %s (%s) using %s", resize_what, fs_type,
+ ' '.join(resize_cmd))
+
+ if resize_root == NOBLOCK:
+ # Fork to a child that will run
+ # the resize command
+        # fork_cb needs a callable, so defer log_time via a lambda; this
+        # way the resize and its timing run in the forked child, not here
+        util.fork_cb(lambda: util.log_time(
+            logfunc=log.debug, msg="backgrounded Resizing",
+            func=do_resize, args=(resize_cmd, log)))
+ else:
+ util.log_time(logfunc=log.debug, msg="Resizing",
+ func=do_resize, args=(resize_cmd, log))
action = 'Resized'
if resize_root == NOBLOCK:
action = 'Resizing (via forking)'
- log.debug("%s root filesystem (type=%s, maj=%i, min=%i, val=%s)",
- action, fs_type, os.major(st_dev), os.minor(st_dev), resize_root)
+ log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type,
+ resize_root)
def do_resize(resize_cmd, log):
- start = time.time()
try:
util.subp(resize_cmd)
except util.ProcessExecutionError:
util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
raise
- tot_time = time.time() - start
- log.debug("Resizing took %.3f seconds", tot_time)
# TODO(harlowja): Should we add a fsck check after this to make
# sure we didn't corrupt anything?
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 8a460f7e..879b62b1 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -1,8 +1,10 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2013 Craig Tracey
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Craig Tracey <craigtracey@gmail.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -53,7 +55,7 @@ from cloudinit import util
frequency = PER_INSTANCE
-distros = ['fedora', 'rhel']
+distros = ['fedora', 'rhel', 'sles']
def generate_resolv_conf(cloud, log, params):
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 4bf18516..c771728d 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
@@ -64,8 +64,8 @@ def handle(name, _cfg, cloud, log, _args):
" raw userdata"), name, MY_HOOKNAME)
return
except:
- util.logexc(log, ("Failed to parse query string %s"
- " into a dictionary"), ud)
+ util.logexc(log, "Failed to parse query string %s into a dictionary",
+ ud)
raise
wrote_fns = []
@@ -86,8 +86,8 @@ def handle(name, _cfg, cloud, log, _args):
wrote_fns.append(fname)
except Exception as e:
captured_excps.append(e)
- util.logexc(log, "%s failed to read %s and write %s",
- MY_NAME, url, fname)
+ util.logexc(log, "%s failed to read %s and write %s", MY_NAME, url,
+ fname)
if wrote_fns:
log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns))
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 2b32fc94..5d7f4331 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
@@ -32,6 +32,6 @@ def handle(name, cfg, cloud, log, _args):
log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
cloud.distro.set_hostname(hostname, fqdn)
except Exception:
- util.logexc(log, "Failed to set the hostname to %s (%s)",
- fqdn, hostname)
+ util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn,
+ hostname)
raise
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index c6bf62fd..56a36906 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
@@ -75,14 +75,14 @@ def handle(_name, cfg, cloud, log, args):
plist_in.append("%s:%s" % (u, p))
users.append(u)
- ch_in = '\n'.join(plist_in)
+ ch_in = '\n'.join(plist_in) + '\n'
try:
log.debug("Changing password for %s:", users)
util.subp(['chpasswd'], ch_in)
except Exception as e:
errors.append(e)
- util.logexc(log,
- "Failed to set passwords with chpasswd for %s", users)
+ util.logexc(log, "Failed to set passwords with chpasswd for %s",
+ users)
if len(randlist):
blurb = ("Set the following 'random' passwords\n",
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index b623d476..64a5e3cb 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
@@ -85,8 +85,8 @@ def handle(_name, cfg, cloud, log, _args):
util.subp(cmd, capture=False)
log.debug("Generated a key for %s from %s", pair[0], pair[1])
except:
- util.logexc(log, ("Failed generated a key"
- " for %s from %s"), pair[0], pair[1])
+ util.logexc(log, "Failed generated a key for %s from %s",
+ pair[0], pair[1])
else:
# if not, generate them
genkeys = util.get_cfg_option_list(cfg,
@@ -102,8 +102,8 @@ def handle(_name, cfg, cloud, log, _args):
with util.SeLinuxGuard("/etc/ssh", recursive=True):
util.subp(cmd, capture=False)
except:
- util.logexc(log, ("Failed generating key type"
- " %s to file %s"), keytype, keyfile)
+ util.logexc(log, "Failed generating key type %s to "
+ "file %s", keytype, keyfile)
try:
(users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
@@ -126,7 +126,7 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
keys = set(keys)
if user:
- ssh_util.setup_user_keys(keys, user, '')
+ ssh_util.setup_user_keys(keys, user)
if disable_root:
if not user:
@@ -135,4 +135,4 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
else:
key_prefix = ''
- ssh_util.setup_user_keys(keys, 'root', key_prefix)
+ ssh_util.setup_user_keys(keys, 'root', options=key_prefix)
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 83af36e9..50d96e15 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
@@ -71,8 +71,8 @@ def handle(_name, cfg, cloud, log, args):
try:
import_ssh_ids(import_ids, user, log)
except Exception as exc:
- util.logexc(log, "ssh-import-id failed for: %s %s" %
- (user, import_ids), exc)
+ util.logexc(log, "ssh-import-id failed for: %s %s", user,
+ import_ids)
elist.append(exc)
if len(elist):
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index 52225cd8..e396ba13 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
@@ -38,6 +38,6 @@ def handle(name, cfg, cloud, log, _args):
log.debug("Updating hostname to %s (%s)", fqdn, hostname)
cloud.distro.update_hostname(hostname, fqdn, prev_fn)
except Exception:
- util.logexc(log, "Failed to update the hostname to %s (%s)",
- fqdn, hostname)
+ util.logexc(log, "Failed to update the hostname to %s (%s)", fqdn,
+ hostname)
raise
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 0db4aac7..74e95797 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
@@ -31,13 +31,15 @@ import re
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import ssh_util
+from cloudinit import type_utils
from cloudinit import util
from cloudinit.distros.parsers import hosts
OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
- 'redhat': ['fedora', 'rhel']
+ 'redhat': ['fedora', 'rhel'],
+ 'suse': ['sles']
}
LOG = logging.getLogger(__name__)
@@ -45,9 +47,11 @@ LOG = logging.getLogger(__name__)
class Distro(object):
__metaclass__ = abc.ABCMeta
+
hosts_fn = "/etc/hosts"
ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users"
hostname_conf_fn = "/etc/hostname"
+ tz_zone_dir = "/usr/share/zoneinfo"
def __init__(self, name, cfg, paths):
self._paths = paths
@@ -64,6 +68,13 @@ class Distro(object):
# to write this blob out in a distro format
raise NotImplementedError()
+ def _find_tz_file(self, tz):
+ tz_file = os.path.join(self.tz_zone_dir, str(tz))
+ if not os.path.isfile(tz_file):
+ raise IOError(("Invalid timezone %s,"
+ " no file found at %s") % (tz, tz_file))
+ return tz_file
+
def get_option(self, opt_name, default=None):
return self._cfg.get(opt_name, default)
@@ -73,7 +84,7 @@ class Distro(object):
self._apply_hostname(hostname)
@abc.abstractmethod
- def package_command(self, cmd, args=None):
+ def package_command(self, cmd, args=None, pkgs=None):
raise NotImplementedError()
@abc.abstractmethod
@@ -141,8 +152,8 @@ class Distro(object):
try:
util.subp(['hostname', hostname])
except util.ProcessExecutionError:
- util.logexc(LOG, ("Failed to non-persistently adjust"
- " the system hostname to %s"), hostname)
+ util.logexc(LOG, "Failed to non-persistently adjust the system "
+ "hostname to %s", hostname)
@abc.abstractmethod
def _select_hostname(self, hostname, fqdn):
@@ -199,8 +210,8 @@ class Distro(object):
try:
self._write_hostname(hostname, fn)
except IOError:
- util.logexc(LOG, "Failed to write hostname %s to %s",
- hostname, fn)
+ util.logexc(LOG, "Failed to write hostname %s to %s", hostname,
+ fn)
if (sys_hostname and prev_hostname and
sys_hostname != prev_hostname):
@@ -280,15 +291,16 @@ class Distro(object):
def get_default_user(self):
return self.get_option('default_user')
- def create_user(self, name, **kwargs):
+ def add_user(self, name, **kwargs):
"""
- Creates users for the system using the GNU passwd tools. This
- will work on an GNU system. This should be overriden on
- distros where useradd is not desirable or not available.
+ Add a user to the system using standard GNU tools
"""
+ if util.is_user(name):
+ LOG.info("User %s already exists, skipping." % name)
+ return
adduser_cmd = ['useradd', name]
- x_adduser_cmd = ['useradd', name]
+ log_adduser_cmd = ['useradd', name]
# Since we are creating users, we want to carefully validate the
# inputs. If something goes wrong, we can end up with a system
@@ -305,63 +317,65 @@ class Distro(object):
"selinux_user": '--selinux-user',
}
- adduser_opts_flags = {
+ adduser_flags = {
"no_user_group": '--no-user-group',
"system": '--system',
"no_log_init": '--no-log-init',
- "no_create_home": "-M",
}
- redact_fields = ['passwd']
+ redact_opts = ['passwd']
+
+ # Check the values and create the command
+ for key, val in kwargs.iteritems():
+
+ if key in adduser_opts and val and isinstance(val, str):
+ adduser_cmd.extend([adduser_opts[key], val])
- # Now check the value and create the command
- for option in kwargs:
- value = kwargs[option]
- if option in adduser_opts and value \
- and isinstance(value, str):
- adduser_cmd.extend([adduser_opts[option], value])
- # Redact certain fields from the logs
- if option in redact_fields:
- x_adduser_cmd.extend([adduser_opts[option], 'REDACTED'])
- else:
- x_adduser_cmd.extend([adduser_opts[option], value])
- elif option in adduser_opts_flags and value:
- adduser_cmd.append(adduser_opts_flags[option])
# Redact certain fields from the logs
- if option in redact_fields:
- x_adduser_cmd.append('REDACTED')
+ if key in redact_opts:
+ log_adduser_cmd.extend([adduser_opts[key], 'REDACTED'])
else:
- x_adduser_cmd.append(adduser_opts_flags[option])
+ log_adduser_cmd.extend([adduser_opts[key], val])
- # Default to creating home directory unless otherwise directed
- # Also, we do not create home directories for system users.
- if "no_create_home" not in kwargs and "system" not in kwargs:
- adduser_cmd.append('-m')
+ elif key in adduser_flags and val:
+ adduser_cmd.append(adduser_flags[key])
+ log_adduser_cmd.append(adduser_flags[key])
- # Create the user
- if util.is_user(name):
- LOG.warn("User %s already exists, skipping." % name)
+ # Don't create the home directory if directed so or if the user is a
+ # system user
+ if 'no_create_home' in kwargs or 'system' in kwargs:
+ adduser_cmd.append('-M')
+ log_adduser_cmd.append('-M')
else:
- LOG.debug("Adding user named %s", name)
- try:
- util.subp(adduser_cmd, logstring=x_adduser_cmd)
- except Exception as e:
- util.logexc(LOG, "Failed to create user %s due to error.", e)
- raise e
+ adduser_cmd.append('-m')
+ log_adduser_cmd.append('-m')
+
+ # Run the command
+ LOG.debug("Adding user %s", name)
+ try:
+ util.subp(adduser_cmd, logstring=log_adduser_cmd)
+ except Exception as e:
+ util.logexc(LOG, "Failed to create user %s", name)
+ raise e
+
+ def create_user(self, name, **kwargs):
+ """
+ Creates users for the system using the GNU passwd tools. This
+ will work on an GNU system. This should be overriden on
+ distros where useradd is not desirable or not available.
+ """
- # Set password if plain-text password provided
+ # Add the user
+ self.add_user(name, **kwargs)
+
+ # Set password if plain-text password provided and non-empty
if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
self.set_passwd(name, kwargs['plain_text_passwd'])
# Default locking down the account. 'lock_passwd' defaults to True.
# lock account unless lock_password is False.
if kwargs.get('lock_passwd', True):
- try:
- util.subp(['passwd', '--lock', name])
- except Exception as e:
- util.logexc(LOG, ("Failed to disable password logins for"
- "user %s" % name), e)
- raise e
+ self.lock_passwd(name)
# Configure sudo access
if 'sudo' in kwargs:
@@ -370,21 +384,37 @@ class Distro(object):
# Import SSH keys
if 'ssh_authorized_keys' in kwargs:
keys = set(kwargs['ssh_authorized_keys']) or []
- ssh_util.setup_user_keys(keys, name, key_prefix=None)
+ ssh_util.setup_user_keys(keys, name, options=None)
return True
+ def lock_passwd(self, name):
+ """
+ Lock the password of a user, i.e., disable password logins
+ """
+ try:
+ # Need to use the short option name '-l' instead of '--lock'
+ # (which would be more descriptive) since SLES 11 doesn't know
+ # about long names.
+ util.subp(['passwd', '-l', name])
+ except Exception as e:
+ util.logexc(LOG, 'Failed to disable password for user %s', name)
+ raise e
+
def set_passwd(self, user, passwd, hashed=False):
pass_string = '%s:%s' % (user, passwd)
cmd = ['chpasswd']
if hashed:
- cmd.append('--encrypted')
+ # Need to use the short option name '-e' instead of '--encrypted'
+ # (which would be more descriptive) since SLES 11 doesn't know
+ # about long names.
+ cmd.append('-e')
try:
util.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
except Exception as e:
- util.logexc(LOG, "Failed to set password for %s" % user)
+ util.logexc(LOG, "Failed to set password for %s", user)
raise e
return True
@@ -426,7 +456,7 @@ class Distro(object):
util.append_file(sudo_base, sudoers_contents)
LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base))
except IOError as e:
- util.logexc(LOG, "Failed to write %s" % sudo_base, e)
+ util.logexc(LOG, "Failed to write %s", sudo_base)
raise e
util.ensure_dir(path, 0750)
@@ -445,7 +475,7 @@ class Distro(object):
lines.append("%s %s" % (user, rules))
else:
msg = "Can not create sudoers rule addition with type %r"
- raise TypeError(msg % (util.obj_name(rules)))
+ raise TypeError(msg % (type_utils.obj_name(rules)))
content = "\n".join(lines)
content += "\n" # trailing newline
@@ -477,15 +507,15 @@ class Distro(object):
try:
util.subp(group_add_cmd)
LOG.info("Created new group %s" % name)
- except Exception as e:
- util.logexc("Failed to create group %s" % name, e)
+ except Exception:
+ util.logexc("Failed to create group %s", name)
# Add members to the group, if so defined
if len(members) > 0:
for member in members:
if not util.is_user(member):
LOG.warn("Unable to add group member '%s' to group '%s'"
- "; user does not exist." % (member, name))
+ "; user does not exist.", member, name)
continue
util.subp(['usermod', '-a', '-G', name, member])
@@ -568,7 +598,7 @@ def _normalize_groups(grp_cfg):
c_grp_cfg[k] = [v]
else:
raise TypeError("Bad group member type %s" %
- util.obj_name(v))
+ type_utils.obj_name(v))
else:
if isinstance(v, (list)):
c_grp_cfg[k].extend(v)
@@ -576,13 +606,13 @@ def _normalize_groups(grp_cfg):
c_grp_cfg[k].append(v)
else:
raise TypeError("Bad group member type %s" %
- util.obj_name(v))
+ type_utils.obj_name(v))
elif isinstance(i, (str, basestring)):
if i not in c_grp_cfg:
c_grp_cfg[i] = []
else:
raise TypeError("Unknown group name type %s" %
- util.obj_name(i))
+ type_utils.obj_name(i))
grp_cfg = c_grp_cfg
groups = {}
if isinstance(grp_cfg, (dict)):
@@ -591,7 +621,7 @@ def _normalize_groups(grp_cfg):
else:
raise TypeError(("Group config must be list, dict "
" or string types only and not %s") %
- util.obj_name(grp_cfg))
+ type_utils.obj_name(grp_cfg))
return groups
@@ -622,7 +652,7 @@ def _normalize_users(u_cfg, def_user_cfg=None):
ad_ucfg.append(v)
else:
raise TypeError(("Unmappable user value type %s"
- " for key %s") % (util.obj_name(v), k))
+ " for key %s") % (type_utils.obj_name(v), k))
u_cfg = ad_ucfg
elif isinstance(u_cfg, (str, basestring)):
u_cfg = util.uniq_merge_sorted(u_cfg)
@@ -647,7 +677,7 @@ def _normalize_users(u_cfg, def_user_cfg=None):
else:
raise TypeError(("User config must be dictionary/list "
" or string types only and not %s") %
- util.obj_name(user_config))
+ type_utils.obj_name(user_config))
# Ensure user options are in the right python friendly format
if users:
@@ -740,7 +770,7 @@ def normalize_users_groups(cfg, distro):
}
if not isinstance(old_user, (dict)):
LOG.warn(("Format for 'user' key must be a string or "
- "dictionary and not %s"), util.obj_name(old_user))
+ "dictionary and not %s"), type_utils.obj_name(old_user))
old_user = {}
# If no old user format, then assume the distro
@@ -766,7 +796,7 @@ def normalize_users_groups(cfg, distro):
if not isinstance(base_users, (list, dict, str, basestring)):
LOG.warn(("Format for 'users' key must be a comma separated string"
" or a dictionary or a list and not %s"),
- util.obj_name(base_users))
+ type_utils.obj_name(base_users))
base_users = []
if old_user:
@@ -776,7 +806,7 @@ def normalize_users_groups(cfg, distro):
# Just add it on at the end...
base_users.append({'name': 'default'})
elif isinstance(base_users, (dict)):
- base_users['default'] = base_users.get('default', True)
+ base_users['default'] = dict(base_users).get('default', True)
elif isinstance(base_users, (str, basestring)):
# Just append it on to be re-parsed later
base_users += ",default"
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 1a8e927b..8fe49cbe 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -33,6 +33,10 @@ from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
+APT_GET_COMMAND = ('apt-get', '--option=Dpkg::Options::=--force-confold',
+ '--option=Dpkg::options::=--force-unsafe-io',
+ '--assume-yes', '--quiet')
+
class Distro(distros.Distro):
hostname_conf_fn = "/etc/hostname"
@@ -40,7 +44,6 @@ class Distro(distros.Distro):
network_conf_fn = "/etc/network/interfaces"
tz_conf_fn = "/etc/timezone"
tz_local_fn = "/etc/localtime"
- tz_zone_dir = "/usr/share/zoneinfo"
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -126,12 +129,7 @@ class Distro(distros.Distro):
return "127.0.1.1"
def set_timezone(self, tz):
- # TODO(harlowja): move this code into
- # the parent distro...
- tz_file = os.path.join(self.tz_zone_dir, str(tz))
- if not os.path.isfile(tz_file):
- raise RuntimeError(("Invalid timezone %s,"
- " no file found at %s") % (tz, tz_file))
+ tz_file = self._find_tz_file(tz)
# Note: "" provides trailing newline during join
tz_lines = [
util.make_header(),
@@ -142,20 +140,27 @@ class Distro(distros.Distro):
# This ensures that the correct tz will be used for the system
util.copy(tz_file, self.tz_local_fn)
- def package_command(self, command, args=None, pkgs=[]):
+ def package_command(self, command, args=None, pkgs=None):
+ if pkgs is None:
+ pkgs = []
+
e = os.environ.copy()
# See: http://tiny.cc/kg91fw
# Or: http://tiny.cc/mh91fw
e['DEBIAN_FRONTEND'] = 'noninteractive'
- cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confold',
- '--assume-yes', '--quiet']
+ cmd = list(self.get_option("apt_get_command", APT_GET_COMMAND))
if args and isinstance(args, str):
cmd.append(args)
elif args and isinstance(args, list):
cmd.extend(args)
- cmd.append(command)
+ subcmd = command
+ if command == "upgrade":
+ subcmd = self.get_option("apt_get_upgrade_subcommand",
+ "dist-upgrade")
+
+ cmd.append(subcmd)
pkglist = util.expand_package_list('%s=%s', pkgs)
cmd.extend(pkglist)
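
A sketch of the argv produced by the reworked package_command() with the new
defaults (no 'apt_get_command' or 'apt_get_upgrade_subcommand' overrides
configured; the package name is hypothetical).

    APT_GET_COMMAND = ('apt-get', '--option=Dpkg::Options::=--force-confold',
                       '--option=Dpkg::options::=--force-unsafe-io',
                       '--assume-yes', '--quiet')
    cmd = list(APT_GET_COMMAND)
    cmd.append('dist-upgrade')  # 'upgrade' is promoted to 'dist-upgrade'
    cmd.append('htop')          # hypothetical package, no version pin
    print(' '.join(cmd))
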
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index 5733c25a..1be9d46b 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -137,8 +137,8 @@ class ResolvConf(object):
self._contents.append(('option', ['search', s_list, '']))
return flat_sds
- @local_domain.setter
- def local_domain(self, domain):
+ @local_domain.setter # pl51222 pylint: disable=E1101
+ def local_domain(self, domain): # pl51222 pylint: disable=E0102
self.parse()
self._remove_option('domain')
self._contents.append(('option', ['domain', str(domain), '']))
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 2f91e386..30195384 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
@@ -20,17 +20,12 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import os
-
from cloudinit import distros
-
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
-from cloudinit.distros.parsers.sys_conf import SysConf
-
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -47,12 +42,13 @@ class Distro(distros.Distro):
# See: http://tiny.cc/6r99fw
clock_conf_fn = "/etc/sysconfig/clock"
locale_conf_fn = '/etc/sysconfig/i18n'
+ systemd_locale_conf_fn = '/etc/locale.conf'
network_conf_fn = "/etc/sysconfig/network"
hostname_conf_fn = "/etc/sysconfig/network"
+ systemd_hostname_conf_fn = "/etc/hostname"
network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s'
resolve_conf_fn = "/etc/resolv.conf"
tz_local_fn = "/etc/localtime"
- tz_zone_dir = "/usr/share/zoneinfo"
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -65,33 +61,9 @@ class Distro(distros.Distro):
def install_packages(self, pkglist):
self.package_command('install', pkgs=pkglist)
- def _adjust_resolve(self, dns_servers, search_servers):
- try:
- r_conf = ResolvConf(util.load_file(self.resolve_conf_fn))
- r_conf.parse()
- except IOError:
- util.logexc(LOG,
- "Failed at parsing %s reverting to an empty instance",
- self.resolve_conf_fn)
- r_conf = ResolvConf('')
- r_conf.parse()
- if dns_servers:
- for s in dns_servers:
- try:
- r_conf.add_nameserver(s)
- except ValueError:
- util.logexc(LOG, "Failed at adding nameserver %s", s)
- if search_servers:
- for s in search_servers:
- try:
- r_conf.add_search_domain(s)
- except ValueError:
- util.logexc(LOG, "Failed at adding search domain %s", s)
- util.write_file(self.resolve_conf_fn, str(r_conf), 0644)
-
def _write_network(self, settings):
# TODO(harlowja) fix this... since this is the ubuntu format
- entries = translate_network(settings)
+ entries = rhel_util.translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
# Make the intermediate format as the rhel format...
@@ -110,54 +82,49 @@ class Distro(distros.Distro):
'MACADDR': info.get('hwaddress'),
'ONBOOT': _make_sysconfig_bool(info.get('auto')),
}
- self._update_sysconfig_file(net_fn, net_cfg)
+ rhel_util.update_sysconfig_file(net_fn, net_cfg)
if 'dns-nameservers' in info:
nameservers.extend(info['dns-nameservers'])
if 'dns-search' in info:
searchservers.extend(info['dns-search'])
if nameservers or searchservers:
- self._adjust_resolve(nameservers, searchservers)
+ rhel_util.update_resolve_conf_file(self.resolve_conf_fn,
+ nameservers, searchservers)
if dev_names:
net_cfg = {
'NETWORKING': _make_sysconfig_bool(True),
}
- self._update_sysconfig_file(self.network_conf_fn, net_cfg)
+ rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg)
return dev_names
- def _update_sysconfig_file(self, fn, adjustments, allow_empty=False):
- if not adjustments:
- return
- (exists, contents) = self._read_conf(fn)
- updated_am = 0
- for (k, v) in adjustments.items():
- if v is None:
- continue
- v = str(v)
- if len(v) == 0 and not allow_empty:
- continue
- contents[k] = v
- updated_am += 1
- if updated_am:
- lines = [
- str(contents),
- ]
- if not exists:
- lines.insert(0, util.make_header())
- util.write_file(fn, "\n".join(lines), 0644)
+ def _dist_uses_systemd(self):
+ # Fedora 18 and RHEL 7 were the first adopters in their series
+ (dist, vers) = util.system_info()['dist'][:2]
+ major = int(vers.split('.')[0])
+ return ((dist.startswith('Red Hat Enterprise Linux') and major >= 7)
+ or (dist.startswith('Fedora') and major >= 18))
def apply_locale(self, locale, out_fn=None):
- if not out_fn:
- out_fn = self.locale_conf_fn
+ if self._dist_uses_systemd():
+ out_fn = self.systemd_locale_conf_fn
+ else:
+ if not out_fn:
+ out_fn = self.locale_conf_fn
locale_cfg = {
'LANG': locale,
}
- self._update_sysconfig_file(out_fn, locale_cfg)
+ rhel_util.update_sysconfig_file(out_fn, locale_cfg)
def _write_hostname(self, hostname, out_fn):
- host_cfg = {
- 'HOSTNAME': hostname,
- }
- self._update_sysconfig_file(out_fn, host_cfg)
+ if self._dist_uses_systemd():
+ util.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ else:
+ host_cfg = {
+ 'HOSTNAME': hostname,
+ }
+ rhel_util.update_sysconfig_file(out_fn, host_cfg)
def _select_hostname(self, hostname, fqdn):
# See: http://bit.ly/TwitgL
@@ -167,25 +134,25 @@ class Distro(distros.Distro):
return hostname
def _read_system_hostname(self):
- return (self.network_conf_fn,
- self._read_hostname(self.network_conf_fn))
+ if self._dist_uses_systemd():
+ host_fn = self.systemd_hostname_conf_fn
+ else:
+ host_fn = self.hostname_conf_fn
+ return (host_fn, self._read_hostname(host_fn))
def _read_hostname(self, filename, default=None):
- (_exists, contents) = self._read_conf(filename)
- if 'HOSTNAME' in contents:
- return contents['HOSTNAME']
+ if self._dist_uses_systemd():
+ (out, _err) = util.subp(['hostname'])
+ if len(out):
+ return out
+ else:
+ return default
else:
- return default
-
- def _read_conf(self, fn):
- exists = False
- try:
- contents = util.load_file(fn).splitlines()
- exists = True
- except IOError:
- contents = []
- return (exists,
- SysConf(contents))
+ (_exists, contents) = rhel_util.read_sysconfig_file(filename)
+ if 'HOSTNAME' in contents:
+ return contents['HOSTNAME']
+ else:
+ return default
def _bring_up_interfaces(self, device_names):
if device_names and 'all' in device_names:
@@ -194,21 +161,25 @@ class Distro(distros.Distro):
return distros.Distro._bring_up_interfaces(self, device_names)
def set_timezone(self, tz):
- # TODO(harlowja): move this code into
- # the parent distro...
- tz_file = os.path.join(self.tz_zone_dir, str(tz))
- if not os.path.isfile(tz_file):
- raise RuntimeError(("Invalid timezone %s,"
- " no file found at %s") % (tz, tz_file))
- # Adjust the sysconfig clock zone setting
- clock_cfg = {
- 'ZONE': str(tz),
- }
- self._update_sysconfig_file(self.clock_conf_fn, clock_cfg)
- # This ensures that the correct tz will be used for the system
- util.copy(tz_file, self.tz_local_fn)
+ tz_file = self._find_tz_file(tz)
+ if self._dist_uses_systemd():
+ # Currently, timedatectl complains if invoked during startup
+ # so for compatibility, create the link manually.
+ util.del_file(self.tz_local_fn)
+ util.sym_link(tz_file, self.tz_local_fn)
+ else:
+ # Adjust the sysconfig clock zone setting
+ clock_cfg = {
+ 'ZONE': str(tz),
+ }
+ rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
+ # This ensures that the correct tz will be used for the system
+ util.copy(tz_file, self.tz_local_fn)
+
+ def package_command(self, command, args=None, pkgs=None):
+ if pkgs is None:
+ pkgs = []
- def package_command(self, command, args=None, pkgs=[]):
cmd = ['yum']
# If enabled, then yum will be tolerant of errors on the command line
# with regard to packages.
@@ -236,90 +207,3 @@ class Distro(distros.Distro):
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
["makecache"], freq=PER_INSTANCE)
-
-
-# This is a util function to translate a ubuntu /etc/network/interfaces 'blob'
-# to a rhel equiv. that can then be written to /etc/sysconfig/network-scripts/
-# TODO(harlowja) remove when we have python-netcf active...
-def translate_network(settings):
- # Get the standard cmd, args from the ubuntu format
- entries = []
- for line in settings.splitlines():
- line = line.strip()
- if not line or line.startswith("#"):
- continue
- split_up = line.split(None, 1)
- if len(split_up) <= 1:
- continue
- entries.append(split_up)
- # Figure out where each iface section is
- ifaces = []
- consume = {}
- for (cmd, args) in entries:
- if cmd == 'iface':
- if consume:
- ifaces.append(consume)
- consume = {}
- consume[cmd] = args
- else:
- consume[cmd] = args
- # Check if anything left over to consume
- absorb = False
- for (cmd, args) in consume.iteritems():
- if cmd == 'iface':
- absorb = True
- if absorb:
- ifaces.append(consume)
- # Now translate
- real_ifaces = {}
- for info in ifaces:
- if 'iface' not in info:
- continue
- iface_details = info['iface'].split(None)
- dev_name = None
- if len(iface_details) >= 1:
- dev = iface_details[0].strip().lower()
- if dev:
- dev_name = dev
- if not dev_name:
- continue
- iface_info = {}
- if len(iface_details) >= 3:
- proto_type = iface_details[2].strip().lower()
- # Seems like this can be 'loopback' which we don't
- # really care about
- if proto_type in ['dhcp', 'static']:
- iface_info['bootproto'] = proto_type
- # These can just be copied over
- for k in ['netmask', 'address', 'gateway', 'broadcast']:
- if k in info:
- val = info[k].strip().lower()
- if val:
- iface_info[k] = val
- # Name server info provided??
- if 'dns-nameservers' in info:
- iface_info['dns-nameservers'] = info['dns-nameservers'].split()
- # Name server search info provided??
- if 'dns-search' in info:
- iface_info['dns-search'] = info['dns-search'].split()
- # Is any mac address spoofing going on??
- if 'hwaddress' in info:
- hw_info = info['hwaddress'].lower().strip()
- hw_split = hw_info.split(None, 1)
- if len(hw_split) == 2 and hw_split[0].startswith('ether'):
- hw_addr = hw_split[1]
- if hw_addr:
- iface_info['hwaddress'] = hw_addr
- real_ifaces[dev_name] = iface_info
- # Check for those that should be started on boot via 'auto'
- for (cmd, args) in entries:
- if cmd == 'auto':
- # Seems like auto can be like 'auto eth0 eth0:1' so just get the
- # first part out as the device name
- args = args.split(None)
- if not args:
- continue
- dev_name = args[0].strip().lower()
- if dev_name in real_ifaces:
- real_ifaces[dev_name]['auto'] = True
- return real_ifaces
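# Illustrative sketch (not part of the patch): the systemd-detection
# heuristic above, reduced to a pure function. The (dist, version) pairs
# below are assumed examples of what util.system_info()['dist'] returns.
def uses_systemd(dist, vers):
    major = int(vers.split('.')[0])
    return ((dist.startswith('Red Hat Enterprise Linux') and major >= 7)
            or (dist.startswith('Fedora') and major >= 18))

assert uses_systemd('Fedora', '18.1')
assert not uses_systemd('Red Hat Enterprise Linux Server', '6.4')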
diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py
new file mode 100644
index 00000000..1aba58b8
--- /dev/null
+++ b/cloudinit/distros/rhel_util.py
@@ -0,0 +1,177 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from cloudinit.distros.parsers.resolv_conf import ResolvConf
+from cloudinit.distros.parsers.sys_conf import SysConf
+
+from cloudinit import log as logging
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+# This is a util function to translate Debian-based distro interface blobs as
+# given in /etc/network/interfaces to an equivalent format for distributions
+# that use ifcfg-* style (Red Hat and SUSE).
+# TODO(harlowja) remove when we have python-netcf active...
+def translate_network(settings):
+ # Get the standard cmd, args from the ubuntu format
+ entries = []
+ for line in settings.splitlines():
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+ split_up = line.split(None, 1)
+ if len(split_up) <= 1:
+ continue
+ entries.append(split_up)
+ # Figure out where each iface section is
+ ifaces = []
+ consume = {}
+ for (cmd, args) in entries:
+ if cmd == 'iface':
+ if consume:
+ ifaces.append(consume)
+ consume = {}
+ consume[cmd] = args
+ else:
+ consume[cmd] = args
+ # Check if anything left over to consume
+ absorb = False
+ for (cmd, args) in consume.iteritems():
+ if cmd == 'iface':
+ absorb = True
+ if absorb:
+ ifaces.append(consume)
+ # Now translate
+ real_ifaces = {}
+ for info in ifaces:
+ if 'iface' not in info:
+ continue
+ iface_details = info['iface'].split(None)
+ dev_name = None
+ if len(iface_details) >= 1:
+ dev = iface_details[0].strip().lower()
+ if dev:
+ dev_name = dev
+ if not dev_name:
+ continue
+ iface_info = {}
+ if len(iface_details) >= 3:
+ proto_type = iface_details[2].strip().lower()
+ # Seems like this can be 'loopback' which we don't
+ # really care about
+ if proto_type in ['dhcp', 'static']:
+ iface_info['bootproto'] = proto_type
+ # These can just be copied over
+ for k in ['netmask', 'address', 'gateway', 'broadcast']:
+ if k in info:
+ val = info[k].strip().lower()
+ if val:
+ iface_info[k] = val
+ # Name server info provided??
+ if 'dns-nameservers' in info:
+ iface_info['dns-nameservers'] = info['dns-nameservers'].split()
+ # Name server search info provided??
+ if 'dns-search' in info:
+ iface_info['dns-search'] = info['dns-search'].split()
+ # Is any mac address spoofing going on??
+ if 'hwaddress' in info:
+ hw_info = info['hwaddress'].lower().strip()
+ hw_split = hw_info.split(None, 1)
+ if len(hw_split) == 2 and hw_split[0].startswith('ether'):
+ hw_addr = hw_split[1]
+ if hw_addr:
+ iface_info['hwaddress'] = hw_addr
+ real_ifaces[dev_name] = iface_info
+ # Check for those that should be started on boot via 'auto'
+ for (cmd, args) in entries:
+ if cmd == 'auto':
+ # Seems like auto can be like 'auto eth0 eth0:1' so just get the
+ # first part out as the device name
+ args = args.split(None)
+ if not args:
+ continue
+ dev_name = args[0].strip().lower()
+ if dev_name in real_ifaces:
+ real_ifaces[dev_name]['auto'] = True
+ return real_ifaces
+
+
+# Helper function to update a RHEL/SUSE /etc/sysconfig/* file
+def update_sysconfig_file(fn, adjustments, allow_empty=False):
+ if not adjustments:
+ return
+ (exists, contents) = read_sysconfig_file(fn)
+ updated_am = 0
+ for (k, v) in adjustments.items():
+ if v is None:
+ continue
+ v = str(v)
+ if len(v) == 0 and not allow_empty:
+ continue
+ contents[k] = v
+ updated_am += 1
+ if updated_am:
+ lines = [
+ str(contents),
+ ]
+ if not exists:
+ lines.insert(0, util.make_header())
+ util.write_file(fn, "\n".join(lines) + "\n", 0644)
+
+
+# Helper function to read a RHEL/SUSE /etc/sysconfig/* file
+def read_sysconfig_file(fn):
+ exists = False
+ try:
+ contents = util.load_file(fn).splitlines()
+ exists = True
+ except IOError:
+ contents = []
+ return (exists, SysConf(contents))
+
+
+# Helper function to update RHEL/SUSE /etc/resolv.conf
+def update_resolve_conf_file(fn, dns_servers, search_servers):
+ try:
+ r_conf = ResolvConf(util.load_file(fn))
+ r_conf.parse()
+ except IOError:
+ util.logexc(LOG, "Failed at parsing %s reverting to an empty "
+ "instance", fn)
+ r_conf = ResolvConf('')
+ r_conf.parse()
+ if dns_servers:
+ for s in dns_servers:
+ try:
+ r_conf.add_nameserver(s)
+ except ValueError:
+ util.logexc(LOG, "Failed at adding nameserver %s", s)
+ if search_servers:
+ for s in search_servers:
+ try:
+ r_conf.add_search_domain(s)
+ except ValueError:
+ util.logexc(LOG, "Failed at adding search domain %s", s)
+ util.write_file(fn, str(r_conf), 0644)
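# Illustrative sketch (not part of the patch): assumed input/output shape
# for translate_network() above, where a Debian-style /etc/network/interfaces
# blob becomes a dict keyed by device name.
SETTINGS = """
auto eth0
iface eth0 inet static
    address 10.0.0.5
    netmask 255.255.255.0
    gateway 10.0.0.1
    dns-nameservers 10.0.0.2
"""
# Per the parser above, translate_network(SETTINGS) yields:
# {'eth0': {'bootproto': 'static', 'address': '10.0.0.5',
#           'netmask': '255.255.255.0', 'gateway': '10.0.0.1',
#           'dns-nameservers': ['10.0.0.2'], 'auto': True}}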
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
new file mode 100644
index 00000000..f2ac4efc
--- /dev/null
+++ b/cloudinit/distros/sles.py
@@ -0,0 +1,185 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# Leaning very heavily on the RHEL and Debian implementation
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import distros
+
+from cloudinit.distros.parsers.hostname import HostnameConf
+
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import util
+
+from cloudinit.distros import rhel_util
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(distros.Distro):
+ clock_conf_fn = '/etc/sysconfig/clock'
+ locale_conf_fn = '/etc/sysconfig/language'
+ network_conf_fn = '/etc/sysconfig/network'
+ hostname_conf_fn = '/etc/HOSTNAME'
+ network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
+ resolve_conf_fn = '/etc/resolv.conf'
+ tz_local_fn = '/etc/localtime'
+
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+ # This will be used to restrict certain
+ # calls from repeatedly happening (when they
+ # should only happen, say, once per instance...)
+ self._runner = helpers.Runners(paths)
+ self.osfamily = 'suse'
+
+ def install_packages(self, pkglist):
+ self.package_command('install', args='-l', pkgs=pkglist)
+
+ def _write_network(self, settings):
+ # Convert debian settings to ifcfg format
+ entries = rhel_util.translate_network(settings)
+ LOG.debug("Translated ubuntu style network settings %s into %s",
+ settings, entries)
+ # Make the intermediate format as the suse format...
+ nameservers = []
+ searchservers = []
+ dev_names = entries.keys()
+ for (dev, info) in entries.iteritems():
+ net_fn = self.network_script_tpl % (dev)
+ mode = info.get('auto')
+ if mode and mode.lower() == 'true':
+ mode = 'auto'
+ else:
+ mode = 'manual'
+ net_cfg = {
+ 'BOOTPROTO': info.get('bootproto'),
+ 'BROADCAST': info.get('broadcast'),
+ 'GATEWAY': info.get('gateway'),
+ 'IPADDR': info.get('address'),
+ 'LLADDR': info.get('hwaddress'),
+ 'NETMASK': info.get('netmask'),
+ 'STARTMODE': mode,
+ 'USERCONTROL': 'no'
+ }
+ if dev != 'lo':
+ net_cfg['ETHERDEVICE'] = dev
+ net_cfg['ETHTOOL_OPTIONS'] = ''
+ else:
+ net_cfg['FIREWALL'] = 'no'
+ rhel_util.update_sysconfig_file(net_fn, net_cfg, True)
+ if 'dns-nameservers' in info:
+ nameservers.extend(info['dns-nameservers'])
+ if 'dns-search' in info:
+ searchservers.extend(info['dns-search'])
+ if nameservers or searchservers:
+ rhel_util.update_resolve_conf_file(self.resolve_conf_fn,
+ nameservers, searchservers)
+ return dev_names
+
+ def apply_locale(self, locale, out_fn=None):
+ if not out_fn:
+ out_fn = self.locale_conf_fn
+ locale_cfg = {
+ 'RC_LANG': locale,
+ }
+ rhel_util.update_sysconfig_file(out_fn, locale_cfg)
+
+ def _write_hostname(self, hostname, out_fn):
+ conf = None
+ try:
+ # Try to update the previous one
+ # so lets see if we can read it first.
+ conf = self._read_hostname_conf(out_fn)
+ except IOError:
+ pass
+ if not conf:
+ conf = HostnameConf('')
+ conf.set_hostname(hostname)
+ util.write_file(out_fn, str(conf), 0644)
+
+ def _select_hostname(self, hostname, fqdn):
+ # Prefer the short hostname over the long
+ # fully qualified domain name
+ if not hostname:
+ return fqdn
+ return hostname
+
+ def _read_system_hostname(self):
+ host_fn = self.hostname_conf_fn
+ return (host_fn, self._read_hostname(host_fn))
+
+ def _read_hostname_conf(self, filename):
+ conf = HostnameConf(util.load_file(filename))
+ conf.parse()
+ return conf
+
+ def _read_hostname(self, filename, default=None):
+ hostname = None
+ try:
+ conf = self._read_hostname_conf(filename)
+ hostname = conf.hostname
+ except IOError:
+ pass
+ if not hostname:
+ return default
+ return hostname
+
+ def _bring_up_interfaces(self, device_names):
+ if device_names and 'all' in device_names:
+ raise RuntimeError(('Distro %s can not translate '
+ 'the device name "all"') % (self.name))
+ return distros.Distro._bring_up_interfaces(self, device_names)
+
+ def set_timezone(self, tz):
+ tz_file = self._find_tz_file(tz)
+ # Adjust the sysconfig clock zone setting
+ clock_cfg = {
+ 'TIMEZONE': str(tz),
+ }
+ rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
+ # This ensures that the correct tz will be used for the system
+ util.copy(tz_file, self.tz_local_fn)
+
+ def package_command(self, command, args=None, pkgs=None):
+ if pkgs is None:
+ pkgs = []
+
+ cmd = ['zypper']
+ # No user interaction possible, enable non-interactive mode
+ cmd.append('--non-interactive')
+
+ # Command is the operation, such as 'install'
+ cmd.append(command)
+
+ # args are the arguments to the command, not global options
+ if args and isinstance(args, str):
+ cmd.append(args)
+ elif args and isinstance(args, list):
+ cmd.extend(args)
+
+ pkglist = util.expand_package_list('%s-%s', pkgs)
+ cmd.extend(pkglist)
+
+ # Allow the output of this to flow outwards (ie not be captured)
+ util.subp(cmd, capture=False)
+
+ def update_package_sources(self):
+ self._runner.run("update-sources", self.package_command,
+ ['refresh'], freq=PER_INSTANCE)
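# Illustrative note (not part of the patch): the zypper invocation assembled
# by package_command() above, assuming util.expand_package_list('%s-%s', ...)
# joins (name, version) pairs with a dash and passes bare names through:
#   package_command('install', args='-l', pkgs=[('vim', '7.3'), 'curl'])
# would run roughly:
#   zypper --non-interactive install -l vim-7.3 curl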
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 46b93f39..fcd511c5 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -28,6 +28,10 @@ import boto.utils as boto_utils
# would have existed) do not exist due to the blocking
# that occurred.
+# TODO(harlowja): https://github.com/boto/boto/issues/1401
+# When boto finally moves to using requests, we should be able
+# to provide it ssl details, it does not yet, so we can't provide them...
+
def _unlazy_dict(mp):
if not isinstance(mp, (dict)):
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 8d6dcd4d..2ddc75f4 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
@@ -27,6 +27,7 @@ from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES)
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import type_utils
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -61,6 +62,7 @@ INCLUSION_TYPES_MAP = {
'#part-handler': 'text/part-handler',
'#cloud-boothook': 'text/cloud-boothook',
'#cloud-config-archive': 'text/cloud-config-archive',
+ '#cloud-config-jsonp': 'text/cloud-config-jsonp',
}
# Sorted longest first
@@ -69,7 +71,6 @@ INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()),
class Handler(object):
-
__metaclass__ = abc.ABCMeta
def __init__(self, frequency, version=2):
@@ -77,53 +78,65 @@ class Handler(object):
self.frequency = frequency
def __repr__(self):
- return "%s: [%s]" % (util.obj_name(self), self.list_types())
+ return "%s: [%s]" % (type_utils.obj_name(self), self.list_types())
@abc.abstractmethod
def list_types(self):
raise NotImplementedError()
- def handle_part(self, data, ctype, filename, payload, frequency):
- return self._handle_part(data, ctype, filename, payload, frequency)
-
@abc.abstractmethod
- def _handle_part(self, data, ctype, filename, payload, frequency):
+ def handle_part(self, *args, **kwargs):
raise NotImplementedError()
-def run_part(mod, data, ctype, filename, payload, frequency):
+def run_part(mod, data, filename, payload, frequency, headers):
mod_freq = mod.frequency
if not (mod_freq == PER_ALWAYS or
(frequency == PER_INSTANCE and mod_freq == PER_INSTANCE)):
return
- mod_ver = mod.handler_version
# Sanity checks on version (should be an int convertible)
try:
+ mod_ver = mod.handler_version
mod_ver = int(mod_ver)
- except:
+ except (TypeError, ValueError, AttributeError):
mod_ver = 1
+ content_type = headers['Content-Type']
try:
LOG.debug("Calling handler %s (%s, %s, %s) with frequency %s",
- mod, ctype, filename, mod_ver, frequency)
- if mod_ver >= 2:
+ mod, content_type, filename, mod_ver, frequency)
+ if mod_ver == 3:
+ # Treat as v. 3 which does get a frequency + headers
+ mod.handle_part(data, content_type, filename,
+ payload, frequency, headers)
+ elif mod_ver == 2:
# Treat as v. 2 which does get a frequency
- mod.handle_part(data, ctype, filename, payload, frequency)
- else:
+ mod.handle_part(data, content_type, filename,
+ payload, frequency)
+ elif mod_ver == 1:
# Treat as v. 1 which gets no frequency
- mod.handle_part(data, ctype, filename, payload)
+ mod.handle_part(data, content_type, filename, payload)
+ else:
+ raise ValueError("Unknown module version %s" % (mod_ver))
except:
- util.logexc(LOG, ("Failed calling handler %s (%s, %s, %s)"
- " with frequency %s"),
- mod, ctype, filename,
- mod_ver, frequency)
+ util.logexc(LOG, "Failed calling handler %s (%s, %s, %s) with "
+ "frequency %s", mod, content_type, filename, mod_ver,
+ frequency)
def call_begin(mod, data, frequency):
- run_part(mod, data, CONTENT_START, None, None, frequency)
+ # Create a fake header set
+ headers = {
+ 'Content-Type': CONTENT_START,
+ }
+ run_part(mod, data, None, None, frequency, headers)
def call_end(mod, data, frequency):
- run_part(mod, data, CONTENT_END, None, None, frequency)
+ # Create a fake header set
+ headers = {
+ 'Content-Type': CONTENT_END,
+ }
+ run_part(mod, data, None, None, frequency, headers)
def walker_handle_handler(pdata, _ctype, _filename, payload):
@@ -139,14 +152,13 @@ def walker_handle_handler(pdata, _ctype, _filename, payload):
try:
mod = fixup_handler(importer.import_module(modname))
call_begin(mod, pdata['data'], frequency)
- # Only register and increment
- # after the above have worked (so we don't if it
- # fails)
- handlers.register(mod)
+ # Only register and increment after the above have worked, so we don't
+ # register if it fails starting.
+ handlers.register(mod, initialized=True)
pdata['handlercount'] = curcount + 1
except:
- util.logexc(LOG, ("Failed at registering python file: %s"
- " (part handler %s)"), modfname, curcount)
+ util.logexc(LOG, "Failed at registering python file: %s (part "
+ "handler %s)", modfname, curcount)
def _extract_first_or_bytes(blob, size):
@@ -173,26 +185,27 @@ def _escape_string(text):
return text
-def walker_callback(pdata, ctype, filename, payload):
- if ctype in PART_CONTENT_TYPES:
- walker_handle_handler(pdata, ctype, filename, payload)
+def walker_callback(data, filename, payload, headers):
+ content_type = headers['Content-Type']
+ if content_type in PART_CONTENT_TYPES:
+ walker_handle_handler(data, content_type, filename, payload)
return
- handlers = pdata['handlers']
- if ctype in pdata['handlers']:
- run_part(handlers[ctype], pdata['data'], ctype, filename,
- payload, pdata['frequency'])
+ handlers = data['handlers']
+ if content_type in handlers:
+ run_part(handlers[content_type], data['data'], filename,
+ payload, data['frequency'], headers)
elif payload:
# Extract the first line or 24 bytes for displaying in the log
start = _extract_first_or_bytes(payload, 24)
details = "'%s...'" % (_escape_string(start))
- if ctype == NOT_MULTIPART_TYPE:
+ if content_type == NOT_MULTIPART_TYPE:
LOG.warning("Unhandled non-multipart (%s) userdata: %s",
- ctype, details)
+ content_type, details)
else:
LOG.warning("Unhandled unknown content-type (%s) userdata: %s",
- ctype, details)
+ content_type, details)
else:
- LOG.debug("empty payload of type %s" % ctype)
+ LOG.debug("Empty payload of type %s", content_type)
# Callback is a function that will be called with
@@ -212,7 +225,10 @@ def walk(msg, callback, data):
if not filename:
filename = PART_FN_TPL % (partnum)
- callback(data, ctype, filename, part.get_payload(decode=True))
+ headers = dict(part)
+ LOG.debug("Part headers: %s", headers)
+ headers['Content-Type'] = ctype
+ callback(data, filename, part.get_payload(decode=True), headers)
partnum = partnum + 1
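# Illustrative sketch (not part of the patch): a minimal version-3 part
# handler matching the dispatch in run_part() above. Version-3 handlers
# receive (data, ctype, filename, payload, frequency, headers); the
# '#example-part' prefix is made up for illustration.
from cloudinit import handlers
from cloudinit.settings import PER_ALWAYS

class ExamplePartHandler(handlers.Handler):
    def __init__(self, paths, **_kwargs):
        handlers.Handler.__init__(self, PER_ALWAYS, version=3)
        self.seen = []

    def list_types(self):
        return [handlers.type_from_starts_with("#example-part")]

    def handle_part(self, data, ctype, filename, payload, frequency,
                    headers):
        # headers['Content-Type'] equals ctype; both are set by walk() above.
        if ctype in handlers.CONTENT_SIGNALS:
            return
        self.seen.append((filename, ctype, frequency))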
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
index 456b8020..1848ce2c 100644
--- a/cloudinit/handlers/boot_hook.py
+++ b/cloudinit/handlers/boot_hook.py
@@ -29,6 +29,7 @@ from cloudinit import util
from cloudinit.settings import (PER_ALWAYS)
LOG = logging.getLogger(__name__)
+BOOTHOOK_PREFIX = "#cloud-boothook"
class BootHookPartHandler(handlers.Handler):
@@ -41,22 +42,19 @@ class BootHookPartHandler(handlers.Handler):
def list_types(self):
return [
- handlers.type_from_starts_with("#cloud-boothook"),
+ handlers.type_from_starts_with(BOOTHOOK_PREFIX),
]
def _write_part(self, payload, filename):
filename = util.clean_filename(filename)
- payload = util.dos2unix(payload)
- prefix = "#cloud-boothook"
- start = 0
- if payload.startswith(prefix):
- start = len(prefix) + 1
filepath = os.path.join(self.boothook_dir, filename)
- contents = payload[start:]
- util.write_file(filepath, contents, 0700)
+ contents = util.strip_prefix_suffix(util.dos2unix(payload),
+ prefix=BOOTHOOK_PREFIX)
+ util.write_file(filepath, contents.lstrip(), 0700)
return filepath
- def _handle_part(self, _data, ctype, filename, payload, _frequency):
+ def handle_part(self, _data, ctype, filename, # pylint: disable=W0221
+ payload, frequency): # pylint: disable=W0613
if ctype in handlers.CONTENT_SIGNALS:
return
@@ -69,5 +67,5 @@ class BootHookPartHandler(handlers.Handler):
except util.ProcessExecutionError:
util.logexc(LOG, "Boothooks script %s execution error", filepath)
except Exception:
- util.logexc(LOG, ("Boothooks unknown "
- "error when running %s"), filepath)
+ util.logexc(LOG, "Boothooks unknown error when running %s",
+ filepath)
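# Illustrative sketch (not part of the patch): what the prefix stripping in
# _write_part() above amounts to, assuming util.strip_prefix_suffix() removes
# a leading prefix when present.
PAYLOAD = "#cloud-boothook\n#!/bin/sh\necho hello\n"
contents = PAYLOAD
if contents.startswith("#cloud-boothook"):
    contents = contents[len("#cloud-boothook"):]
assert contents.lstrip() == "#!/bin/sh\necho hello\n"
# The lstrip()'d script is then written with mode 0700 and run per boot.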
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index f6d95244..34a73115 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -20,43 +20,143 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import jsonpatch
+
from cloudinit import handlers
from cloudinit import log as logging
+from cloudinit import mergers
from cloudinit import util
from cloudinit.settings import (PER_ALWAYS)
LOG = logging.getLogger(__name__)
+MERGE_HEADER = 'Merge-Type'
+
+# Previously, yaml configuration was loaded by appending each cloud config
+# part to one larger yaml document and then loading that document as a
+# single yaml file. We mimic that behavior by altering the default merge
+# strategy to replace keys of prior merges.
+#
+#
+# For example
+# #file 1
+# a: 3
+# #file 2
+# a: 22
+# #combined file (comments not included)
+# a: 3
+# a: 22
+#
+# This gets loaded into yaml with final result {'a': 22}
+DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()')
+CLOUD_PREFIX = "#cloud-config"
+JSONP_PREFIX = "#cloud-config-jsonp"
+
+# The file header -> content types this module will handle.
+CC_TYPES = {
+ JSONP_PREFIX: handlers.type_from_starts_with(JSONP_PREFIX),
+ CLOUD_PREFIX: handlers.type_from_starts_with(CLOUD_PREFIX),
+}
+
class CloudConfigPartHandler(handlers.Handler):
def __init__(self, paths, **_kwargs):
- handlers.Handler.__init__(self, PER_ALWAYS)
- self.cloud_buf = []
+ handlers.Handler.__init__(self, PER_ALWAYS, version=3)
+ self.cloud_buf = None
self.cloud_fn = paths.get_ipath("cloud_config")
+ self.file_names = []
def list_types(self):
- return [
- handlers.type_from_starts_with("#cloud-config"),
- ]
+ return list(CC_TYPES.values())
- def _write_cloud_config(self, buf):
+ def _write_cloud_config(self):
if not self.cloud_fn:
return
- lines = [str(b) for b in buf]
- payload = "\n".join(lines)
- util.write_file(self.cloud_fn, payload, 0600)
+ # Capture which files we merged from...
+ file_lines = []
+ if self.file_names:
+ file_lines.append("# from %s files" % (len(self.file_names)))
+ for fn in self.file_names:
+ if not fn:
+ fn = '?'
+ file_lines.append("# %s" % (fn))
+ file_lines.append("")
+ if self.cloud_buf is not None:
+ # Something was actually gathered....
+ lines = [
+ CLOUD_PREFIX,
+ '',
+ ]
+ lines.extend(file_lines)
+ lines.append(util.yaml_dumps(self.cloud_buf))
+ else:
+ lines = []
+ util.write_file(self.cloud_fn, "\n".join(lines), 0600)
+
+ def _extract_mergers(self, payload, headers):
+ merge_header_headers = ''
+ for h in [MERGE_HEADER, 'X-%s' % (MERGE_HEADER)]:
+ tmp_h = headers.get(h, '')
+ if tmp_h:
+ merge_header_headers = tmp_h
+ break
+ # Select the merge type from the content, then from the headers,
+ # and default to our own set if neither exists (or is empty).
+ payload_yaml = util.load_yaml(payload)
+ mergers_yaml = mergers.dict_extract_mergers(payload_yaml)
+ mergers_header = mergers.string_extract_mergers(merge_header_headers)
+ all_mergers = []
+ all_mergers.extend(mergers_yaml)
+ all_mergers.extend(mergers_header)
+ if not all_mergers:
+ all_mergers = DEF_MERGERS
+ return (payload_yaml, all_mergers)
+
+ def _merge_patch(self, payload):
+ # JSON doesn't handle comments in this manner, so ensure that
+ # if we started with this 'type' that we remove it before
+ # attempting to load it as json (which the jsonpatch library will
+ # attempt to do).
+ payload = payload.lstrip()
+ payload = util.strip_prefix_suffix(payload, prefix=JSONP_PREFIX)
+ patch = jsonpatch.JsonPatch.from_string(payload)
+ LOG.debug("Merging by applying json patch %s", patch)
+ self.cloud_buf = patch.apply(self.cloud_buf, in_place=False)
- def _handle_part(self, _data, ctype, filename, payload, _frequency):
+ def _merge_part(self, payload, headers):
+ (payload_yaml, my_mergers) = self._extract_mergers(payload, headers)
+ LOG.debug("Merging by applying %s", my_mergers)
+ merger = mergers.construct(my_mergers)
+ self.cloud_buf = merger.merge(self.cloud_buf, payload_yaml)
+
+ def _reset(self):
+ self.file_names = []
+ self.cloud_buf = None
+
+ def handle_part(self, _data, ctype, filename, # pylint: disable=W0221
+ payload, _frequency, headers): # pylint: disable=W0613
if ctype == handlers.CONTENT_START:
- self.cloud_buf = []
+ self._reset()
return
if ctype == handlers.CONTENT_END:
- self._write_cloud_config(self.cloud_buf)
- self.cloud_buf = []
+ self._write_cloud_config()
+ self._reset()
return
-
- filename = util.clean_filename(filename)
- if not filename:
- filename = '??'
- self.cloud_buf.extend(["#%s" % (filename), str(payload)])
+ try:
+ # First time through, merge with an empty dict...
+ if self.cloud_buf is None or not self.file_names:
+ self.cloud_buf = {}
+ if ctype == CC_TYPES[JSONP_PREFIX]:
+ self._merge_patch(payload)
+ else:
+ self._merge_part(payload, headers)
+ # Ensure filename is ok to store
+ for i in ("\n", "\r", "\t"):
+ filename = filename.replace(i, " ")
+ self.file_names.append(filename.strip())
+ except:
+ util.logexc(LOG, "Failed at merging in cloud config part from %s",
+ filename)
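# Illustrative sketch (not part of the patch): two hypothetical user-data
# parts exercising the handler above. The second is a JSON patch (the
# jsonpatch library implements RFC 6902) applied to the config accumulated
# so far.
PART_1 = """#cloud-config
a: 3
b: [1, 2]
"""
PART_2 = """#cloud-config-jsonp
[{"op": "replace", "path": "/a", "value": 22}]
"""
# With the default mergers 'dict(replace)+list()+str()', the final merged
# cloud config would be {'a': 22, 'b': [1, 2]}.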
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index 6c5c11ca..62289d98 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -29,6 +29,7 @@ from cloudinit import util
from cloudinit.settings import (PER_ALWAYS)
LOG = logging.getLogger(__name__)
+SHELL_PREFIX = "#!"
class ShellScriptPartHandler(handlers.Handler):
@@ -38,10 +39,11 @@ class ShellScriptPartHandler(handlers.Handler):
def list_types(self):
return [
- handlers.type_from_starts_with("#!"),
+ handlers.type_from_starts_with(SHELL_PREFIX),
]
- def _handle_part(self, _data, ctype, filename, payload, _frequency):
+ def handle_part(self, _data, ctype, filename, # pylint: disable=W0221
+ payload, frequency): # pylint: disable=W0613
if ctype in handlers.CONTENT_SIGNALS:
# TODO(harlowja): maybe delete existing things here
return
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 4684f7f2..bac4cad2 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -22,6 +22,7 @@
import os
+import re
from cloudinit import handlers
from cloudinit import log as logging
@@ -30,6 +31,7 @@ from cloudinit import util
from cloudinit.settings import (PER_INSTANCE)
LOG = logging.getLogger(__name__)
+UPSTART_PREFIX = "#upstart-job"
class UpstartJobPartHandler(handlers.Handler):
@@ -39,10 +41,11 @@ class UpstartJobPartHandler(handlers.Handler):
def list_types(self):
return [
- handlers.type_from_starts_with("#upstart-job"),
+ handlers.type_from_starts_with(UPSTART_PREFIX),
]
- def _handle_part(self, _data, ctype, filename, payload, frequency):
+ def handle_part(self, _data, ctype, filename, # pylint: disable=W0221
+ payload, frequency):
if ctype in handlers.CONTENT_SIGNALS:
return
@@ -65,6 +68,53 @@ class UpstartJobPartHandler(handlers.Handler):
path = os.path.join(self.upstart_dir, filename)
util.write_file(path, payload, 0644)
- # if inotify support is not present in the root filesystem
- # (overlayroot) then we need to tell upstart to re-read /etc
- util.subp(["initctl", "reload-configuration"], capture=False)
+ if SUITABLE_UPSTART:
+ util.subp(["initctl", "reload-configuration"], capture=False)
+
+
+def _has_suitable_upstart():
+ # (LP: #1124384)
+ # A bug in upstart means that invoking reload-configuration
+ # at this stage in boot causes havoc. So, try to determine if upstart
+ # is installed and whether reloading its configuration is OK.
+ if not os.path.exists("/sbin/initctl"):
+ return False
+ try:
+ (version_out, _err) = util.subp(["initctl", "version"])
+ except:
+ util.logexc(LOG, "initctl version failed")
+ return False
+
+ # expecting 'initctl version' to output something like: init (upstart X.Y)
+ if re.match("upstart 1.[0-7][)]", version_out):
+ return False
+ if "upstart 0." in version_out:
+ return False
+ elif "upstart 1.8" in version_out:
+ if not os.path.exists("/usr/bin/dpkg-query"):
+ return False
+ try:
+ (dpkg_ver, _err) = util.subp(["dpkg-query",
+ "--showformat=${Version}",
+ "--show", "upstart"], rcs=[0, 1])
+ except Exception:
+ util.logexc(LOG, "dpkg-query failed")
+ return False
+
+ try:
+ good = "1.8-0ubuntu1.2"
+ util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
+ return True
+ except util.ProcessExecutionError as e:
+ if e.exit_code == 1:
+ pass
+ else:
+ util.logexc(LOG, "dpkg --compare-versions failed [%s]",
+ e.exit_code)
+ except Exception as e:
+ util.logexc(LOG, "dpkg --compare-versions failed")
+ return False
+ else:
+ return True
+
+SUITABLE_UPSTART = _has_suitable_upstart()
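# Illustrative note (not part of the patch): assumed 'initctl version'
# outputs and the decision _has_suitable_upstart() above would reach:
#   "init (upstart 0.6.5)"  -> False (contains "upstart 0.")
#   "init (upstart 1.5)"    -> False (minor versions 0-7 are rejected)
#   "init (upstart 1.8)"    -> True only if the installed upstart package
#                              compares >= 1.8-0ubuntu1.2 via dpkg
#   "init (upstart 1.11)"   -> True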
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 2077401c..1c46efde 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
@@ -32,6 +32,7 @@ from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
CFG_ENV_NAME)
from cloudinit import log as logging
+from cloudinit import type_utils
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -68,7 +69,7 @@ class FileLock(object):
self.fn = fn
def __str__(self):
- return "<%s using file %r>" % (util.obj_name(self), self.fn)
+ return "<%s using file %r>" % (type_utils.obj_name(self), self.fn)
def canon_sem_name(name):
@@ -215,8 +216,8 @@ class ConfigMerger(object):
if ds_cfg and isinstance(ds_cfg, (dict)):
d_cfgs.append(ds_cfg)
except:
- util.logexc(LOG, ("Failed loading of datasource"
- " config object from %s"), self._ds)
+ util.logexc(LOG, "Failed loading of datasource config object "
+ "from %s", self._ds)
return d_cfgs
def _get_env_configs(self):
@@ -226,8 +227,8 @@ class ConfigMerger(object):
try:
e_cfgs.append(util.read_conf(e_fn))
except:
- util.logexc(LOG, ('Failed loading of env. config'
- ' from %s'), e_fn)
+ util.logexc(LOG, 'Failed loading of env. config from %s',
+ e_fn)
return e_cfgs
def _get_instance_configs(self):
@@ -241,8 +242,8 @@ class ConfigMerger(object):
try:
i_cfgs.append(util.read_conf(cc_fn))
except:
- util.logexc(LOG, ('Failed loading of cloud-config'
- ' from %s'), cc_fn)
+ util.logexc(LOG, 'Failed loading of cloud-config from %s',
+ cc_fn)
return i_cfgs
def _read_cfg(self):
@@ -258,8 +259,8 @@ class ConfigMerger(object):
try:
cfgs.append(util.read_conf(c_fn))
except:
- util.logexc(LOG, ("Failed loading of configuration"
- " from %s"), c_fn)
+ util.logexc(LOG, "Failed loading of configuration from %s",
+ c_fn)
cfgs.extend(self._get_env_configs())
cfgs.extend(self._get_instance_configs())
@@ -280,6 +281,7 @@ class ContentHandlers(object):
def __init__(self):
self.registered = {}
+ self.initialized = []
def __contains__(self, item):
return self.is_registered(item)
@@ -290,11 +292,13 @@ class ContentHandlers(object):
def is_registered(self, content_type):
return content_type in self.registered
- def register(self, mod):
+ def register(self, mod, initialized=False):
types = set()
for t in mod.list_types():
self.registered[t] = mod
types.add(t)
+ if initialized and mod not in self.initialized:
+ self.initialized.append(mod)
return types
def _get_handler(self, content_type):
diff --git a/cloudinit/log.py b/cloudinit/log.py
index da6c2851..622c946c 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -44,13 +44,13 @@ NOTSET = logging.NOTSET
DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s'
-def setupBasicLogging():
+def setupBasicLogging(level=DEBUG):
root = logging.getLogger()
console = logging.StreamHandler(sys.stderr)
console.setFormatter(logging.Formatter(DEF_CON_FORMAT))
- console.setLevel(DEBUG)
+ console.setLevel(level)
root.addHandler(console)
- root.setLevel(DEBUG)
+ root.setLevel(level)
def flushLoggers(root):
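# Illustrative note (assumptions marked): with the new 'level' parameter to
# setupBasicLogging() above, callers can tune console verbosity, e.g.
#   from cloudinit import log as logging
#   logging.setupBasicLogging(level=logging.WARN)  # WARN assumed re-exported
# while a bare setupBasicLogging() keeps the previous DEBUG default.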
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
new file mode 100644
index 00000000..0978b2c6
--- /dev/null
+++ b/cloudinit/mergers/__init__.py
@@ -0,0 +1,167 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+
+from cloudinit import importer
+from cloudinit import log as logging
+from cloudinit import type_utils
+
+NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$")
+
+LOG = logging.getLogger(__name__)
+DEF_MERGE_TYPE = "list()+dict()+str()"
+MERGER_PREFIX = 'm_'
+MERGER_ATTR = 'Merger'
+
+
+class UnknownMerger(object):
+ # Named differently so auto-method finding
+ # doesn't pick this up if there is ever a type
+ # named "unknown"
+ def _handle_unknown(self, _meth_wanted, value, _merge_with):
+ return value
+
+ # This merging will attempt to look for a '_on_X' method
+ # in our own object for a given object Y with type X,
+ # if found it will be called to perform the merge of a source
+ # object and an object to merge_with.
+ #
+ # If not found, the merge will be given to a '_handle_unknown'
+ # function which can decide what to do with the two values.
+ def merge(self, source, merge_with):
+ type_name = type_utils.obj_name(source)
+ type_name = type_name.lower()
+ method_name = "_on_%s" % (type_name)
+ meth = None
+ args = [source, merge_with]
+ if hasattr(self, method_name):
+ meth = getattr(self, method_name)
+ if not meth:
+ meth = self._handle_unknown
+ args.insert(0, method_name)
+ LOG.debug("Merging '%s' into '%s' using method '%s' of '%s'",
+ type_name, type_utils.obj_name(merge_with),
+ meth.__name__, self)
+ return meth(*args)
+
+
+class LookupMerger(UnknownMerger):
+ def __init__(self, lookups=None):
+ UnknownMerger.__init__(self)
+ if lookups is None:
+ self._lookups = []
+ else:
+ self._lookups = lookups
+
+ def __str__(self):
+ return 'LookupMerger: (%s)' % (len(self._lookups))
+
+ # For items which cannot be merged by the parent, this object
+ # will look up in an internally maintained set of objects and
+ # find which one of those objects can perform the merge. If
+ # any of the contained objects have the needed method, they
+ # will be called to perform the merge.
+ def _handle_unknown(self, meth_wanted, value, merge_with):
+ meth = None
+ for merger in self._lookups:
+ if hasattr(merger, meth_wanted):
+ # First one that has that method/attr gets to be
+ # the one that will be called
+ meth = getattr(merger, meth_wanted)
+ LOG.debug(("Merging using located merger '%s'"
+ " since it had method '%s'"), merger, meth_wanted)
+ break
+ if not meth:
+ return UnknownMerger._handle_unknown(self, meth_wanted,
+ value, merge_with)
+ return meth(value, merge_with)
+
+
+def dict_extract_mergers(config):
+ parsed_mergers = []
+ raw_mergers = config.pop('merge_how', None)
+ if raw_mergers is None:
+ raw_mergers = config.pop('merge_type', None)
+ if raw_mergers is None:
+ return parsed_mergers
+ if isinstance(raw_mergers, (str, basestring)):
+ return string_extract_mergers(raw_mergers)
+ for m in raw_mergers:
+ if isinstance(m, (dict)):
+ name = m['name']
+ name = name.replace("-", "_").strip()
+ opts = m['settings']
+ else:
+ name = m[0]
+ if len(m) >= 2:
+ opts = m[1:]
+ else:
+ opts = []
+ if name:
+ parsed_mergers.append((name, opts))
+ return parsed_mergers
+
+
+def string_extract_mergers(merge_how):
+ parsed_mergers = []
+ for m_name in merge_how.split("+"):
+ # Canonicalize the name (so that it can be found
+ # even when users alter it in various ways)
+ m_name = m_name.lower().strip()
+ m_name = m_name.replace("-", "_")
+ if not m_name:
+ continue
+ match = NAME_MTCH.match(m_name)
+ if not match:
+ msg = ("Matcher identifer '%s' is not in the right format" %
+ (m_name))
+ raise ValueError(msg)
+ (m_name, m_ops) = match.groups()
+ m_ops = m_ops.strip().split(",")
+ m_ops = [m.strip().lower() for m in m_ops if m.strip()]
+ parsed_mergers.append((m_name, m_ops))
+ return parsed_mergers
+
+
+def default_mergers():
+ return tuple(string_extract_mergers(DEF_MERGE_TYPE))
+
+
+def construct(parsed_mergers):
+ mergers_to_be = []
+ for (m_name, m_ops) in parsed_mergers:
+ if not m_name.startswith(MERGER_PREFIX):
+ m_name = MERGER_PREFIX + str(m_name)
+ merger_locs = importer.find_module(m_name,
+ [__name__],
+ [MERGER_ATTR])
+ if not merger_locs:
+ msg = ("Could not find merger module named '%s' "
+ "with attribute '%s'") % (m_name, MERGER_ATTR)
+ raise ImportError(msg)
+ else:
+ mod = importer.import_module(merger_locs[0])
+ mod_attr = getattr(mod, MERGER_ATTR)
+ mergers_to_be.append((mod_attr, m_ops))
+ # Now form them...
+ mergers = []
+ root = LookupMerger(mergers)
+ for (attr, opts) in mergers_to_be:
+ mergers.append(attr(root, opts))
+ return root
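# Illustrative note (not part of the patch): what string_extract_mergers()
# above produces for a typical merge_how string, derived directly from
# NAME_MTCH and the splitting logic:
#   string_extract_mergers("dict(replace)+list(append)+str()")
#     -> [('dict', ['replace']), ('list', ['append']), ('str', [])]
# construct() then resolves each name to cloudinit.mergers.m_<name>.Merger
# and chains the instances behind a LookupMerger.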
diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py
new file mode 100644
index 00000000..a16141fa
--- /dev/null
+++ b/cloudinit/mergers/m_dict.py
@@ -0,0 +1,86 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+DEF_MERGE_TYPE = 'no_replace'
+MERGE_TYPES = ('replace', DEF_MERGE_TYPE,)
+
+
+def _has_any(what, *keys):
+ for k in keys:
+ if k in what:
+ return True
+ return False
+
+
+class Merger(object):
+ def __init__(self, merger, opts):
+ self._merger = merger
+ # Affects merging behavior...
+ self._method = DEF_MERGE_TYPE
+ for m in MERGE_TYPES:
+ if m in opts:
+ self._method = m
+ break
+ # Affects how recursive merging is done on other primitives.
+ self._recurse_str = 'recurse_str' in opts
+ self._recurse_array = _has_any(opts, 'recurse_array', 'recurse_list')
+ self._allow_delete = 'allow_delete' in opts
+ # Backwards compat requires this to be on.
+ self._recurse_dict = True
+
+ def __str__(self):
+ s = ('DictMerger: (method=%s,recurse_str=%s,'
+ 'recurse_dict=%s,recurse_array=%s,allow_delete=%s)')
+ s = s % (self._method, self._recurse_str,
+ self._recurse_dict, self._recurse_array, self._allow_delete)
+ return s
+
+ def _do_dict_replace(self, value, merge_with, do_replace):
+
+ def merge_same_key(old_v, new_v):
+ if do_replace:
+ return new_v
+ if isinstance(new_v, (list, tuple)) and self._recurse_array:
+ return self._merger.merge(old_v, new_v)
+ if isinstance(new_v, (basestring)) and self._recurse_str:
+ return self._merger.merge(old_v, new_v)
+ if isinstance(new_v, (dict)) and self._recurse_dict:
+ return self._merger.merge(old_v, new_v)
+ # Otherwise leave it be...
+ return old_v
+
+ for (k, v) in merge_with.items():
+ if k in value:
+ if v is None and self._allow_delete:
+ value.pop(k)
+ else:
+ value[k] = merge_same_key(value[k], v)
+ else:
+ value[k] = v
+ return value
+
+ def _on_dict(self, value, merge_with):
+ if not isinstance(merge_with, (dict)):
+ return value
+ if self._method == 'replace':
+ merged = self._do_dict_replace(dict(value), merge_with, True)
+ elif self._method == 'no_replace':
+ merged = self._do_dict_replace(dict(value), merge_with, False)
+ else:
+ raise NotImplementedError("Unknown merge type %s" % (self._method))
+ return merged
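# Illustrative note (not part of the patch): the two dict merge methods
# above, applied to value={'a': 1} merged with merge_with={'a': 2, 'b': 3}:
#   no_replace (default) -> {'a': 1, 'b': 3}   # existing keys win
#   replace              -> {'a': 2, 'b': 3}   # incoming keys win
# With allow_delete set, an incoming {'a': None} would instead pop 'a'.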
diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
new file mode 100644
index 00000000..62999b4e
--- /dev/null
+++ b/cloudinit/mergers/m_list.py
@@ -0,0 +1,87 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+DEF_MERGE_TYPE = 'replace'
+MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace')
+
+
+def _has_any(what, *keys):
+ for k in keys:
+ if k in what:
+ return True
+ return False
+
+
+class Merger(object):
+ def __init__(self, merger, opts):
+ self._merger = merger
+ # Affects merging behavior...
+ self._method = DEF_MERGE_TYPE
+ for m in MERGE_TYPES:
+ if m in opts:
+ self._method = m
+ break
+ # Affects how recursive merging is done on other primitives
+ self._recurse_str = _has_any(opts, 'recurse_str')
+ self._recurse_dict = _has_any(opts, 'recurse_dict')
+ self._recurse_array = _has_any(opts, 'recurse_array', 'recurse_list')
+
+ def __str__(self):
+ return ('ListMerger: (method=%s,recurse_str=%s,'
+ 'recurse_dict=%s,recurse_array=%s)') % (self._method,
+ self._recurse_str,
+ self._recurse_dict,
+ self._recurse_array)
+
+ def _on_tuple(self, value, merge_with):
+ return tuple(self._on_list(list(value), merge_with))
+
+ def _on_list(self, value, merge_with):
+ if (self._method == 'replace' and
+ not isinstance(merge_with, (tuple, list))):
+ return merge_with
+
+ # Ok we now know that what we are merging with is a list or tuple.
+ merged_list = []
+ if self._method == 'prepend':
+ merged_list.extend(merge_with)
+ merged_list.extend(value)
+ return merged_list
+ elif self._method == 'append':
+ merged_list.extend(value)
+ merged_list.extend(merge_with)
+ return merged_list
+
+ def merge_same_index(old_v, new_v):
+ if self._method == 'no_replace':
+ # Leave it be...
+ return old_v
+ if isinstance(new_v, (list, tuple)) and self._recurse_array:
+ return self._merger.merge(old_v, new_v)
+ if isinstance(new_v, (str, basestring)) and self._recurse_str:
+ return self._merger.merge(old_v, new_v)
+ if isinstance(new_v, (dict)) and self._recurse_dict:
+ return self._merger.merge(old_v, new_v)
+ return new_v
+
+ # Ok now we are replacing same indexes
+ merged_list.extend(value)
+ common_len = min(len(merged_list), len(merge_with))
+ for i in xrange(0, common_len):
+ merged_list[i] = merge_same_index(merged_list[i], merge_with[i])
+ return merged_list
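# Illustrative note (not part of the patch): the list merge methods above,
# applied to value=[1, 2] merged with merge_with=[3, 4, 5]:
#   append     -> [1, 2, 3, 4, 5]
#   prepend    -> [3, 4, 5, 1, 2]
#   no_replace -> [1, 2]           # same-index items are kept
#   replace    -> [3, 4]           # only common indexes are taken, so
#                                  # merge_with's extra items are dropped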
diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py
new file mode 100644
index 00000000..e22ce28a
--- /dev/null
+++ b/cloudinit/mergers/m_str.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class Merger(object):
+ def __init__(self, _merger, opts):
+ self._append = 'append' in opts
+
+ def __str__(self):
+ return 'StringMerger: (append=%s)' % (self._append)
+
+ # On encountering a unicode object to merge value with
+ # we will for now just proxy into the string method to let it handle it.
+ def _on_unicode(self, value, merge_with):
+ return self._on_str(value, merge_with)
+
+ # On encountering a string object to merge with we will
+ # perform the following action, if appending we will
+ # merge them together, otherwise we will just return value.
+ def _on_str(self, value, merge_with):
+ if not isinstance(value, (basestring)):
+ return merge_with
+ if not self._append:
+ return merge_with
+ if isinstance(value, unicode):
+ return value + unicode(merge_with)
+ else:
+ return value + str(merge_with)
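# Illustrative note (not part of the patch): the string merge above. With
# 'append' in opts, merging 'foo' with 'bar' yields 'foobar'; without it,
# merge_with simply wins and 'bar' is returned.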
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 4b95b5b7..5df7f557 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -32,11 +32,13 @@ CFG_BUILTIN = {
'NoCloud',
'ConfigDrive',
'OpenNebula',
+ 'Azure',
'AltCloud',
'OVF',
'MAAS',
'Ec2',
'CloudStack',
+ 'SmartOS',
# At the end to act as a 'catch' when none of the above work...
'None',
],
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 9812bdcb..a834f8eb 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -1,10 +1,11 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Joe VLcek <JVLcek@RedHat.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -30,6 +31,7 @@ import os.path
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
+
from cloudinit.util import ProcessExecutionError
LOG = logging.getLogger(__name__)
@@ -78,7 +80,7 @@ def read_user_data_callback(mount_dir):
try:
user_data = util.load_file(user_data_file).strip()
except IOError:
- util.logexc(LOG, ('Failed accessing user data file.'))
+ util.logexc(LOG, 'Failed accessing user data file.')
return None
return user_data
@@ -91,8 +93,8 @@ class DataSourceAltCloud(sources.DataSource):
self.supported_seed_starts = ("/", "file://")
def __str__(self):
- mstr = "%s [seed=%s]" % (util.obj_name(self), self.seed)
- return mstr
+ root = sources.DataSource.__str__(self)
+ return "%s [seed=%s]" % (root, self.seed)
def get_cloud_type(self):
'''
@@ -177,7 +179,7 @@ class DataSourceAltCloud(sources.DataSource):
return False
# No user data found
- util.logexc(LOG, ('Failed accessing user data.'))
+ util.logexc(LOG, 'Failed accessing user data.')
return False
def user_data_rhevm(self):
@@ -204,12 +206,12 @@ class DataSourceAltCloud(sources.DataSource):
(cmd_out, _err) = util.subp(cmd)
LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
except ProcessExecutionError, _err:
- util.logexc(LOG, (('Failed command: %s\n%s') % \
- (' '.join(cmd), _err.message)))
+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
+ _err.message)
return False
except OSError, _err:
- util.logexc(LOG, (('Failed command: %s\n%s') % \
- (' '.join(cmd), _err.message)))
+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
+ _err.message)
return False
floppy_dev = '/dev/fd0'
@@ -221,12 +223,12 @@ class DataSourceAltCloud(sources.DataSource):
(cmd_out, _err) = util.subp(cmd)
LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
except ProcessExecutionError, _err:
- util.logexc(LOG, (('Failed command: %s\n%s') % \
- (' '.join(cmd), _err.message)))
+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
+ _err.message)
return False
except OSError, _err:
- util.logexc(LOG, (('Failed command: %s\n%s') % \
- (' '.join(cmd), _err.message)))
+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
+ _err.message)
return False
try:
@@ -235,8 +237,8 @@ class DataSourceAltCloud(sources.DataSource):
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, ("Failed to mount %s"
- " when looking for user data"), floppy_dev)
+ util.logexc(LOG, "Failed to mount %s when looking for user data",
+ floppy_dev)
self.userdata_raw = return_str
self.metadata = META_DATA_NOT_SUPPORTED
@@ -271,8 +273,8 @@ class DataSourceAltCloud(sources.DataSource):
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, ("Failed to mount %s"
- " when looking for user data"), cdrom_dev)
+ util.logexc(LOG, "Failed to mount %s when looking for user "
+ "data", cdrom_dev)
self.userdata_raw = return_str
self.metadata = META_DATA_NOT_SUPPORTED
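
A pattern repeated throughout this file: util.logexc() calls now pass printf-style arguments separately instead of pre-formatting the message, deferring interpolation to the logging machinery. A minimal illustration with the stdlib logger:

    import logging
    LOG = logging.getLogger('example')

    dev = '/dev/fd0'
    # Eagerly formatted; the work is done even if the record is filtered:
    LOG.warn('Failed to mount %s when looking for user data' % dev)
    # Deferred; logging interpolates only when the record is emitted:
    LOG.warn('Failed to mount %s when looking for user data', dev)
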
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
new file mode 100644
index 00000000..66d7728b
--- /dev/null
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -0,0 +1,502 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2013 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import base64
+import crypt
+import os
+import os.path
+import time
+from xml.dom import minidom
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+DS_NAME = 'Azure'
+DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
+AGENT_START = ['service', 'walinuxagent', 'start']
+BOUNCE_COMMAND = ['sh', '-xc',
+ "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]
+
+BUILTIN_DS_CONFIG = {
+ 'agent_command': AGENT_START,
+ 'data_dir': "/var/lib/waagent",
+ 'set_hostname': True,
+ 'hostname_bounce': {
+ 'interface': 'eth0',
+ 'policy': True,
+ 'command': BOUNCE_COMMAND,
+ 'hostname_command': 'hostname',
+ }
+}
+DS_CFG_PATH = ['datasource', DS_NAME]
+
+
+class DataSourceAzureNet(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.seed_dir = os.path.join(paths.seed_dir, 'azure')
+ self.cfg = {}
+ self.seed = None
+ self.ds_cfg = util.mergemanydict([
+ util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
+ BUILTIN_DS_CONFIG])
+
+ def __str__(self):
+ root = sources.DataSource.__str__(self)
+ return "%s [seed=%s]" % (root, self.seed)
+
+ def get_data(self):
+ # azure removes/ejects the cdrom containing the ovf-env.xml
+        # file on reboot. So, in order to successfully provision after a
+        # reboot we need to look in the datadir and consider it valid
+ ddir = self.ds_cfg['data_dir']
+
+ candidates = [self.seed_dir]
+ candidates.extend(list_possible_azure_ds_devs())
+ if ddir:
+ candidates.append(ddir)
+
+ found = None
+
+ for cdev in candidates:
+ try:
+ if cdev.startswith("/dev/"):
+ ret = util.mount_cb(cdev, load_azure_ds_dir)
+ else:
+ ret = load_azure_ds_dir(cdev)
+
+ except NonAzureDataSource:
+ continue
+ except BrokenAzureDataSource as exc:
+ raise exc
+ except util.MountFailedError:
+ LOG.warn("%s was not mountable" % cdev)
+ continue
+
+ (md, self.userdata_raw, cfg, files) = ret
+ self.seed = cdev
+ self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
+ self.cfg = cfg
+ found = cdev
+
+ LOG.debug("found datasource in %s", cdev)
+ break
+
+ if not found:
+ return False
+
+ if found == ddir:
+ LOG.debug("using files cached in %s", ddir)
+
+        # now update ds_cfg to reflect contents passed in config
+ usercfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
+ self.ds_cfg = util.mergemanydict([usercfg, self.ds_cfg])
+ mycfg = self.ds_cfg
+
+ # walinux agent writes files world readable, but expects
+ # the directory to be protected.
+ write_files(mycfg['data_dir'], files, dirmode=0700)
+
+ # handle the hostname 'publishing'
+ try:
+ handle_set_hostname(mycfg.get('set_hostname'),
+ self.metadata.get('local-hostname'),
+ mycfg['hostname_bounce'])
+ except Exception as e:
+ LOG.warn("Failed publishing hostname: %s" % e)
+ util.logexc(LOG, "handling set_hostname failed")
+
+ try:
+ invoke_agent(mycfg['agent_command'])
+ except util.ProcessExecutionError:
+ # claim the datasource even if the command failed
+ util.logexc(LOG, "agent command '%s' failed.",
+ mycfg['agent_command'])
+
+ shcfgxml = os.path.join(mycfg['data_dir'], "SharedConfig.xml")
+ wait_for = [shcfgxml]
+
+ fp_files = []
+ for pk in self.cfg.get('_pubkeys', []):
+ bname = pk['fingerprint'] + ".crt"
+ fp_files += [os.path.join(mycfg['data_dir'], bname)]
+
+ missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+ func=wait_for_files,
+ args=(wait_for + fp_files,))
+ if len(missing):
+ LOG.warn("Did not find files, but going on: %s", missing)
+
+ if shcfgxml in missing:
+ LOG.warn("SharedConfig.xml missing, using static instance-id")
+ else:
+ try:
+ self.metadata['instance-id'] = iid_from_shared_config(shcfgxml)
+ except ValueError as e:
+ LOG.warn("failed to get instance id in %s: %s" % (shcfgxml, e))
+
+ pubkeys = pubkeys_from_crt_files(fp_files)
+
+ self.metadata['public-keys'] = pubkeys
+
+ return True
+
+ def get_config_obj(self):
+ return self.cfg
+
+
+def handle_set_hostname(enabled, hostname, cfg):
+ if not util.is_true(enabled):
+ return
+
+ if not hostname:
+ LOG.warn("set_hostname was true but no local-hostname")
+ return
+
+ apply_hostname_bounce(hostname=hostname, policy=cfg['policy'],
+ interface=cfg['interface'],
+ command=cfg['command'],
+ hostname_command=cfg['hostname_command'])
+
+
+def apply_hostname_bounce(hostname, policy, interface, command,
+ hostname_command="hostname"):
+ # set the hostname to 'hostname' if it is not already set to that.
+ # then, if policy is not off, bounce the interface using command
+ prev_hostname = util.subp(hostname_command, capture=True)[0].strip()
+
+ util.subp([hostname_command, hostname])
+
+ msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
+ (prev_hostname, hostname, policy, interface))
+
+ if util.is_false(policy):
+ LOG.debug("pubhname: policy false, skipping [%s]", msg)
+ return
+
+ if prev_hostname == hostname and policy != "force":
+ LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
+ return
+
+ env = os.environ.copy()
+ env['interface'] = interface
+ env['hostname'] = hostname
+ env['old_hostname'] = prev_hostname
+
+ if command == "builtin":
+ command = BOUNCE_COMMAND
+
+ LOG.debug("pubhname: publishing hostname [%s]", msg)
+ shell = not isinstance(command, (list, tuple))
+ # capture=False, see comments in bug 1202758 and bug 1206164.
+ util.log_time(logfunc=LOG.debug, msg="publishing hostname",
+ get_uptime=True, func=util.subp,
+ kwargs={'args': command, 'shell': shell, 'capture': False,
+ 'env': env})
+
+
+def crtfile_to_pubkey(fname):
+ pipeline = ('openssl x509 -noout -pubkey < "$0" |'
+ 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
+ (out, _err) = util.subp(['sh', '-c', pipeline, fname], capture=True)
+ return out.rstrip()
+
+
+def pubkeys_from_crt_files(flist):
+ pubkeys = []
+ errors = []
+ for fname in flist:
+ try:
+ pubkeys.append(crtfile_to_pubkey(fname))
+ except util.ProcessExecutionError:
+            errors.append(fname)
+
+ if errors:
+ LOG.warn("failed to convert the crt files to pubkey: %s" % errors)
+
+ return pubkeys
+
+
+def wait_for_files(flist, maxwait=60, naplen=.5):
+ need = set(flist)
+ waited = 0
+ while waited < maxwait:
+ need -= set([f for f in need if os.path.exists(f)])
+ if len(need) == 0:
+ return []
+ time.sleep(naplen)
+ waited += naplen
+ return need
+
+
+def write_files(datadir, files, dirmode=None):
+ if not datadir:
+ return
+ if not files:
+ files = {}
+ util.ensure_dir(datadir, dirmode)
+ for (name, content) in files.items():
+ util.write_file(filename=os.path.join(datadir, name),
+ content=content, mode=0600)
+
+
+def invoke_agent(cmd):
+ # this is a function itself to simplify patching it for test
+ if cmd:
+ LOG.debug("invoking agent: %s" % cmd)
+ util.subp(cmd, shell=(not isinstance(cmd, list)))
+ else:
+ LOG.debug("not invoking agent")
+
+
+def find_child(node, filter_func):
+ ret = []
+ if not node.hasChildNodes():
+ return ret
+ for child in node.childNodes:
+ if filter_func(child):
+ ret.append(child)
+ return ret
+
+
+def load_azure_ovf_pubkeys(sshnode):
+ # This parses a 'SSH' node formatted like below, and returns
+ # an array of dicts.
+    # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
+ # 'path': 'where/to/go'}]
+ #
+ # <SSH><PublicKeys>
+    #     <PublicKey><Fingerprint>ABC</Fingerprint><Path>/ABC</Path>
+ # ...
+ # </PublicKeys></SSH>
+ results = find_child(sshnode, lambda n: n.localName == "PublicKeys")
+ if len(results) == 0:
+ return []
+ if len(results) > 1:
+ raise BrokenAzureDataSource("Multiple 'PublicKeys'(%s) in SSH node" %
+ len(results))
+
+ pubkeys_node = results[0]
+ pubkeys = find_child(pubkeys_node, lambda n: n.localName == "PublicKey")
+
+ if len(pubkeys) == 0:
+ return []
+
+ found = []
+ text_node = minidom.Document.TEXT_NODE
+
+ for pk_node in pubkeys:
+ if not pk_node.hasChildNodes():
+ continue
+ cur = {'fingerprint': "", 'path': ""}
+ for child in pk_node.childNodes:
+ if (child.nodeType == text_node or not child.localName):
+ continue
+
+ name = child.localName.lower()
+
+ if name not in cur.keys():
+ continue
+
+ if (len(child.childNodes) != 1 or
+ child.childNodes[0].nodeType != text_node):
+ continue
+
+ cur[name] = child.childNodes[0].wholeText.strip()
+ found.append(cur)
+
+ return found
+
+
+def single_node_at_path(node, pathlist):
+ curnode = node
+ for tok in pathlist:
+ results = find_child(curnode, lambda n: n.localName == tok)
+ if len(results) == 0:
+ raise ValueError("missing %s token in %s" % (tok, str(pathlist)))
+ if len(results) > 1:
+ raise ValueError("found %s nodes of type %s looking for %s" %
+ (len(results), tok, str(pathlist)))
+ curnode = results[0]
+
+ return curnode
+
+
+def read_azure_ovf(contents):
+ try:
+ dom = minidom.parseString(contents)
+ except Exception as e:
+ raise NonAzureDataSource("invalid xml: %s" % e)
+
+ results = find_child(dom.documentElement,
+ lambda n: n.localName == "ProvisioningSection")
+
+ if len(results) == 0:
+ raise NonAzureDataSource("No ProvisioningSection")
+ if len(results) > 1:
+ raise BrokenAzureDataSource("found '%d' ProvisioningSection items" %
+ len(results))
+ provSection = results[0]
+
+ lpcs_nodes = find_child(provSection,
+ lambda n: n.localName == "LinuxProvisioningConfigurationSet")
+
+    if len(lpcs_nodes) == 0:
+        raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
+    if len(lpcs_nodes) > 1:
+        raise BrokenAzureDataSource("found '%d' %ss" %
+                                    (len(lpcs_nodes),
+                                     "LinuxProvisioningConfigurationSet"))
+ lpcs = lpcs_nodes[0]
+
+ if not lpcs.hasChildNodes():
+ raise BrokenAzureDataSource("no child nodes of configuration set")
+
+ md_props = 'seedfrom'
+ md = {'azure_data': {}}
+ cfg = {}
+ ud = ""
+ password = None
+ username = None
+
+ for child in lpcs.childNodes:
+ if child.nodeType == dom.TEXT_NODE or not child.localName:
+ continue
+
+ name = child.localName.lower()
+
+ simple = False
+ value = ""
+ if (len(child.childNodes) == 1 and
+ child.childNodes[0].nodeType == dom.TEXT_NODE):
+ simple = True
+ value = child.childNodes[0].wholeText
+
+ attrs = {k: v for k, v in child.attributes.items()}
+
+ # we accept either UserData or CustomData. If both are present
+ # then behavior is undefined.
+ if (name == "userdata" or name == "customdata"):
+ if attrs.get('encoding') in (None, "base64"):
+ ud = base64.b64decode(''.join(value.split()))
+ else:
+ ud = value
+ elif name == "username":
+ username = value
+ elif name == "userpassword":
+ password = value
+ elif name == "hostname":
+ md['local-hostname'] = value
+ elif name == "dscfg":
+ if attrs.get('encoding') in (None, "base64"):
+ dscfg = base64.b64decode(''.join(value.split()))
+ else:
+ dscfg = value
+ cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
+ elif name == "ssh":
+ cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
+ elif name == "disablesshpasswordauthentication":
+ cfg['ssh_pwauth'] = util.is_false(value)
+ elif simple:
+ if name in md_props:
+ md[name] = value
+ else:
+ md['azure_data'][name] = value
+
+ defuser = {}
+ if username:
+ defuser['name'] = username
+ if password:
+ defuser['passwd'] = encrypt_pass(password)
+ defuser['lock_passwd'] = False
+
+ if defuser:
+ cfg['system_info'] = {'default_user': defuser}
+
+ if 'ssh_pwauth' not in cfg and password:
+ cfg['ssh_pwauth'] = True
+
+ return (md, ud, cfg)
+
+
+def encrypt_pass(password, salt_id="$6$"):
+ return crypt.crypt(password, salt_id + util.rand_str(strlen=16))
+
+
+def list_possible_azure_ds_devs():
+    # return a sorted list of devices that might have an azure datasource
+ devlist = []
+ for fstype in ("iso9660", "udf"):
+ devlist.extend(util.find_devs_with("TYPE=%s" % fstype))
+
+ devlist.sort(reverse=True)
+ return devlist
+
+
+def load_azure_ds_dir(source_dir):
+ ovf_file = os.path.join(source_dir, "ovf-env.xml")
+
+ if not os.path.isfile(ovf_file):
+ raise NonAzureDataSource("No ovf-env file found")
+
+ with open(ovf_file, "r") as fp:
+ contents = fp.read()
+
+ md, ud, cfg = read_azure_ovf(contents)
+ return (md, ud, cfg, {'ovf-env.xml': contents})
+
+
+def iid_from_shared_config(path):
+ with open(path, "rb") as fp:
+ content = fp.read()
+ return iid_from_shared_config_content(content)
+
+
+def iid_from_shared_config_content(content):
+ """
+ find INSTANCE_ID in:
+ <?xml version="1.0" encoding="utf-8"?>
+ <SharedConfig version="1.0.0.0" goalStateIncarnation="1">
+ <Deployment name="INSTANCE_ID" guid="{...}" incarnation="0">
+ <Service name="..." guid="{00000000-0000-0000-0000-000000000000}" />
+ """
+ dom = minidom.parseString(content)
+ depnode = single_node_at_path(dom, ["SharedConfig", "Deployment"])
+ return depnode.attributes.get('name').value
+
+
+class BrokenAzureDataSource(Exception):
+ pass
+
+
+class NonAzureDataSource(Exception):
+ pass
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
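
As a usage sketch for the SharedConfig parsing at the bottom of this file: the XML shape quoted in the iid_from_shared_config_content() docstring maps to the Deployment 'name' attribute. If run within this module (attribute values below are illustrative):

    sample = """<?xml version="1.0" encoding="utf-8"?>
    <SharedConfig version="1.0.0.0" goalStateIncarnation="1">
     <Deployment name="INSTANCE_ID" guid="{x}" incarnation="0">
      <Service name="s" guid="{00000000-0000-0000-0000-000000000000}" />
     </Deployment>
    </SharedConfig>"""

    print iid_from_shared_config_content(sample)  # -> INSTANCE_ID
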
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 275caf0d..08f661e4 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -4,11 +4,13 @@
# Copyright (C) 2012 Cosmin Luta
# Copyright (C) 2012 Yahoo! Inc.
# Copyright (C) 2012 Gerard Dethier
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Cosmin Luta <q4break@gmail.com>
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
# Author: Gerard Dethier <g.dethier@gmail.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -48,9 +50,6 @@ class DataSourceCloudStack(sources.DataSource):
raise RuntimeError("No virtual router found!")
self.metadata_address = "http://%s/" % (vr_addr)
- def __str__(self):
- return util.obj_name(self)
-
def _get_url_settings(self):
mcfg = self.ds_cfg
if not mcfg:
@@ -112,8 +111,8 @@ class DataSourceCloudStack(sources.DataSource):
int(time.time() - start_time))
return True
except Exception:
- util.logexc(LOG, ('Failed fetching from metadata '
- 'service %s'), self.metadata_address)
+ util.logexc(LOG, 'Failed fetching from metadata service %s',
+ self.metadata_address)
return False
def get_instance_id(self):
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index ec016a1d..835f2a9a 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -51,7 +51,9 @@ class DataSourceConfigDrive(sources.DataSource):
self.ec2_metadata = None
def __str__(self):
- mstr = "%s [%s,ver=%s]" % (util.obj_name(self), self.dsmode,
+ root = sources.DataSource.__str__(self)
+ mstr = "%s [%s,ver=%s]" % (root,
+ self.dsmode,
self.version)
mstr += "[source=%s]" % (self.source)
return mstr
@@ -152,7 +154,7 @@ class DataSourceConfigDrive(sources.DataSource):
return False
md = results['metadata']
- md = util.mergedict(md, DEFAULT_METADATA)
+ md = util.mergemanydict([md, DEFAULT_METADATA])
# Perform some metadata 'fixups'
#
@@ -256,6 +258,10 @@ def find_candidate_devs():
* labeled with 'config-2'
"""
+ # Query optical drive to get it in blkid cache for 2.6 kernels
+ util.find_devs_with(path="/dev/sr0")
+ util.find_devs_with(path="/dev/sr1")
+
by_fstype = (util.find_devs_with("TYPE=vfat") +
util.find_devs_with("TYPE=iso9660"))
by_label = util.find_devs_with("LABEL=config-2")
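
The new /dev/sr0 and /dev/sr1 probes force 2.6-era kernels to populate the blkid cache for optical drives before the TYPE/LABEL queries run. util.find_devs_with() is essentially a blkid front-end; roughly (a sketch, not the helper's exact command line):

    import subprocess
    # Approximately what util.find_devs_with("LABEL=config-2") asks blkid;
    # blkid exits non-zero when nothing matches, so tolerate that:
    try:
        out = subprocess.check_output(
            ['blkid', '-t', 'LABEL=config-2', '-o', 'device'])
        devices = out.splitlines()
    except subprocess.CalledProcessError:
        devices = []
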
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 2db53446..f010e640 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -49,9 +49,6 @@ class DataSourceEc2(sources.DataSource):
self.seed_dir = os.path.join(paths.seed_dir, "ec2")
self.api_ver = DEF_MD_VERSION
- def __str__(self):
- return util.obj_name(self)
-
def get_data(self):
seed_ret = {}
if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index b55d8a21..dfe90bc6 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -27,7 +27,7 @@ import urllib2
from cloudinit import log as logging
from cloudinit import sources
-from cloudinit import url_helper as uhelp
+from cloudinit import url_helper
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -50,7 +50,8 @@ class DataSourceMAAS(sources.DataSource):
self.oauth_clockskew = None
def __str__(self):
- return "%s [%s]" % (util.obj_name(self), self.base_url)
+ root = sources.DataSource.__str__(self)
+ return "%s [%s]" % (root, self.base_url)
def get_data(self):
mcfg = self.ds_cfg
@@ -79,7 +80,8 @@ class DataSourceMAAS(sources.DataSource):
self.base_url = url
(userdata, metadata) = read_maas_seed_url(self.base_url,
- self.md_headers)
+ self._md_headers,
+ paths=self.paths)
self.userdata_raw = userdata
self.metadata = metadata
return True
@@ -87,7 +89,7 @@ class DataSourceMAAS(sources.DataSource):
util.logexc(LOG, "Failed fetching metadata from url %s", url)
return False
- def md_headers(self, url):
+ def _md_headers(self, url):
mcfg = self.ds_cfg
# If we are missing token_key, token_secret or consumer_key
@@ -131,36 +133,37 @@ class DataSourceMAAS(sources.DataSource):
starttime = time.time()
check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
urls = [check_url]
- url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, exception_cb=self._except_cb,
- headers_cb=self.md_headers)
+ url = url_helper.wait_for_url(urls=urls, max_wait=max_wait,
+ timeout=timeout,
+ exception_cb=self._except_cb,
+ headers_cb=self._md_headers)
if url:
LOG.debug("Using metadata source: '%s'", url)
else:
LOG.critical("Giving up on md from %s after %i seconds",
- urls, int(time.time() - starttime))
+ urls, int(time.time() - starttime))
return bool(url)
def _except_cb(self, msg, exception):
- if not (isinstance(exception, urllib2.HTTPError) and
+ if not (isinstance(exception, url_helper.UrlError) and
(exception.code == 403 or exception.code == 401)):
return
+
if 'date' not in exception.headers:
- LOG.warn("date field not in %d headers" % exception.code)
+ LOG.warn("Missing header 'date' in %s response", exception.code)
return
date = exception.headers['date']
-
try:
ret_time = time.mktime(parsedate(date))
- except:
- LOG.warn("failed to convert datetime '%s'")
+ except Exception as e:
+ LOG.warn("Failed to convert datetime '%s': %s", date, e)
return
self.oauth_clockskew = int(ret_time - time.time())
- LOG.warn("set oauth clockskew to %d" % self.oauth_clockskew)
+ LOG.warn("Setting oauth clockskew to %d", self.oauth_clockskew)
return
@@ -188,11 +191,11 @@ def read_maas_seed_dir(seed_d):
def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
- version=MD_VERSION):
+ version=MD_VERSION, paths=None):
"""
Read the maas datasource at seed_url.
- header_cb is a method that should return a headers dictionary that will
- be given to urllib2.Request()
+ - header_cb is a method that should return a headers dictionary for
+ a given url
   Expected format of seed_url is the following files:
* <seed_url>/<version>/meta-data/instance-id
@@ -215,18 +218,28 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
md = {}
for name in file_order:
url = files.get(name)
- if header_cb:
- headers = header_cb(url)
+ if not header_cb:
+ def _cb(url):
+ return {}
+ header_cb = _cb
+
+ if name == 'user-data':
+ retries = 0
else:
- headers = {}
+ retries = None
+
try:
- resp = uhelp.readurl(url, headers=headers, timeout=timeout)
+ ssl_details = util.fetch_ssl_details(paths)
+ resp = util.read_file_or_url(url, retries=retries,
+ headers_cb=header_cb,
+ timeout=timeout,
+ ssl_details=ssl_details)
if resp.ok():
md[name] = str(resp)
else:
LOG.warn(("Fetching from %s resulted in"
" an invalid http code %s"), url, resp.code)
- except urllib2.HTTPError as e:
+ except url_helper.UrlError as e:
if e.code != 404:
raise
return check_seed_contents(md, seed_url)
@@ -369,7 +382,8 @@ if __name__ == "__main__":
if args.subcmd == "check-seed":
if args.url.startswith("http"):
(userdata, metadata) = read_maas_seed_url(args.url,
- header_cb=my_headers, version=args.apiver)
+ header_cb=my_headers,
+ version=args.apiver)
else:
(userdata, metadata) = read_maas_seed_url(args.url)
print "=== userdata ==="
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 097bbc52..4ef92a56 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
@@ -40,9 +40,8 @@ class DataSourceNoCloud(sources.DataSource):
self.supported_seed_starts = ("/", "file://")
def __str__(self):
- mstr = "%s [seed=%s][dsmode=%s]" % (util.obj_name(self),
- self.seed, self.dsmode)
- return mstr
+ root = sources.DataSource.__str__(self)
+ return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
def get_data(self):
defaults = {
@@ -65,7 +64,7 @@ class DataSourceNoCloud(sources.DataSource):
# Check to see if the seed dir has data.
seedret = {}
if util.read_optional_seed(seedret, base=self.seed_dir + "/"):
- md = util.mergedict(md, seedret['meta-data'])
+ md = util.mergemanydict([md, seedret['meta-data']])
ud = seedret['user-data']
found.append(self.seed_dir)
LOG.debug("Using seeded cache data from %s", self.seed_dir)
@@ -82,15 +81,19 @@ class DataSourceNoCloud(sources.DataSource):
if self.ds_cfg['user-data']:
ud = self.ds_cfg['user-data']
if self.ds_cfg['meta-data'] is not False:
- md = util.mergedict(md, self.ds_cfg['meta-data'])
+ md = util.mergemanydict([md, self.ds_cfg['meta-data']])
if 'ds_config' not in found:
found.append("ds_config")
- if self.ds_cfg.get('fs_label', "cidata"):
+ label = self.ds_cfg.get('fs_label', "cidata")
+ if label is not None:
+ # Query optical drive to get it in blkid cache for 2.6 kernels
+ util.find_devs_with(path="/dev/sr0")
+ util.find_devs_with(path="/dev/sr1")
+
fslist = util.find_devs_with("TYPE=vfat")
fslist.extend(util.find_devs_with("TYPE=iso9660"))
- label = self.ds_cfg.get('fs_label')
label_list = util.find_devs_with("LABEL=%s" % label)
devlist = list(set(fslist) & set(label_list))
devlist.sort(reverse=True)
@@ -100,7 +103,7 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Attempting to use data from %s", dev)
(newmd, newud) = util.mount_cb(dev, util.read_seeded)
- md = util.mergedict(newmd, md)
+ md = util.mergemanydict([newmd, md])
ud = newud
# For seed from a device, the default mode is 'net'.
@@ -116,8 +119,8 @@ class DataSourceNoCloud(sources.DataSource):
if e.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, ("Failed to mount %s"
- " when looking for data"), dev)
+ util.logexc(LOG, "Failed to mount %s when looking for "
+ "data", dev)
# There was no indication on kernel cmdline or data
# in the seeddir suggesting this handler should be used.
@@ -150,11 +153,11 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Using seeded cache data from %s", seedfrom)
# Values in the command line override those from the seed
- md = util.mergedict(md, md_seed)
+ md = util.mergemanydict([md, md_seed])
found.append(seedfrom)
# Now that we have exhausted any other places merge in the defaults
- md = util.mergedict(md, defaults)
+ md = util.mergemanydict([md, defaults])
# Update the network-interfaces if metadata had 'network-interfaces'
# entry and this is the local datasource, or 'seedfrom' was used
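
The repeated util.mergedict() to util.mergemanydict() switch above merges a whole list at once, with earlier entries taking precedence over later ones. A toy model of those semantics (plain dicts standing in for metadata; not the real implementation):

    md = {'instance-id': 'iid-abc'}
    defaults = {'instance-id': 'nocloud', 'dsmode': 'net'}
    # mergemanydict([md, defaults]) keeps md's values and only fills gaps:
    merged = dict(defaults)
    merged.update(md)
    assert merged == {'instance-id': 'iid-abc', 'dsmode': 'net'}
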
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
index c2125bee..12a8a992 100644
--- a/cloudinit/sources/DataSourceNone.py
+++ b/cloudinit/sources/DataSourceNone.py
@@ -18,7 +18,6 @@
from cloudinit import log as logging
from cloudinit import sources
-from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -41,9 +40,6 @@ class DataSourceNone(sources.DataSource):
def get_instance_id(self):
return 'iid-datasource-none'
- def __str__(self):
- return util.obj_name(self)
-
@property
def is_disconnected(self):
return True
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index e90150c6..77b43e17 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -43,7 +43,8 @@ class DataSourceOVF(sources.DataSource):
self.supported_seed_starts = ("/", "file://")
def __str__(self):
- return "%s [seed=%s]" % (util.obj_name(self), self.seed)
+ root = sources.DataSource.__str__(self)
+ return "%s [seed=%s]" % (root, self.seed)
def get_data(self):
found = []
@@ -93,11 +94,11 @@ class DataSourceOVF(sources.DataSource):
(md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
LOG.debug("Using seeded cache data from %s", seedfrom)
- md = util.mergedict(md, md_seed)
+ md = util.mergemanydict([md, md_seed])
found.append(seedfrom)
# Now that we have exhausted any other places merge in the defaults
- md = util.mergedict(md, defaults)
+ md = util.mergemanydict([md, defaults])
self.seed = ",".join(found)
self.metadata = md
@@ -193,6 +194,11 @@ def transport_iso9660(require_iso=True):
if contents is not False:
return (contents, dev, fname)
+ if require_iso:
+ mtype = "iso9660"
+ else:
+ mtype = None
+
devs = os.listdir("/dev/")
devs.sort()
for dev in devs:
@@ -210,7 +216,7 @@ def transport_iso9660(require_iso=True):
try:
(fname, contents) = util.mount_cb(fullp,
- get_ovf_env, mtype="iso9660")
+ get_ovf_env, mtype=mtype)
except util.MountFailedError:
LOG.debug("%s not mountable as iso9660" % fullp)
continue
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
new file mode 100644
index 00000000..d348d20b
--- /dev/null
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -0,0 +1,244 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2013 Canonical Ltd.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# Datasource for provisioning on SmartOS. This works on Joyent
+# and public/private Clouds using SmartOS.
+#
+# SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests.
+#    The meta-data is transmitted as key/value pairs requested over
+#    the console. For example, to get the hostname, you
+#    would send "GET hostname" on /dev/ttyS1.
+#
+
+
+import base64
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+import os
+import os.path
+import serial
+
+DEF_TTY_LOC = '/dev/ttyS1'
+DEF_TTY_TIMEOUT = 60
+LOG = logging.getLogger(__name__)
+
+SMARTOS_ATTRIB_MAP = {
+    # Cloud-init Key : (SmartOS Key, Strip line endings)
+ 'local-hostname': ('hostname', True),
+ 'public-keys': ('root_authorized_keys', True),
+ 'user-script': ('user-script', False),
+ 'user-data': ('user-data', False),
+ 'iptables_disable': ('iptables_disable', True),
+ 'motd_sys_info': ('motd_sys_info', True),
+}
+
+# These are values which will never be base64 encoded.
+# They come from the cloud platform, not the user.
+SMARTOS_NO_BASE64 = ['root_authorized_keys', 'motd_sys_info',
+ 'iptables_disable']
+
+
+class DataSourceSmartOS(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.seed_dir = os.path.join(paths.seed_dir, 'sdc')
+ self.is_smartdc = None
+
+ self.seed = self.ds_cfg.get("serial_device", DEF_TTY_LOC)
+ self.seed_timeout = self.ds_cfg.get("serial_timeout", DEF_TTY_TIMEOUT)
+ self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode',
+ SMARTOS_NO_BASE64)
+ self.b64_keys = self.ds_cfg.get('base64_keys', [])
+ self.b64_all = self.ds_cfg.get('base64_all', False)
+
+ def __str__(self):
+ root = sources.DataSource.__str__(self)
+ return "%s [seed=%s]" % (root, self.seed)
+
+ def get_data(self):
+ md = {}
+ ud = ""
+
+ if not os.path.exists(self.seed):
+ LOG.debug("Host does not appear to be on SmartOS")
+ return False
+
+ dmi_info = dmi_data()
+ if dmi_info is False:
+ LOG.debug("No dmidata utility found")
+ return False
+
+ system_uuid, system_type = dmi_info
+ if 'smartdc' not in system_type.lower():
+ LOG.debug("Host is not on SmartOS. system_type=%s", system_type)
+ return False
+ self.is_smartdc = True
+ md['instance-id'] = system_uuid
+
+ b64_keys = self.query('base64_keys', strip=True, b64=False)
+ if b64_keys is not None:
+ self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
+
+ b64_all = self.query('base64_all', strip=True, b64=False)
+ if b64_all is not None:
+ self.b64_all = util.is_true(b64_all)
+
+ for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems():
+ smartos_noun, strip = attribute
+ md[ci_noun] = self.query(smartos_noun, strip=strip)
+
+ if not md['local-hostname']:
+ md['local-hostname'] = system_uuid
+
+ ud = None
+ if md['user-data']:
+ ud = md['user-data']
+ elif md['user-script']:
+ ud = md['user-script']
+
+ self.metadata = md
+ self.userdata_raw = ud
+ return True
+
+ def get_instance_id(self):
+ return self.metadata['instance-id']
+
+ def query(self, noun, strip=False, default=None, b64=None):
+ if b64 is None:
+ if noun in self.smartos_no_base64:
+ b64 = False
+ elif self.b64_all or noun in self.b64_keys:
+ b64 = True
+
+ return query_data(noun=noun, strip=strip, seed_device=self.seed,
+ seed_timeout=self.seed_timeout, default=default,
+ b64=b64)
+
+
+def get_serial(seed_device, seed_timeout):
+ """This is replaced in unit testing, allowing us to replace
+ serial.Serial with a mocked class.
+
+    The timeout value of 60 seconds should never be hit. The value
+    is taken from SmartOS's own provisioning tools. Since we are reading
+    each line individually up until the single ".", the response is
+    usually received very quickly (i.e. in microseconds).
+ """
+ if not seed_device:
+ raise AttributeError("seed_device value is not set")
+
+ ser = serial.Serial(seed_device, timeout=seed_timeout)
+ if not ser.isOpen():
+ raise SystemError("Unable to open %s" % seed_device)
+
+ return ser
+
+
+def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
+ b64=None):
+    """Makes a request via the serial console using "GET <NOUN>".
+
+        In the response, the first line is the status, while subsequent lines
+        are the value. A line containing a single "." indicates the end of
+        the response.
+
+        If the response is expected to be base64 encoded, then set b64
+        to true. Unfortunately, there is no way to know if something is 100%
+        encoded, so this method relies on being told whether the data is
+        base64 encoded or not.
+ """
+
+ if not noun:
+ return False
+
+ ser = get_serial(seed_device, seed_timeout)
+ ser.write("GET %s\n" % noun.rstrip())
+ status = str(ser.readline()).rstrip()
+ response = []
+ eom_found = False
+
+ if 'SUCCESS' not in status:
+ ser.close()
+ return default
+
+ while not eom_found:
+ m = ser.readline()
+ if m.rstrip() == ".":
+ eom_found = True
+ else:
+ response.append(m)
+
+ ser.close()
+
+ if b64 is None:
+ b64 = query_data('b64-%s' % noun, seed_device=seed_device,
+ seed_timeout=seed_timeout, b64=False,
+ default=False, strip=True)
+ b64 = util.is_true(b64)
+
+ resp = None
+ if b64 or strip:
+ resp = "".join(response).rstrip()
+ else:
+ resp = "".join(response)
+
+ if b64:
+ try:
+ return base64.b64decode(resp)
+ except TypeError:
+ LOG.warn("Failed base64 decoding key '%s'", noun)
+ return resp
+
+ return resp
+
+
+def dmi_data():
+ sys_uuid, sys_type = None, None
+ dmidecode_path = util.which('dmidecode')
+ if not dmidecode_path:
+ return False
+
+ sys_uuid_cmd = [dmidecode_path, "-s", "system-uuid"]
+ try:
+        LOG.debug("Getting system UUID from dmidecode")
+ (sys_uuid, _err) = util.subp(sys_uuid_cmd)
+ except Exception as e:
+        util.logexc(LOG, "Failed to get system UUID: %s", e)
+
+ sys_type_cmd = [dmidecode_path, "-s", "system-product-name"]
+ try:
+ LOG.debug("Determining hypervisor product name via dmidecode")
+ (sys_type, _err) = util.subp(sys_type_cmd)
+ except Exception as e:
+        util.logexc(LOG, "Failed to get system product name: %s", e)
+
+ return sys_uuid.lower(), sys_type
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceSmartOS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
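
Tying query_data() together: the guest writes "GET <noun>" to the serial device and reads back a status line, the value, and a terminating ".". A hypothetical exchange and the matching call:

    # Wire transcript (illustrative):
    #   guest: GET hostname
    #   host:  SUCCESS
    #   host:  myhost.example
    #   host:  .
    hostname = query_data('hostname', seed_device='/dev/ttyS1',
                          seed_timeout=60, strip=True)
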
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 0bad4c8b..b0e43954 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -25,6 +25,7 @@ import os
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import type_utils
from cloudinit import user_data as ud
from cloudinit import util
@@ -52,7 +53,7 @@ class DataSource(object):
self.userdata = None
self.metadata = None
self.userdata_raw = None
- name = util.obj_name(self)
+ name = type_utils.obj_name(self)
if name.startswith(DS_PREFIX):
name = name[len(DS_PREFIX):]
self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
@@ -62,6 +63,9 @@ class DataSource(object):
else:
self.ud_proc = ud_proc
+ def __str__(self):
+ return type_utils.obj_name(self)
+
def get_userdata(self, apply_filter=False):
if self.userdata is None:
self.userdata = self.ud_proc.process(self.get_userdata_raw())
@@ -131,7 +135,8 @@ class DataSource(object):
@property
def availability_zone(self):
- return self.metadata.get('availability-zone')
+ return self.metadata.get('availability-zone',
+ self.metadata.get('availability_zone'))
def get_instance_id(self):
if not self.metadata or 'instance-id' not in self.metadata:
@@ -221,7 +226,7 @@ def normalize_pubkey_data(pubkey_data):
def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
ds_list = list_sources(cfg_list, ds_deps, pkg_list)
- ds_names = [util.obj_name(f) for f in ds_list]
+ ds_names = [type_utils.obj_name(f) for f in ds_list]
LOG.debug("Searching for data source in: %s", ds_names)
for cls in ds_list:
@@ -229,7 +234,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
LOG.debug("Seeing if we can get any data from %s", cls)
s = cls(sys_cfg, distro, paths)
if s.get_data():
- return (s, util.obj_name(cls))
+ return (s, type_utils.obj_name(cls))
except Exception:
util.logexc(LOG, "Getting data from %s failed", cls)
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index dd6b742f..70a577bc 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -19,9 +19,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from StringIO import StringIO
-
-import csv
import os
import pwd
@@ -33,6 +30,15 @@ LOG = logging.getLogger(__name__)
# See: man sshd_config
DEF_SSHD_CFG = "/etc/ssh/sshd_config"
+# taken from openssh source key.c/key_type_from_name
+VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa",
+ "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
+ "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com",
+ "ecdsa-sha2-nistp256-cert-v01@openssh.com",
+ "ecdsa-sha2-nistp384-cert-v01@openssh.com",
+ "ecdsa-sha2-nistp521-cert-v01@openssh.com")
+
class AuthKeyLine(object):
def __init__(self, source, keytype=None, base64=None,
@@ -43,11 +49,8 @@ class AuthKeyLine(object):
self.keytype = keytype
self.source = source
- def empty(self):
- if (not self.base64 and
- not self.comment and not self.keytype and not self.options):
- return True
- return False
+ def valid(self):
+ return (self.base64 and self.keytype)
def __str__(self):
toks = []
@@ -107,62 +110,47 @@ class AuthKeyLineParser(object):
i = i + 1
options = ent[0:i]
- options_lst = []
-
- # Now use a csv parser to pull the options
- # out of the above string that we just found an endpoint for.
- #
- # No quoting so we don't mess up any of the quoting that
- # is already there.
- reader = csv.reader(StringIO(options), quoting=csv.QUOTE_NONE)
- for row in reader:
- for e in row:
- # Only keep non-empty csv options
- e = e.strip()
- if e:
- options_lst.append(e)
-
- # Now take the rest of the items before the string
- # as long as there is room to do this...
- toks = []
- if i + 1 < len(ent):
- rest = ent[i + 1:]
- toks = rest.split(None, 2)
- return (options_lst, toks)
-
- def _form_components(self, src_line, toks, options=None):
- components = {}
- if len(toks) == 1:
- components['base64'] = toks[0]
- elif len(toks) == 2:
- components['base64'] = toks[0]
- components['comment'] = toks[1]
- elif len(toks) == 3:
- components['keytype'] = toks[0]
- components['base64'] = toks[1]
- components['comment'] = toks[2]
- components['options'] = options
- if not components:
- return AuthKeyLine(src_line)
- else:
- return AuthKeyLine(src_line, **components)
- def parse(self, src_line, def_opt=None):
+ # Return the rest of the string in 'remain'
+ remain = ent[i:].lstrip()
+ return (options, remain)
+
+ def parse(self, src_line, options=None):
+        # modeled after openssh's auth2-pubkey.c:user_key_allowed2
line = src_line.rstrip("\r\n")
if line.startswith("#") or line.strip() == '':
return AuthKeyLine(src_line)
- else:
- ent = line.strip()
- toks = ent.split(None, 3)
- if len(toks) < 4:
- return self._form_components(src_line, toks, def_opt)
- else:
- (options, toks) = self._extract_options(ent)
- if options:
- options = ",".join(options)
- else:
- options = def_opt
- return self._form_components(src_line, toks, options)
+
+ def parse_ssh_key(ent):
+            # return keytype, key, [comment]
+            toks = ent.split(None, 2)
+            if len(toks) < 2:
+                raise TypeError("Too few fields: %s" % len(toks))
+ if toks[0] not in VALID_KEY_TYPES:
+ raise TypeError("Invalid keytype %s" % toks[0])
+
+ # valid key type and 2 or 3 fields:
+ if len(toks) == 2:
+ # no comment in line
+ toks.append("")
+
+ return toks
+
+ ent = line.strip()
+ try:
+ (keytype, base64, comment) = parse_ssh_key(ent)
+ except TypeError:
+ (keyopts, remain) = self._extract_options(ent)
+ if options is None:
+ options = keyopts
+
+ try:
+ (keytype, base64, comment) = parse_ssh_key(remain)
+ except TypeError:
+ return AuthKeyLine(src_line)
+
+ return AuthKeyLine(src_line, keytype=keytype, base64=base64,
+ comment=comment, options=options)
def parse_authorized_keys(fname):
@@ -186,11 +174,11 @@ def update_authorized_keys(old_entries, keys):
for i in range(0, len(old_entries)):
ent = old_entries[i]
- if ent.empty() or not ent.base64:
+ if not ent.valid():
continue
# Replace those with the same base64
for k in keys:
- if k.empty() or not k.base64:
+            if not k.valid():
continue
if k.base64 == ent.base64:
# Replace it with our better one
@@ -241,15 +229,13 @@ def extract_authorized_keys(username):
except (IOError, OSError):
# Give up and use a default key filename
auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
- util.logexc(LOG, ("Failed extracting 'AuthorizedKeysFile'"
- " in ssh config"
- " from %r, using 'AuthorizedKeysFile' file"
- " %r instead"),
- DEF_SSHD_CFG, auth_key_fn)
+ util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in ssh "
+ "config from %r, using 'AuthorizedKeysFile' file "
+ "%r instead", DEF_SSHD_CFG, auth_key_fn)
return (auth_key_fn, parse_authorized_keys(auth_key_fn))
-def setup_user_keys(keys, username, key_prefix):
+def setup_user_keys(keys, username, options=None):
# Make sure the users .ssh dir is setup accordingly
(ssh_dir, pwent) = users_ssh_info(username)
if not os.path.isdir(ssh_dir):
@@ -260,7 +246,7 @@ def setup_user_keys(keys, username, key_prefix):
parser = AuthKeyLineParser()
key_entries = []
for k in keys:
- key_entries.append(parser.parse(str(k), def_opt=key_prefix))
+ key_entries.append(parser.parse(str(k), options=options))
# Extract the old and make the new
(auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
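
The rewritten AuthKeyLineParser mirrors openssh's user_key_allowed2: first try the line as 'keytype base64 [comment]', and only strip an options prefix when that fails. A usage sketch (key material truncated, values illustrative):

    parser = AuthKeyLineParser()
    akl = parser.parse('command="echo hi" ssh-rsa AAAAB3Nz... me@host')
    # akl.options  -> 'command="echo hi"'
    # akl.keytype  -> 'ssh-rsa'
    # akl.base64   -> 'AAAAB3Nz...'
    # akl.comment  -> 'me@host'
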
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index d7d1dea0..3e49e8c5 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
@@ -43,6 +43,7 @@ from cloudinit import helpers
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import type_utils
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -153,9 +154,8 @@ class Init(object):
try:
util.chownbyname(log_file, u, g)
except OSError:
- util.logexc(LOG, ("Unable to change the ownership"
- " of %s to user %s, group %s"),
- log_file, u, g)
+ util.logexc(LOG, "Unable to change the ownership of %s to "
+ "user %s, group %s", log_file, u, g)
def read_cfg(self, extra_fns=None):
# None check so that we don't keep on re-loading if empty
@@ -211,7 +211,7 @@ class Init(object):
# Any config provided???
pkg_list = self.cfg.get('datasource_pkg_list') or []
# Add the defaults at the end
- for n in ['', util.obj_name(sources)]:
+ for n in ['', type_utils.obj_name(sources)]:
if n not in pkg_list:
pkg_list.append(n)
cfg_list = self.cfg.get('datasource_list') or []
@@ -271,7 +271,7 @@ class Init(object):
dp = self.paths.get_cpath('data')
# Write what the datasource was and is..
- ds = "%s: %s" % (util.obj_name(self.datasource), self.datasource)
+ ds = "%s: %s" % (type_utils.obj_name(self.datasource), self.datasource)
previous_ds = None
ds_fn = os.path.join(idir, 'datasource')
try:
@@ -344,12 +344,13 @@ class Init(object):
cdir = self.paths.get_cpath("handlers")
idir = self._get_ipath("handlers")
- # Add the path to the plugins dir to the top of our list for import
- # instance dir should be read before cloud-dir
- if cdir and cdir not in sys.path:
- sys.path.insert(0, cdir)
- if idir and idir not in sys.path:
- sys.path.insert(0, idir)
+ # Add the path to the plugins dir to the top of our list for importing
+ # new handlers.
+ #
+ # Note(harlowja): instance dir should be read before cloud-dir
+ for d in [cdir, idir]:
+ if d and d not in sys.path:
+ sys.path.insert(0, d)
# Ensure datasource fetched before activation (just incase)
user_data_msg = self.datasource.get_userdata(True)
@@ -357,24 +358,34 @@ class Init(object):
# This keeps track of all the active handlers
c_handlers = helpers.ContentHandlers()
- # Add handlers in cdir
- potential_handlers = util.find_modules(cdir)
- for (fname, mod_name) in potential_handlers.iteritems():
- try:
- mod_locs = importer.find_module(mod_name, [''],
- ['list_types',
- 'handle_part'])
- if not mod_locs:
- LOG.warn(("Could not find a valid user-data handler"
- " named %s in file %s"), mod_name, fname)
- continue
- mod = importer.import_module(mod_locs[0])
- mod = handlers.fixup_handler(mod)
- types = c_handlers.register(mod)
- LOG.debug("Added handler for %s from %s", types, fname)
- except:
- util.logexc(LOG, "Failed to register handler from %s", fname)
-
+ def register_handlers_in_dir(path):
+ # Attempts to register any handler modules under the given path.
+ if not path or not os.path.isdir(path):
+ return
+ potential_handlers = util.find_modules(path)
+ for (fname, mod_name) in potential_handlers.iteritems():
+ try:
+ mod_locs = importer.find_module(mod_name, [''],
+ ['list_types',
+ 'handle_part'])
+ if not mod_locs:
+ LOG.warn(("Could not find a valid user-data handler"
+ " named %s in file %s"), mod_name, fname)
+ continue
+ mod = importer.import_module(mod_locs[0])
+ mod = handlers.fixup_handler(mod)
+ types = c_handlers.register(mod)
+ LOG.debug("Added handler for %s from %s", types, fname)
+ except Exception:
+ util.logexc(LOG, "Failed to register handler from %s",
+ fname)
+
+ # Add any handlers in the cloud-dir
+ register_handlers_in_dir(cdir)
+
+ # Register any other handlers that come from the default set. This
+ # is done after the cloud-dir handlers so that the cdir modules can
+ # take over the default user-data handler content-types.
def_handlers = self._default_userdata_handlers()
applied_def_handlers = c_handlers.register_defaults(def_handlers)
if applied_def_handlers:
@@ -383,36 +394,51 @@ class Init(object):
# Form our cloud interface
data = self.cloudify()
- # Init the handlers first
- called = []
- for (_ctype, mod) in c_handlers.iteritems():
- if mod in called:
- continue
- handlers.call_begin(mod, data, frequency)
- called.append(mod)
-
- # Walk the user data
- part_data = {
- 'handlers': c_handlers,
- # Any new handlers that are encountered get writen here
- 'handlerdir': idir,
- 'data': data,
- # The default frequency if handlers don't have one
- 'frequency': frequency,
- # This will be used when new handlers are found
- # to help write there contents to files with numbered
- # names...
- 'handlercount': 0,
- }
- handlers.walk(user_data_msg, handlers.walker_callback, data=part_data)
+ def init_handlers():
+ # Init the handlers first
+ for (_ctype, mod) in c_handlers.iteritems():
+ if mod in c_handlers.initialized:
+ # Avoid initing the same module twice (if said module
+ # is registered to more than one content-type).
+ continue
+ handlers.call_begin(mod, data, frequency)
+ c_handlers.initialized.append(mod)
+
+ def walk_handlers():
+ # Walk the user data
+ part_data = {
+ 'handlers': c_handlers,
+                # Any new handlers that are encountered get written here
+ 'handlerdir': idir,
+ 'data': data,
+ # The default frequency if handlers don't have one
+ 'frequency': frequency,
+ # This will be used when new handlers are found
+ # to help write there contents to files with numbered
+                # to help write their contents to files with numbered
+ 'handlercount': 0,
+ }
+ handlers.walk(user_data_msg, handlers.walker_callback,
+ data=part_data)
+
+ def finalize_handlers():
+ # Give callbacks opportunity to finalize
+ for (_ctype, mod) in c_handlers.iteritems():
+ if mod not in c_handlers.initialized:
+                    # Said module was never inited in the first place, so let's
+ # not attempt to finalize those that never got called.
+ continue
+ c_handlers.initialized.remove(mod)
+ try:
+ handlers.call_end(mod, data, frequency)
+                except Exception:
+ util.logexc(LOG, "Failed to finalize handler: %s", mod)
- # Give callbacks opportunity to finalize
- called = []
- for (_ctype, mod) in c_handlers.iteritems():
- if mod in called:
- continue
- handlers.call_end(mod, data, frequency)
- called.append(mod)
+ try:
+ init_handlers()
+ walk_handlers()
+ finally:
+ finalize_handlers()
# Perform post-consumption adjustments so that
# modules that run during the init stage reflect
@@ -488,7 +514,7 @@ class Modules(object):
else:
raise TypeError(("Failed to read '%s' item in config,"
" unknown type %s") %
- (item, util.obj_name(item)))
+ (item, type_utils.obj_name(item)))
return module_list
def _fixup_modules(self, raw_mods):
@@ -506,7 +532,7 @@ class Modules(object):
# Reset it so when ran it will get set to a known value
freq = None
mod_locs = importer.find_module(mod_name,
- ['', util.obj_name(config)],
+ ['', type_utils.obj_name(config)],
['handle'])
if not mod_locs:
LOG.warn("Could not find module named %s", mod_name)
diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py
new file mode 100644
index 00000000..2decbfc5
--- /dev/null
+++ b/cloudinit/type_utils.py
@@ -0,0 +1,34 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=C0302
+
+import types
+
+
+def obj_name(obj):
+ if isinstance(obj, (types.TypeType,
+ types.ModuleType,
+ types.FunctionType,
+ types.LambdaType)):
+ return str(obj.__name__)
+ return obj_name(obj.__class__)
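
obj_name() reduces classes, modules, and functions to their __name__, and recurses through __class__ for instances, so call sites no longer care which they were handed. For example:

    class Foo(object):
        pass

    obj_name(Foo)       # -> 'Foo'  (a type: uses __name__ directly)
    obj_name(Foo())     # -> 'Foo'  (an instance: recurses via __class__)
    obj_name(obj_name)  # -> 'obj_name'  (a function)
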
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index f3e3fd7e..19a30409 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -20,43 +20,55 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from contextlib import closing
-
-import errno
-import socket
import time
-import urllib
-import urllib2
+
+import requests
+from requests import exceptions
+
+from urlparse import (urlparse, urlunparse)
from cloudinit import log as logging
from cloudinit import version
LOG = logging.getLogger(__name__)
+# Check if requests has ssl support (added in requests >= 0.8.8)
+SSL_ENABLED = False
+CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0)
+try:
+ from distutils.version import LooseVersion
+ import pkg_resources
+ _REQ = pkg_resources.get_distribution('requests')
+ _REQ_VER = LooseVersion(_REQ.version) # pylint: disable=E1103
+ if _REQ_VER >= LooseVersion('0.8.8'):
+ SSL_ENABLED = True
+ if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'):
+ CONFIG_ENABLED = True
+except:
+ pass
+
+
+def _cleanurl(url):
+ parsed_url = list(urlparse(url, scheme='http')) # pylint: disable=E1123
+ if not parsed_url[1] and parsed_url[2]:
+ # Swap these since this seems to be a common
+ # occurrence when given urls like 'www.google.com'
+ parsed_url[1] = parsed_url[2]
+ parsed_url[2] = ''
+ return urlunparse(parsed_url)
-class UrlResponse(object):
- def __init__(self, status_code, contents=None, headers=None):
- self._status_code = status_code
- self._contents = contents
- self._headers = headers
- @property
- def code(self):
- return self._status_code
+class UrlResponse(object):
+ def __init__(self, response):
+ self._response = response
@property
def contents(self):
- return self._contents
+ return self._response.content
@property
- def headers(self):
- return self._headers
-
- def __str__(self):
- if not self.contents:
- return ''
- else:
- return str(self.contents)
+ def url(self):
+ return self._response.url
def ok(self, redirects_ok=False):
upper = 300
@@ -67,72 +79,130 @@ class UrlResponse(object):
else:
return False
+ @property
+ def headers(self):
+ return self._response.headers
-def readurl(url, data=None, timeout=None,
- retries=0, sec_between=1, headers=None):
-
- req_args = {}
- req_args['url'] = url
- if data is not None:
- req_args['data'] = urllib.urlencode(data)
+ @property
+ def code(self):
+ return self._response.status_code
+ def __str__(self):
+ return self.contents
+
+
+class UrlError(IOError):
+ def __init__(self, cause, code=None, headers=None):
+ IOError.__init__(self, str(cause))
+ self.cause = cause
+ self.code = code
+ self.headers = headers
+ if self.headers is None:
+ self.headers = {}
+
+
+def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
+ headers=None, headers_cb=None, ssl_details=None,
+ check_status=True, allow_redirects=True):
+ url = _cleanurl(url)
+ req_args = {
+ 'url': url,
+ }
+ scheme = urlparse(url).scheme # pylint: disable=E1101
+ if scheme == 'https' and ssl_details:
+ if not SSL_ENABLED:
+ LOG.warn("SSL is not enabled, cert. verification can not occur!")
+ else:
+ if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
+ req_args['verify'] = ssl_details['ca_certs']
+ else:
+ req_args['verify'] = True
+ if 'cert_file' in ssl_details and 'key_file' in ssl_details:
+ req_args['cert'] = [ssl_details['cert_file'],
+ ssl_details['key_file']]
+ elif 'cert_file' in ssl_details:
+ req_args['cert'] = str(ssl_details['cert_file'])
+
+ req_args['allow_redirects'] = allow_redirects
+ req_args['method'] = 'GET'
+ if timeout is not None:
+ req_args['timeout'] = max(float(timeout), 0)
+ if data:
+ req_args['method'] = 'POST'
+    # The 'config' kwarg is only supported by some versions of requests
+    # (roughly >= 0.7 and < 1.0), so when it is unavailable we have to
+    # handle the retries (and the sleeps in between) ourselves...
+ if CONFIG_ENABLED:
+ req_config = {
+ 'store_cookies': False,
+ }
+ # Don't use the retry support built-in
+ # since it doesn't allow for 'sleep_times'
+ # in between tries....
+ # if retries:
+ # req_config['max_retries'] = max(int(retries), 0)
+ req_args['config'] = req_config
+ manual_tries = 1
+ if retries:
+ manual_tries = max(int(retries) + 1, 1)
if not headers:
headers = {
'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
}
-
- req_args['headers'] = headers
- req = urllib2.Request(**req_args)
-
- retries = max(retries, 0)
- attempts = retries + 1
-
- excepts = []
- LOG.debug(("Attempting to open '%s' with %s attempts"
- " (%s retries, timeout=%s) to be performed"),
- url, attempts, retries, timeout)
- open_args = {}
- if timeout is not None:
- open_args['timeout'] = int(timeout)
- for i in range(0, attempts):
+ if not headers_cb:
+ def _cb(url):
+ return headers
+ headers_cb = _cb
+
+ if data:
+ # Do this after the log (it might be large)
+ req_args['data'] = data
+ if sec_between is None:
+ sec_between = -1
+ excps = []
+ # Handle retrying ourselves since the built-in support
+ # doesn't handle sleeping between tries...
+ for i in range(0, manual_tries):
try:
- with closing(urllib2.urlopen(req, **open_args)) as rh:
- content = rh.read()
- status = rh.getcode()
- if status is None:
- # This seems to happen when files are read...
- status = 200
- headers = {}
- if rh.headers:
- headers = dict(rh.headers)
- LOG.debug("Read from %s (%s, %sb) after %s attempts",
- url, status, len(content), (i + 1))
- return UrlResponse(status, content, headers)
- except urllib2.HTTPError as e:
- excepts.append(e)
- except urllib2.URLError as e:
- # This can be a message string or
- # another exception instance
- # (socket.error for remote URLs, OSError for local URLs).
- if (isinstance(e.reason, (OSError)) and
- e.reason.errno == errno.ENOENT):
- excepts.append(e.reason)
+ req_args['headers'] = headers_cb(url)
+ filtered_req_args = {}
+ for (k, v) in req_args.items():
+ if k == 'data':
+ continue
+ filtered_req_args[k] = v
+
+ LOG.debug("[%s/%s] open '%s' with %s configuration", i,
+ manual_tries, url, filtered_req_args)
+
+ r = requests.request(**req_args)
+ if check_status:
+ r.raise_for_status() # pylint: disable=E1103
+ LOG.debug("Read from %s (%s, %sb) after %s attempts", url,
+ r.status_code, len(r.content), # pylint: disable=E1103
+ (i + 1))
+ # Doesn't seem like we can make it use a different
+ # subclass for responses, so add our own backward-compat
+ # attrs
+ return UrlResponse(r)
+ except exceptions.RequestException as e:
+ if (isinstance(e, (exceptions.HTTPError))
+ and hasattr(e, 'response') # This appeared in v 0.10.8
+ and hasattr(e.response, 'status_code')):
+ excps.append(UrlError(e, code=e.response.status_code,
+ headers=e.response.headers))
else:
- excepts.append(e)
- except Exception as e:
- excepts.append(e)
- if i + 1 < attempts:
- LOG.debug("Please wait %s seconds while we wait to try again",
- sec_between)
- time.sleep(sec_between)
-
- # Didn't work out
- LOG.debug("Failed reading from %s after %s attempts", url, attempts)
-
- # It must of errored at least once for code
- # to get here so re-raise the last error
- LOG.debug("%s errors occured, re-raising the last one", len(excepts))
- raise excepts[-1]
+ excps.append(UrlError(e))
+ if SSL_ENABLED and isinstance(e, exceptions.SSLError):
+ # ssl exceptions are not going to get fixed by waiting a
+ # few seconds
+ break
+ if i + 1 < manual_tries and sec_between > 0:
+ LOG.debug("Please wait %s seconds while we wait to try again",
+ sec_between)
+ time.sleep(sec_between)
+ if excps:
+ raise excps[-1]
+ return None # Should throw before this...
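For orientation, a minimal sketch of how this reworked requests-based helper might be called; the metadata URL is illustrative, and UrlError is the wrapper raised above (assumed to expose the code it was constructed with):

    from cloudinit import url_helper

    try:
        # retries=3 means up to 4 attempts, sleeping 2 seconds between
        # them, per the manual retry loop above.
        resp = url_helper.readurl("http://169.254.169.254/latest/meta-data/",
                                  timeout=5, retries=3, sec_between=2)
        print(resp.contents)
    except url_helper.UrlError as e:
        print("request failed (HTTP code %s)" % e.code)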
def wait_for_url(urls, max_wait=None, timeout=None,
@@ -143,7 +213,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
max_wait: roughly the maximum time to wait before giving up
The max time is *actually* len(urls)*timeout as each url will
be tried once and given the timeout provided.
- timeout: the timeout provided to urllib2.urlopen
+ timeout: the timeout provided to readurl
status_cb: call method with string message when a url is not available
headers_cb: call method with single argument of url to get headers
for request.
@@ -190,36 +260,40 @@ def wait_for_url(urls, max_wait=None, timeout=None,
timeout = int((start_time + max_wait) - now)
reason = ""
+ e = None
try:
if headers_cb is not None:
headers = headers_cb(url)
else:
headers = {}
- resp = readurl(url, headers=headers, timeout=timeout)
- if not resp.contents:
- reason = "empty response [%s]" % (resp.code)
- e = ValueError(reason)
- elif not resp.ok():
- reason = "bad status code [%s]" % (resp.code)
- e = ValueError(reason)
+ response = readurl(url, headers=headers, timeout=timeout,
+ check_status=False)
+ if not response.contents:
+ reason = "empty response [%s]" % (response.code)
+ e = UrlError(ValueError(reason),
+ code=response.code, headers=response.headers)
+ elif not response.ok():
+ reason = "bad status code [%s]" % (response.code)
+ e = UrlError(ValueError(reason),
+ code=response.code, headers=response.headers)
else:
return url
- except urllib2.HTTPError as e:
- reason = "http error [%s]" % e.code
- except urllib2.URLError as e:
- reason = "url error [%s]" % e.reason
- except socket.timeout as e:
- reason = "socket timeout [%s]" % e
+ except UrlError as e:
+ reason = "request error [%s]" % e
except Exception as e:
reason = "unexpected error [%s]" % e
time_taken = int(time.time() - start_time)
status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url,
- time_taken,
- max_wait, reason)
+ time_taken,
+ max_wait,
+ reason)
status_cb(status_msg)
if exception_cb:
+ # This can be used to alter the headers that will be sent
+ # in the future, for example this is what the MAAS datasource
+ # does.
exception_cb(msg=status_msg, exception=e)
if timeup(max_wait, start_time):
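A sketch of how a datasource might drive this polling helper; the URL is illustrative, the callback signature mirrors the docstring above, and on timeout the function is assumed to return a false value:

    def report(msg):
        LOG.debug(msg)

    url = wait_for_url(urls=["http://169.254.169.254/latest/meta-data/"],
                       max_wait=120, timeout=10, status_cb=report)
    if url:
        LOG.debug("metadata service reachable at %s", url)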
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 58827e3d..d49ea094 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -23,13 +23,14 @@
import os
import email
+
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
+from email.mime.nonmultipart import MIMENonMultipart
from email.mime.text import MIMEText
from cloudinit import handlers
from cloudinit import log as logging
-from cloudinit import url_helper
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -49,6 +50,18 @@ ARCHIVE_TYPES = ["text/cloud-config-archive"]
UNDEF_TYPE = "text/plain"
ARCHIVE_UNDEF_TYPE = "text/cloud-config"
+# This seems to cover most of the possible gzip content types.
+DECOMP_TYPES = [
+ 'application/gzip',
+ 'application/gzip-compressed',
+ 'application/gzipped',
+ 'application/x-compress',
+ 'application/x-compressed',
+ 'application/x-gunzip',
+ 'application/x-gzip',
+ 'application/x-gzip-compressed',
+]
+
# Msg header used to track attachments
ATTACHMENT_FIELD = 'Number-Attachments'
@@ -57,9 +70,21 @@ ATTACHMENT_FIELD = 'Number-Attachments'
EXAMINE_FOR_LAUNCH_INDEX = ["text/cloud-config"]
+def _replace_header(msg, key, value):
+ del msg[key]
+ msg[key] = value
+
+
+def _set_filename(msg, filename):
+ del msg['Content-Disposition']
+ msg.add_header('Content-Disposition',
+ 'attachment', filename=str(filename))
+
+
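The del-then-set idiom in these helpers matters because email.Message.replace_header raises KeyError when the header is absent, while deleting a missing header is a no-op; a standalone stdlib illustration:

    from email.mime.text import MIMEText

    msg = MIMEText("#cloud-config\n")
    # msg.replace_header('Launch-Index', '0') would raise KeyError here
    del msg['Launch-Index']   # deleting a missing header does nothing
    msg['Launch-Index'] = '0'
    assert msg['Launch-Index'] == '0'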
class UserDataProcessor(object):
def __init__(self, paths):
self.paths = paths
+ self.ssl_details = util.fetch_ssl_details(paths)
def process(self, blob):
accumulating_msg = MIMEMultipart()
@@ -67,6 +92,10 @@ class UserDataProcessor(object):
return accumulating_msg
def _process_msg(self, base_msg, append_msg):
+
+ def find_ctype(payload):
+ return handlers.type_from_starts_with(payload)
+
for part in base_msg.walk():
if is_skippable(part):
continue
@@ -74,21 +103,51 @@ class UserDataProcessor(object):
ctype = None
ctype_orig = part.get_content_type()
payload = part.get_payload(decode=True)
+ was_compressed = False
+
+ # When the message states it is of a gzipped content type, attempt
+ # to decompress the payload so that the decompressed data can be
+ # examined (instead of the compressed data).
+ if ctype_orig in DECOMP_TYPES:
+ try:
+ payload = util.decomp_gzip(payload, quiet=False)
+ # At this point we don't know what the content-type is
+ # since we just decompressed it.
+ ctype_orig = None
+ was_compressed = True
+ except util.DecompressionError as e:
+ LOG.warn("Failed decompressing payload from %s of length"
+ " %s due to: %s", ctype_orig, len(payload), e)
+ continue
+ # Attempt to figure out the payloads content-type
if not ctype_orig:
ctype_orig = UNDEF_TYPE
-
if ctype_orig in TYPE_NEEDED:
- ctype = handlers.type_from_starts_with(payload)
-
+ ctype = find_ctype(payload)
if ctype is None:
ctype = ctype_orig
+ # When the data was compressed, create a new message that carries
+ # the detected content type along with the uncompressed content,
+ # since later traversals of the message will expect an
+ # uncompressed part.
+ if was_compressed:
+ maintype, subtype = ctype.split("/", 1)
+ n_part = MIMENonMultipart(maintype, subtype)
+ n_part.set_payload(payload)
+ # Copy various headers from the old part to the new one,
+ # but don't include all the headers since some are not useful
+ # after decoding and decompression.
+ if part.get_filename():
+ _set_filename(n_part, part.get_filename())
+ for h in ('Launch-Index',):
+ if h in part:
+ _replace_header(n_part, h, str(part[h]))
+ part = n_part
+
if ctype != ctype_orig:
- if CONTENT_TYPE in part:
- part.replace_header(CONTENT_TYPE, ctype)
- else:
- part[CONTENT_TYPE] = ctype
+ _replace_header(part, CONTENT_TYPE, ctype)
if ctype in INCLUDE_TYPES:
self._do_include(payload, append_msg)
@@ -98,12 +157,9 @@ class UserDataProcessor(object):
self._explode_archive(payload, append_msg)
continue
- # Should this be happening, shouldn't
+ # TODO(harlowja): Should this be happening, shouldn't
# the part header be modified and not the base?
- if CONTENT_TYPE in base_msg:
- base_msg.replace_header(CONTENT_TYPE, ctype)
- else:
- base_msg[CONTENT_TYPE] = ctype
+ _replace_header(base_msg, CONTENT_TYPE, ctype)
self._attach_part(append_msg, part)
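To see the decompression path above in action, one can gzip a cloud-config payload the way a gzipped user-data part would arrive (a round-trip sketch in Python 2, matching this codebase; util.decomp_gzip performs the equivalent of the final read):

    import gzip
    import StringIO

    raw = "#cloud-config\nruncmd:\n - [ echo, hello ]\n"
    buf = StringIO.StringIO()
    gz = gzip.GzipFile(fileobj=buf, mode='wb')
    gz.write(raw)
    gz.close()
    blob = buf.getvalue()
    # A part declaring e.g. 'application/x-gzip' would be decompressed
    # back to the original payload before type detection runs.
    assert gzip.GzipFile(fileobj=StringIO.StringIO(blob)).read() == raw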
@@ -138,8 +194,7 @@ class UserDataProcessor(object):
def _process_before_attach(self, msg, attached_id):
if not msg.get_filename():
- msg.add_header('Content-Disposition',
- 'attachment', filename=PART_FN_TPL % (attached_id))
+ _set_filename(msg, PART_FN_TPL % (attached_id))
self._attach_launch_index(msg)
def _do_include(self, content, append_msg):
@@ -173,7 +228,8 @@ class UserDataProcessor(object):
if include_once_on and os.path.isfile(include_once_fn):
content = util.load_file(include_once_fn)
else:
- resp = url_helper.readurl(include_url)
+ resp = util.read_file_or_url(include_url,
+ ssl_details=self.ssl_details)
if include_once_on and resp.ok():
util.write_file(include_once_fn, str(resp), mode=0600)
if resp.ok():
@@ -216,13 +272,15 @@ class UserDataProcessor(object):
msg.set_payload(content)
if 'filename' in ent:
- msg.add_header('Content-Disposition',
- 'attachment', filename=ent['filename'])
+ _set_filename(msg, ent['filename'])
if 'launch-index' in ent:
msg.add_header('Launch-Index', str(ent['launch-index']))
for header in list(ent.keys()):
- if header in ('content', 'filename', 'type', 'launch-index'):
+ if header.lower() in ('content', 'filename', 'type',
+ 'launch-index', 'content-disposition',
+ ATTACHMENT_FIELD.lower(),
+ CONTENT_TYPE.lower()):
continue
msg.add_header(header, ent[header])
@@ -237,13 +295,13 @@ class UserDataProcessor(object):
outer_msg[ATTACHMENT_FIELD] = '0'
if new_count is not None:
- outer_msg.replace_header(ATTACHMENT_FIELD, str(new_count))
+ _replace_header(outer_msg, ATTACHMENT_FIELD, str(new_count))
fetched_count = 0
try:
fetched_count = int(outer_msg.get(ATTACHMENT_FIELD))
except (ValueError, TypeError):
- outer_msg.replace_header(ATTACHMENT_FIELD, str(fetched_count))
+ _replace_header(outer_msg, ATTACHMENT_FIELD, str(fetched_count))
return fetched_count
def _attach_part(self, outer_msg, part):
@@ -275,10 +333,7 @@ def convert_string(raw_data, headers=None):
if "mime-version:" in data[0:4096].lower():
msg = email.message_from_string(data)
for (key, val) in headers.iteritems():
- if key in msg:
- msg.replace_header(key, val)
- else:
- msg[key] = val
+ _replace_header(msg, key, val)
else:
mtype = headers.get(CONTENT_TYPE, NOT_MULTIPART_TYPE)
maintype, subtype = mtype.split("/", 1)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 7b1202a2..e1c51f31 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1,7 +1,7 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
@@ -43,15 +43,16 @@ import subprocess
import sys
import tempfile
import time
-import types
import urlparse
import yaml
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import mergers
from cloudinit import safeyaml
-from cloudinit import url_helper as uhelp
+from cloudinit import type_utils
+from cloudinit import url_helper
from cloudinit import version
from cloudinit.settings import (CFG_BUILTIN)
@@ -70,6 +71,31 @@ FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
CONTAINER_TESTS = ['running-in-container', 'lxc-is-container']
+# Provides the same accessors as UrlResponse so that read_file_or_url
+# can return either object and the 'user' of those objects will not
+# need to know the difference.
+class StringResponse(object):
+ def __init__(self, contents, code=200):
+ self.code = code
+ self.headers = {}
+ self.contents = contents
+ self.url = None
+
+ def ok(self, *args, **kwargs): # pylint: disable=W0613
+ if self.code != 200:
+ return False
+ return True
+
+ def __str__(self):
+ return self.contents
+
+
+class FileResponse(StringResponse):
+ def __init__(self, path, contents, code=200):
+ StringResponse.__init__(self, contents, code=code)
+ self.url = path
+
+
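A brief sketch of the duck-typing these wrappers provide (the path and contents are illustrative):

    from cloudinit.util import FileResponse

    resp = FileResponse('/tmp/seed/user-data', contents='#cloud-config\n')
    assert resp.ok()
    assert resp.code == 200
    print(str(resp))  # the raw contents, just as with a UrlResponse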
class ProcessExecutionError(IOError):
MESSAGE_TMPL = ('%(description)s\n'
@@ -193,12 +219,12 @@ def fork_cb(child_cb, *args):
child_cb(*args)
os._exit(0) # pylint: disable=W0212
except:
- logexc(LOG, ("Failed forking and"
- " calling callback %s"), obj_name(child_cb))
+ logexc(LOG, "Failed forking and calling callback %s",
+ type_utils.obj_name(child_cb))
os._exit(1) # pylint: disable=W0212
else:
LOG.debug("Forked child %s who will run callback %s",
- fid, obj_name(child_cb))
+ fid, type_utils.obj_name(child_cb))
def is_true(val, addons=None):
@@ -381,6 +407,7 @@ def system_info():
'release': platform.release(),
'python': platform.python_version(),
'uname': platform.uname(),
+ 'dist': platform.linux_distribution(),
}
@@ -460,7 +487,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
- new_fp = proc.stdin
+ new_fp = proc.stdin # pylint: disable=E1101
else:
raise TypeError("Invalid type for output format: %s" % outfmt)
@@ -482,7 +509,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
- new_fp = proc.stdin
+ new_fp = proc.stdin # pylint: disable=E1101
else:
raise TypeError("Invalid type for error format: %s" % errfmt)
@@ -512,38 +539,19 @@ def make_url(scheme, host, port=None,
return urlparse.urlunparse(pieces)
-def obj_name(obj):
- if isinstance(obj, (types.TypeType,
- types.ModuleType,
- types.FunctionType,
- types.LambdaType)):
- return str(obj.__name__)
- return obj_name(obj.__class__)
-
-
def mergemanydict(srcs, reverse=False):
if reverse:
srcs = reversed(srcs)
- m_cfg = {}
- for a_cfg in srcs:
- if a_cfg:
- m_cfg = mergedict(m_cfg, a_cfg)
- return m_cfg
-
-
-def mergedict(src, cand):
- """
- Merge values from C{cand} into C{src}.
- If C{src} has a key C{cand} will not override.
- Nested dictionaries are merged recursively.
- """
- if isinstance(src, dict) and isinstance(cand, dict):
- for (k, v) in cand.iteritems():
- if k not in src:
- src[k] = v
- else:
- src[k] = mergedict(src[k], v)
- return src
+ merged_cfg = {}
+ for cfg in srcs:
+ if cfg:
+ # Figure out which mergers to apply...
+ mergers_to_apply = mergers.dict_extract_mergers(cfg)
+ if not mergers_to_apply:
+ mergers_to_apply = mergers.default_mergers()
+ merger = mergers.construct(mergers_to_apply)
+ merged_cfg = merger.merge(merged_cfg, cfg)
+ return merged_cfg
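A sketch of the new merge path; the 'extend' option name follows the doc/merging.rst examples added later in this commit, and the comment states the intended outcome under those defaults:

    from cloudinit import util

    base = {'runcmd': ['echo one']}
    extra = {'runcmd': ['echo two'],
             'merge_how': 'list(extend)+dict()+str()'}
    merged = util.mergemanydict([base, extra])
    # With the list 'extend' option the runcmd entries are combined
    # instead of one list silently replacing the other.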
@contextlib.contextmanager
@@ -618,18 +626,64 @@ def read_optional_seed(fill, base="", ext="", timeout=5):
fill['user-data'] = ud
fill['meta-data'] = md
return True
- except OSError as e:
+ except IOError as e:
if e.errno == errno.ENOENT:
return False
raise
-def read_file_or_url(url, timeout=5, retries=10, file_retries=0):
+def fetch_ssl_details(paths=None):
+ ssl_details = {}
+ # Lookup in these locations for ssl key/cert files
+ ssl_cert_paths = [
+ '/var/lib/cloud/data/ssl',
+ '/var/lib/cloud/instance/data/ssl',
+ ]
+ if paths:
+ ssl_cert_paths.extend([
+ os.path.join(paths.get_ipath_cur('data'), 'ssl'),
+ os.path.join(paths.get_cpath('data'), 'ssl'),
+ ])
+ ssl_cert_paths = uniq_merge(ssl_cert_paths)
+ ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)]
+ cert_file = None
+ for d in ssl_cert_paths:
+ if os.path.isfile(os.path.join(d, 'cert.pem')):
+ cert_file = os.path.join(d, 'cert.pem')
+ break
+ key_file = None
+ for d in ssl_cert_paths:
+ if os.path.isfile(os.path.join(d, 'key.pem')):
+ key_file = os.path.join(d, 'key.pem')
+ break
+ if cert_file and key_file:
+ ssl_details['cert_file'] = cert_file
+ ssl_details['key_file'] = key_file
+ elif cert_file:
+ ssl_details['cert_file'] = cert_file
+ return ssl_details
+
+
+def read_file_or_url(url, timeout=5, retries=10,
+ headers=None, data=None, sec_between=1, ssl_details=None,
+ headers_cb=None):
+ url = url.lstrip()
if url.startswith("/"):
url = "file://%s" % url
- if url.startswith("file://"):
- retries = file_retries
- return uhelp.readurl(url, timeout=timeout, retries=retries)
+ if url.lower().startswith("file://"):
+ if data:
+ LOG.warn("Unable to post data to file resource %s", url)
+ file_path = url[len("file://"):]
+ return FileResponse(file_path, contents=load_file(file_path))
+ else:
+ return url_helper.readurl(url,
+ timeout=timeout,
+ retries=retries,
+ headers=headers,
+ headers_cb=headers_cb,
+ data=data,
+ sec_between=sec_between,
+ ssl_details=ssl_details)
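A sketch tying the two helpers together; the seed path is illustrative, and the file:// branch short-circuits to a FileResponse while anything else flows through url_helper.readurl:

    from cloudinit import util

    details = util.fetch_ssl_details()  # {} when no cert/key files exist
    resp = util.read_file_or_url("/var/lib/cloud/seed/nocloud/user-data")
    if resp.ok():
        print(resp.contents)  # a FileResponse for local paths
    # For an https URL the same call would pass ssl_details=details
    # through to url_helper.readurl.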
def load_yaml(blob, default=None, allowed=(dict,)):
@@ -644,7 +698,7 @@ def load_yaml(blob, default=None, allowed=(dict,)):
# Yes this will just be caught, but thats ok for now...
raise TypeError(("Yaml load allows %s root types,"
" but got %s instead") %
- (allowed, obj_name(converted)))
+ (allowed, type_utils.obj_name(converted)))
loaded = converted
except (yaml.YAMLError, TypeError, ValueError):
if len(blob) == 0:
@@ -713,7 +767,7 @@ def read_conf_with_confd(cfgfile):
if not isinstance(confd, (str, basestring)):
raise TypeError(("Config file %s contains 'conf_d' "
"with non-string type %s") %
- (cfgfile, obj_name(confd)))
+ (cfgfile, type_utils.obj_name(confd)))
else:
confd = str(confd).strip()
elif os.path.isdir("%s.d" % cfgfile):
@@ -724,7 +778,7 @@ def read_conf_with_confd(cfgfile):
# Conf.d settings override input configuration
confd_cfg = read_conf_d(confd)
- return mergedict(confd_cfg, cfg)
+ return mergemanydict([confd_cfg, cfg])
def read_cc_from_cmdline(cmdline=None):
@@ -846,7 +900,7 @@ def get_cmdline_url(names=('cloud-config-url', 'url'),
if not url:
return (None, None, None)
- resp = uhelp.readurl(url)
+ resp = read_file_or_url(url)
if resp.contents.startswith(starts) and resp.ok():
return (key, url, str(resp))
@@ -879,7 +933,7 @@ def is_resolvable(name):
for (_fam, _stype, _proto, cname, sockaddr) in result:
badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
badips.add(sockaddr[0])
- except socket.gaierror:
+ except (socket.gaierror, socket.error):
pass
_DNS_REDIRECT_IP = badips
if badresults:
@@ -892,7 +946,7 @@ def is_resolvable(name):
if addr in _DNS_REDIRECT_IP:
return False
return True
- except socket.gaierror:
+ except (socket.gaierror, socket.error):
return False
@@ -1428,7 +1482,7 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
(out, err) = sp.communicate(data)
except OSError as e:
raise ProcessExecutionError(cmd=args, reason=e)
- rc = sp.returncode
+ rc = sp.returncode # pylint: disable=E1101
if rc not in rcs:
raise ProcessExecutionError(stdout=out, stderr=err,
exit_code=rc,
@@ -1478,11 +1532,19 @@ def shellify(cmdlist, add_header=True):
else:
raise RuntimeError(("Unable to shellify type %s"
" which is not a list or string")
- % (obj_name(args)))
+ % (type_utils.obj_name(args)))
LOG.debug("Shellified %s commands.", cmds_made)
return content
+def strip_prefix_suffix(line, prefix=None, suffix=None):
+ if prefix and line.startswith(prefix):
+ line = line[len(prefix):]
+ if suffix and line.endswith(suffix):
+ line = line[:-len(suffix)]
+ return line
+
+
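A quick illustration of this helper (the b64- prefix mirrors the SmartOS 'b64-<key>' convention documented later in this commit):

    assert strip_prefix_suffix("b64-user-data", prefix="b64-") == "user-data"
    assert strip_prefix_suffix("[abc]", prefix="[", suffix="]") == "abc"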
def is_container():
"""
Checks to see if this code running in a container of some sort
@@ -1537,7 +1599,7 @@ def get_proc_env(pid):
fn = os.path.join("/proc/", str(pid), "environ")
try:
contents = load_file(fn)
- toks = contents.split("\0")
+ toks = contents.split("\x00")
for tok in toks:
if tok == "":
continue
@@ -1593,3 +1655,160 @@ def expand_package_list(version_fmt, pkgs):
raise RuntimeError("Invalid package type.")
return pkglist
+
+
+def parse_mount_info(path, mountinfo_lines, log=LOG):
+ """Return the mount information for PATH given the lines from
+ /proc/$$/mountinfo."""
+
+ path_elements = [e for e in path.split('/') if e]
+ devpth = None
+ fs_type = None
+ match_mount_point = None
+ match_mount_point_elements = None
+ for i, line in enumerate(mountinfo_lines):
+ parts = line.split()
+
+ # Completely fail if there is anything in any line that is
+ # unexpected, as continuing to parse past a bad line could
+ # cause an incorrect result to be returned, so it's better
+ # return nothing than an incorrect result.
+
+ # The minimum number of elements in a valid line is 10.
+ if len(parts) < 10:
+ log.debug("Line %d has two few columns (%d): %s",
+ i + 1, len(parts), line)
+ return None
+
+ mount_point = parts[4]
+ mount_point_elements = [e for e in mount_point.split('/') if e]
+
+ # Ignore mounts deeper than the path in question.
+ if len(mount_point_elements) > len(path_elements):
+ continue
+
+ # Ignore mounts where the common path is not the same.
+ l = min(len(mount_point_elements), len(path_elements))
+ if mount_point_elements[0:l] != path_elements[0:l]:
+ continue
+
+ # Ignore mount points higher than an already seen mount
+ # point.
+ if (match_mount_point_elements is not None and
+ len(match_mount_point_elements) > len(mount_point_elements)):
+ continue
+
+ # Find the '-' which terminates a list of optional columns to
+ # find the filesystem type and the path to the device. See
+ # man 5 proc for the format of this file.
+ try:
+ sep = parts.index('-')
+ except ValueError:
+ log.debug("Did not find the '-' separator in line %d: %s",
+ i + 1, line)
+ return None
+
+ # Get the fs type and the path to the device.
+ try:
+ fs_type = parts[sep + 1]
+ devpth = parts[sep + 2]
+ except IndexError:
+ log.debug("Too few columns after the '-' separator in line %d: %s",
+ i + 1, line)
+ return None
+
+ match_mount_point = mount_point
+ match_mount_point_elements = mount_point_elements
+
+ if devpth and fs_type and match_mount_point:
+ return (devpth, fs_type, match_mount_point)
+ else:
+ return None
+
+
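A worked example against a single (real-format) mountinfo line; the device and options are illustrative:

    line = "36 20 252:1 / / rw,relatime - ext4 /dev/vda1 rw,errors=remount-ro"
    print(parse_mount_info('/home', [line]))
    # -> ('/dev/vda1', 'ext4', '/')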
+def get_mount_info(path, log=LOG):
+ # Use /proc/$$/mountinfo to find the device where path is mounted.
+ # This is done because with a btrfs filesystem using os.stat(path)
+ # does not return the ID of the device.
+ #
+ # Here, / has a device of 18 (decimal).
+ #
+ # $ stat /
+ # File: '/'
+ # Size: 234 Blocks: 0 IO Block: 4096 directory
+ # Device: 12h/18d Inode: 256 Links: 1
+ # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root)
+ # Access: 2013-01-13 07:31:04.358011255 +0000
+ # Modify: 2013-01-13 18:48:25.930011255 +0000
+ # Change: 2013-01-13 18:48:25.930011255 +0000
+ # Birth: -
+ #
+ # Find where / is mounted:
+ #
+ # $ mount | grep ' / '
+ # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo)
+ #
+ # And the device ID for /dev/vda1 is not 18:
+ #
+ # $ ls -l /dev/vda1
+ # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1
+ #
+ # So use /proc/$$/mountinfo to find the device underlying the
+ # input path.
+ mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
+ lines = load_file(mountinfo_path).splitlines()
+ return parse_mount_info(path, lines, log)
+
+
+def which(program):
+ # Return path of program for execution if found in path
+ def is_exe(fpath):
+ return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+ _fpath, _ = os.path.split(program)
+ if _fpath:
+ if is_exe(program):
+ return program
+ else:
+ for path in os.environ["PATH"].split(os.pathsep):
+ path = path.strip('"')
+ exe_file = os.path.join(path, program)
+ if is_exe(exe_file):
+ return exe_file
+
+ return None
+
+
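Usage is analogous to the shell's which(1); results naturally depend on the system:

    print(which('sh'))        # e.g. '/bin/sh' on most Linux systems
    print(which('growpart'))  # None when the binary is not on PATH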
+def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = {}
+
+ start = time.time()
+
+ ustart = None
+ if get_uptime:
+ try:
+ ustart = float(uptime())
+ except ValueError:
+ pass
+
+ try:
+ ret = func(*args, **kwargs)
+ finally:
+ delta = time.time() - start
+ if ustart is not None:
+ try:
+ udelta = float(uptime()) - ustart
+ except ValueError:
+ udelta = "N/A"
+
+ tmsg = " took %0.3f seconds" % delta
+ if get_uptime:
+ tmsg += "(%0.2f)" % udelta
+ try:
+ logfunc(msg + tmsg)
+ except:
+ pass
+ return ret
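A minimal sketch of wrapping a callable with this timing helper (the operation is a stand-in; the exact log wording is indicative):

    import time
    from cloudinit import util

    def slow_op():
        time.sleep(0.2)
        return "done"

    ret = util.log_time(util.LOG.debug, "running slow_op", func=slow_op)
    # logs roughly "running slow_op took 0.200 seconds", returns "done"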
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 024d5118..4b29a587 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -20,7 +20,7 @@ from distutils import version as vr
def version():
- return vr.StrictVersion("0.7.2")
+ return vr.StrictVersion("0.7.3")
def version_string():
diff --git a/config/cloud.cfg b/config/cloud.cfg
index a8c74486..b61b8a7d 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -26,6 +26,7 @@ cloud_init_modules:
- migrator
- bootcmd
- write-files
+ - growpart
- resizefs
- set_hostname
- update_hostname
diff --git a/doc/examples/cloud-config-TODO.txt b/doc/examples/cloud-config-TODO.txt
deleted file mode 100644
index c7ed54ab..00000000
--- a/doc/examples/cloud-config-TODO.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-# Add apt configuration files
-# Add an apt.conf.d/ file with the relevant content
-#
-# See apt.conf man page for more information.
-#
-# Defaults:
-# + filename: 00-boot-conf
-#
-apt_conf:
-
- # Creates an apt proxy configuration in /etc/apt/apt.conf.d/01-proxy
- - filename: "01-proxy"
- content: |
- Acquire::http::Proxy "http://proxy.example.org:3142/ubuntu";
-
- # Add the following line to /etc/apt/apt.conf.d/00-boot-conf
- # (run debconf at a critical priority)
- - content: |
- DPkg::Pre-Install-Pkgs:: "/usr/sbin/dpkg-preconfigure --apt -p critical|| true";
-
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index fc8c22d4..65a3cdf5 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -42,3 +42,26 @@ datasource:
meta-data:
instance-id: i-87018aed
local-hostname: myhost.internal
+
+ Azure:
+ agent_command: [service, walinuxagent, start]
+ set_hostname: True
+ hostname_bounce:
+ interface: eth0
+ policy: on # [can be 'on', 'off' or 'force']
+
+ SmartOS:
+ # Smart OS datasource works over a serial console interacting with
+ # a server on the other end. By default, the second serial console is the
+ # device. SmartOS also uses a serial timeout of 60 seconds.
+ serial_device: /dev/ttyS1
+ serial_timeout: 60
+
+ # a list of keys that will not be base64 decoded even if base64_all is set
+ no_base64_decode: ['root_authorized_keys', 'motd_sys_info',
+ 'iptables_disable']
+ # a plaintext, comma delimited list of keys whose values are b64 encoded
+ base64_keys: []
+ # a boolean indicating that all keys not in 'no_base64_decode' are encoded
+ base64_all: False
diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt
new file mode 100644
index 00000000..705f02c2
--- /dev/null
+++ b/doc/examples/cloud-config-growpart.txt
@@ -0,0 +1,24 @@
+#cloud-config
+#
+# growpart entry is a dict, if it is not present at all
+# in config, then the default is used ({'mode': 'auto', 'devices': ['/']})
+#
+# mode:
+# values:
+# * auto: use any option possible (growpart or parted)
+# if neither is available, do not warn, but log at the debug level.
+# * growpart: use growpart to grow partitions
+# if growpart is not available, this is an error.
+# * parted: use parted (parted resizepart) to resize partitions
+# if parted is not available, this is an error.
+# * off, false: do not attempt to grow any partitions.
+#
+# devices:
+# a list of things to resize.
+# items can be filesystem paths or devices (in /dev)
+# examples:
+# devices: [/, /dev/vdb1]
+#
+growpart:
+ mode: auto
+ devices: ['/']
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index 09298655..bcfd7917 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -53,6 +53,9 @@ apt_mirror_search:
apt_mirror_search_dns: False
# apt_proxy (configure Acquire::HTTP::Proxy)
+# 'apt_http_proxy' is an alias for 'apt_proxy'.
+# Also available are 'apt_ftp_proxy' and 'apt_https_proxy'.
+# These affect Acquire::FTP::Proxy and Acquire::HTTPS::Proxy respectively.
apt_proxy: http://my.apt.proxy:3128
# apt_pipelining (configure Acquire::http::Pipeline-Depth)
@@ -125,6 +128,24 @@ apt_sources:
=Y2oI
-----END PGP PUBLIC KEY BLOCK-----
+## apt config via system_info:
+# under the 'system_info', you can further customize cloud-init's interaction
+# with apt.
+# system_info:
+# apt_get_command: [command, argument, argument]
+# apt_get_upgrade_subcommand: dist-upgrade
+#
+# apt_get_command:
+# To specify a different 'apt-get' command, set 'apt_get_command'.
+# This must be a list, and the subcommand (update, upgrade) is appended to it.
+# default is:
+# ['apt-get', '--option=Dpkg::Options::=--force-confold',
+# '--option=Dpkg::options::=--force-unsafe-io', '--assume-yes', '--quiet']
+#
+# apt_get_upgrade_subcommand:
+# Specify a different subcommand for 'upgrade'. The default is 'dist-upgrade'.
+# This is the subcommand that is invoked if package_upgrade is set to true above.
+
# Install additional packages on first boot
#
# Default: none
diff --git a/doc/merging.rst b/doc/merging.rst
new file mode 100644
index 00000000..d4d5cd05
--- /dev/null
+++ b/doc/merging.rst
@@ -0,0 +1,188 @@
+Overview
+--------
+
+This was done because it has been a common feature request that there be a
+way to specify how cloud-config yaml "dictionaries" are merged when there
+are multiple yamls to combine (say when performing an #include).
+
+Previously the merging algorithm was very simple: it would only overwrite,
+never append lists, concatenate strings, and so on. It was therefore decided
+to create a new and improved way to merge dictionaries (and their contained
+objects) together in a way that is customizable, allowing users who provide
+cloud-config data to determine exactly how their objects will be merged.
+
+For example:
+
+.. code-block:: yaml
+
+ #cloud-config (1)
+ runcmd:
+ - bash1
+ - bash2
+
+ #cloud-config (2)
+ runcmd:
+ - bash3
+ - bash4
+
+The previous way of merging the above two objects would result in a final
+cloud-config object that contains the following.
+
+.. code-block:: yaml
+
+ #cloud-config (merged)
+ runcmd:
+ - bash3
+ - bash4
+
+Typically this is not what users want; instead they would likely prefer:
+
+.. code-block:: yaml
+
+ #cloud-config (merged)
+ runcmd:
+ - bash1
+ - bash2
+ - bash3
+ - bash4
+
+This new way makes it easier to combine your various cloud-config objects
+into a more useful whole, reducing the duplication the previous method
+required to accomplish the same result.
+
+Customizability
+---------------
+
+Since the above merging algorithm may not always be the desired one (just as
+the previous merging algorithm was not always preferred), the ability to
+customize how merging is done was introduced through a new concept called
+'merge classes'.
+
+A merge class is a class definition which provides functions that can be used
+to merge a given type with another given type.
+
+An example of one of these merging classes is the following:
+
+.. code-block:: python
+
+ class Merger(object):
+ def __init__(self, merger, opts):
+ self._merger = merger
+ self._overwrite = 'overwrite' in opts
+
+ # This merging algorithm will attempt to merge with
+ # another dictionary, on encountering any other type of object
+ # it will not merge with said object, but will instead return
+ # the original value
+ #
+ # On encountering a dictionary, it will create a new dictionary
+ # composed of the original and the one to merge with, if 'overwrite'
+ # is enabled then keys that exist in the original will be overwritten
+ # by keys in the one to merge with (and associated values). Otherwise
+ # if not in overwrite mode the 2 conflicting keys themselves will
+ # be merged.
+ def _on_dict(self, value, merge_with):
+ if not isinstance(merge_with, (dict)):
+ return value
+ merged = dict(value)
+ for (k, v) in merge_with.items():
+ if k in merged:
+ if not self._overwrite:
+ merged[k] = self._merger.merge(merged[k], v)
+ else:
+ merged[k] = v
+ else:
+ merged[k] = v
+ return merged
+
+As you can see there is a '_on_dict' method here that will be given a source
+value and a value to merge with; the result will be the merged object. This
+code itself is called by another merging class which 'directs' the merging by
+analyzing the types of the objects to merge and attempting to find a known
+merger that can merge that type. I will avoid pasting that here, but it can
+be found in the `mergers/__init__.py` file (see `LookupMerger` and
+`UnknownMerger`).
+
+So following the typical cloud-init way of allowing source code to be
+downloaded and used dynamically, it is possible for users to inject their own
+merging files to handle specific types of merging as they choose (the basic
+ones included will handle lists, dicts, and strings). Note how each merger can
+have options associated with it which affect how the merging is performed; for
+example a dictionary merger can be told to overwrite instead of attempting to
+merge, or a string merger can be told to append strings instead of discarding
+other strings to merge with.
+
+How to activate
+---------------
+
+There are a few ways to activate the merging algorithms, and to customize them
+for your own usage.
+
+1. The first way involves the usage of MIME messages in cloud-init to specify
+ multipart documents (this is one way in which multiple cloud-config is joined
+ together into a single cloud-config). Two new headers are looked for, both
+ of which can define the way merging is done (the first header to exist wins).
+ These new headers (in lookup order) are 'Merge-Type' and 'X-Merge-Type'. The value
+ should be a string which will satisfy the new merging format defintion (see
+ below for this format).
+2. The second way is actually specifying the merge-type in the body of the
+ cloud-config dictionary. There are 2 ways to specify this, either as a string
+ or as a dictionary (see format below). The keys that are looked up for this
+ definition are the following (in order), 'merge_how', 'merge_type'.
+
+String format
+*************
+
+The string format that is expected is the following.
+
+::
+
+ classname1(option1,option2)+classname2(option3,option4)....
+
+The class name will be mapped to the class used to perform the merging, and
+the options provided will be given to that class on construction.
+
+For example, the default string that is used when none is provided is the following:
+
+::
+
+ list()+dict()+str()
+
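As a concrete use of this string format, a sketch of activation method #1 above: attaching the header to a MIME part with the stdlib (the payload is illustrative):

    from email.mime.text import MIMEText

    part = MIMEText("#cloud-config\nruncmd:\n - [ echo, hi ]\n",
                    _subtype="cloud-config")
    part['Merge-Type'] = 'list(extend)+dict()+str()'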
+Dictionary format
+*****************
+
+A dictionary can be used to specify the same information as the string format
+(i.e. option #2 above), for example:
+
+.. code-block:: python
+
+ {'merge_how': [{'name': 'list', 'settings': ['extend']},
+ {'name': 'dict', 'settings': []},
+ {'name': 'str', 'settings': ['append']}]}
+
+This is the equivalent of the default string format, but in dictionary form
+instead of string form.
+
+Specifying multiple types and its effect
+----------------------------------------
+
+Now you may be asking yourself, if I specify a merge-type header or dictionary
+for every cloud-config that I provide, what exactly happens?
+
+The answer is that when merging, a stack of 'merging classes' is kept. The
+first entry on that stack is the default set of merging classes; this set is
+used when the first cloud-config is merged with the initial empty
+cloud-config dictionary. If the cloud-config that was just merged provided a
+set of merging classes (via the above formats) then those merging classes
+are pushed onto the stack. If there is a second cloud-config to be merged,
+the merging classes from the cloud-config merged before it will be used (not
+the default), and so on. This way a cloud-config can decide how it will merge
+with the cloud-config dictionary coming after it.
+
+Other uses
+----------
+
+The default merging algorithm for merging 'conf.d' yaml files (which form an
+initial yaml config for cloud-init) was also changed to use this mechanism, so
+its full benefits (and customization) can be used there as well. Other places
+that used the previous merging are now similarly extensible (metadata merging,
+for example).
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 87fc40ab..c9ae79f4 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -17,13 +17,13 @@ from cloudinit import version
# General information about the project.
project = 'Cloud-Init'
-# -- General configuration -----------------------------------------------------
+# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.intersphinx',
]
@@ -55,7 +55,7 @@ exclude_patterns = []
# output. They are ignored by default.
show_authors = False
-# -- Options for HTML output ---------------------------------------------------
+# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 619bb5dc..fe04b1a9 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -24,6 +24,7 @@ Summary
topics/examples
topics/datasources
topics/modules
+ topics/merging
topics/moreinfo
topics/hacking
diff --git a/doc/rtd/static/logo.svg b/doc/rtd/static/logo.svg
index b22ce2a0..b22ce2a0 100755..100644
--- a/doc/rtd/static/logo.svg
+++ b/doc/rtd/static/logo.svg
diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst
new file mode 100644
index 00000000..8a03f3c7
--- /dev/null
+++ b/doc/rtd/topics/merging.rst
@@ -0,0 +1,5 @@
+=========
+Merging
+=========
+
+.. include:: ../../merging.rst
diff --git a/doc/sources/azure/README.rst b/doc/sources/azure/README.rst
new file mode 100644
index 00000000..8239d1fa
--- /dev/null
+++ b/doc/sources/azure/README.rst
@@ -0,0 +1,134 @@
+================
+Azure Datasource
+================
+
+This datasource finds metadata and user-data from the Azure cloud platform.
+
+Azure Platform
+--------------
+The azure cloud-platform provides initial data to an instance via an attached
+CD formatted in UDF. That CD contains an 'ovf-env.xml' file that provides some
+information. Additional information is obtained via interaction with the
+"endpoint". The ip address of the endpoint is advertised to the instance
+inside of dhcp option 245. On ubuntu, that can be seen in
+/var/lib/dhcp/dhclient.eth0.leases as a colon delimited hex value (example:
+``option unknown-245 64:41:60:82;`` is 100.65.96.130)
+
+walinuxagent
+------------
+In order to operate correctly, cloud-init needs walinuxagent to provide much
+of the interaction with azure. In addition to "provisioning" code, walinuxagent
+is a long running daemon that handles the following things:
+- generate an x509 certificate and send that to the endpoint
+
+waagent.conf config
+~~~~~~~~~~~~~~~~~~~
+In order to use waagent.conf with cloud-init, the following settings are
+recommended. Other values can be changed or set to the defaults.
+
+ ::
+
+ # disabling provisioning turns off all 'Provisioning.*' functions
+ Provisioning.Enabled=n
+ # this is currently not handled by cloud-init, so let walinuxagent do it.
+ ResourceDisk.Format=y
+ ResourceDisk.MountPoint=/mnt
+
+
+Userdata
+--------
+Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init
+expects that user-data will be provided as a base64 encoded value inside the
+text child of an element named ``UserData`` or ``CustomData`` which is a direct
+child of the ``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``).
+If both ``UserData`` and ``CustomData`` are provided, the behavior is undefined
+as to which will be selected.
+
+In the example below, user-data provided is 'this is my userdata', and the
+datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``.
+That agent command will take effect as if it were specified in system config.
+
+Example:
+
+.. code::
+
+ <wa:ProvisioningSection>
+ <wa:Version>1.0</wa:Version>
+ <LinuxProvisioningConfigurationSet
+ xmlns="http://schemas.microsoft.com/windowsazure"
+ xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
+ <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
+ <HostName>myHost</HostName>
+ <UserName>myuser</UserName>
+ <UserPassword/>
+ <CustomData>dGhpcyBpcyBteSB1c2VyZGF0YQ===</CustomData>
+ <dscfg>eyJhZ2VudF9jb21tYW5kIjogWyJzdGFydCIsICJ3YWxpbnV4YWdlbnQiXX0=</dscfg>
+ <DisableSshPasswordAuthentication>true</DisableSshPasswordAuthentication>
+ <SSH>
+ <PublicKeys>
+ <PublicKey>
+ <Fingerprint>6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7</Fingerprint>
+ <Path>this-value-unused</Path>
+ </PublicKey>
+ </PublicKeys>
+ </SSH>
+ </LinuxProvisioningConfigurationSet>
+ </wa:ProvisioningSection>
+
+Configuration
+-------------
+Configuration for the datasource can be read from the system config or set
+via the `dscfg` entry in the `LinuxProvisioningConfigurationSet`. Content in
+the dscfg node is expected to be base64 encoded yaml content, and it will be
+merged into the 'datasource: Azure' entry.
+
+The '``hostname_bounce: command``' entry can be either the literal string
+'builtin' or a command to execute. The command will be invoked after the
+hostname is set, and will have the 'interface' in its environment. If
+``set_hostname`` is not true, then ``hostname_bounce`` will be ignored.
+
+An example might be::
+
+ command: ["sh", "-c", "killall dhclient; dhclient $interface"]
+
+.. code::
+
+ datasource:
+ Azure:
+ agent_command: [service, walinuxagent, start]
+ set_hostname: True
+ hostname_bounce:
+ # the name of the interface to bounce
+ interface: eth0
+ # policy can be 'on', 'off' or 'force'
+ policy: on
+ # the method 'bounce' command.
+ command: "builtin"
+ hostname_command: "hostname"
+
+hostname
+--------
+When the user launches an instance, they provide a hostname for that instance.
+The hostname is provided to the instance in the ovf-env.xml file as
+``HostName``.
+
+Whatever value the instance provides in its dhcp request will resolve in the
+domain returned in the 'search' request.
+
+The interesting issue is that a generic image will already have a hostname
+configured. The ubuntu cloud images have 'ubuntu' as the hostname of the
+system, and the initial dhcp request on eth0 is not guaranteed to occur after
+the datasource code has been run. So, on first boot, that initial value will
+be sent in the dhcp request and *that* value will resolve.
+
+In order to make the ``HostName`` provided in the ovf-env.xml resolve, a
+dhcp request must be made with the new value. Walinuxagent (in its current
+version) handles this by polling the state of the hostname and bouncing
+('``ifdown eth0; ifup eth0``') the network interface if it sees that a change
+has been made.
+
+cloud-init handles this by setting the hostname in the DataSource's 'get_data'
+method via '``hostname $HostName``', and then bouncing the interface. This
+behavior can be configured or disabled in the datasource config. See
+'Configuration' above.
diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst
new file mode 100644
index 00000000..fd4e496d
--- /dev/null
+++ b/doc/sources/smartos/README.rst
@@ -0,0 +1,72 @@
+==================
+SmartOS Datasource
+==================
+
+This datasource finds metadata and user-data from the SmartOS virtualization
+platform (i.e. Joyent).
+
+SmartOS Platform
+----------------
+The SmartOS virtualization platform provides meta-data to the instance via
+the second serial console. On Linux, this is /dev/ttyS1. The data is provided
+via a simple protocol: something queries for the user-data, the console
+responds with the status and, if "SUCCESS", returns the data until a single
+".\n".
+
+New versions of the SmartOS tooling will include support for base64 encoded data.
+
+Userdata
+--------
+
+In SmartOS parlance, user-data is actually meta-data. This user-data can be
+provided as key-value pairs.
+
+Cloud-init supports reading the traditional meta-data fields supported by the
+SmartOS tools. These are:
+ * root_authorized_keys
+ * hostname
+ * enable_motd_sys_info
+ * iptables_disable
+
+Note: At this time iptables_disable and enable_motd_sys_info are read but
+ are not actioned.
+
+user-script
+-----------
+
+SmartOS traditionally supports sending over a user-script for execution at the
+rc.local level. Cloud-init supports running user-scripts as if they were
+cloud-init user-data. In this sense, anything with a shell interpreter
+directive will run.
+
+user-data and user-script
+-------------------------
+
+In the event that a user defines the meta-data key of "user-data" it will
+always supersede any user-script data. This is for consistency.
+
+base64
+------
+
+The following are exempt from base64 encoding, owing to the fact that they
+are provided by SmartOS:
+ * root_authorized_keys
+ * enable_motd_sys_info
+ * iptables_disable
+
+This list can be changed through the system config variable 'no_base64_decode'.
+
+This means that user-script and user-data as well as other values can be
+base64 encoded. Since Cloud-init can only guess as to whether or not something
+is truly base64 encoded, the following meta-data keys are hints as to whether
+or not to base64 decode something:
+ * base64_all: Except for excluded keys, attempt to base64 decode
+ the values. If the value fails to decode properly, it will be
+ returned in its original text form.
+ * base64_keys: A comma delimited list of which keys are base64 encoded.
+ * b64-<key>:
+ for any key, if there exists an entry in the metadata for 'b64-<key>',
+ then 'b64-<key>' is expected to be a plaintext boolean indicating whether
+ or not its value is encoded.
+ * no_base64_decode: This is a configuration setting
+ (i.e. /etc/cloud/cloud.cfg.d) that sets which values should not be
+ base64 decoded.
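A sketch of the decode rule these hints imply; this is illustrative only (the authoritative logic lives in DataSourceSmartOS.py), and the helper name is hypothetical:

    import base64
    import binascii

    def maybe_b64_decode(key, value, b64_all=False, b64_keys=(),
                         no_decode=('root_authorized_keys',
                                    'enable_motd_sys_info',
                                    'iptables_disable')):
        if key in no_decode:
            return value  # exempt keys are always returned as-is
        if b64_all or key in b64_keys:
            try:
                return base64.b64decode(value)
            except (TypeError, binascii.Error):
                return value  # fall back to the raw text on bad input
        return value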
diff --git a/packages/bddeb b/packages/bddeb
index bda3170d..30559870 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -32,14 +32,17 @@ PKG_MP = {
'boto': 'python-boto',
'cheetah': 'python-cheetah',
'configobj': 'python-configobj',
+ 'jsonpatch': 'python-json-patch',
'oauth': 'python-oauth',
'prettytable': 'python-prettytable',
+ 'pyserial': 'python-serial',
'pyyaml': 'python-yaml',
+ 'requests': 'python-requests',
}
-DEBUILD_ARGS = ["-us", "-S", "-uc"]
+DEBUILD_ARGS = ["-us", "-S", "-uc", "-d"]
-def write_debian_folder(root, version, revno):
+def write_debian_folder(root, version, revno, append_requires=None):
deb_dir = util.abs_join(root, 'debian')
os.makedirs(deb_dir)
@@ -58,7 +61,7 @@ def write_debian_folder(root, version, revno):
pkgs = [p.lower().strip() for p in stdout.splitlines()]
# Map to known packages
- requires = []
+ requires = list(append_requires or [])
for p in pkgs:
tgt_pkg = PKG_MP.get(p)
if not tgt_pkg:
@@ -87,6 +90,17 @@ def main():
" (default: %(default)s)"),
default=False,
action='store_true')
+ parser.add_argument("--no-cloud-utils", dest="no_cloud_utils",
+ help=("don't depend on cloud-utils package"
+ " (default: %(default)s)"),
+ default=False,
+ action='store_true')
+
+ parser.add_argument("--init-system", dest="init_system",
+ help=("build deb with INIT_SYSTEM=xxx"
+ " (default: %(default)s"),
+ default=os.environ.get("INIT_SYSTEM", "upstart"))
+
for ent in DEBUILD_ARGS:
parser.add_argument(ent, dest="debuild_args", action='append_const',
@@ -94,6 +108,8 @@ def main():
args = parser.parse_args()
+ os.environ['INIT_SYSTEM'] = args.init_system
+
capture = True
if args.verbose:
capture = False
@@ -128,7 +144,11 @@ def main():
shutil.move(extracted_name, xdir)
print("Creating a debian/ folder in %r" % (xdir))
- write_debian_folder(xdir, version, revno)
+ if not args.no_cloud_utils:
+ append_requires = ['cloud-utils']
+ else:
+ append_requires = []
+ write_debian_folder(xdir, version, revno, append_requires)
# The naming here seems to follow some debian standard
# so it will whine if it is changed...
diff --git a/packages/brpm b/packages/brpm
index eea2a046..8c90a0ab 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -34,13 +34,30 @@ from cloudinit import util
# this is a translation of the 'requires'
# file pypi package name to a redhat/fedora package name.
PKG_MP = {
- 'argparse': 'python-argparse',
- 'boto': 'python-boto',
- 'cheetah': 'python-cheetah',
- 'configobj': 'python-configobj',
- 'oauth': 'python-oauth',
- 'prettytable': 'python-prettytable',
- 'pyyaml': 'PyYAML',
+ 'redhat': {
+ 'argparse': 'python-argparse',
+ 'boto': 'python-boto',
+ 'cheetah': 'python-cheetah',
+ 'configobj': 'python-configobj',
+ 'jsonpatch': 'python-jsonpatch',
+ 'oauth': 'python-oauth',
+ 'prettytable': 'python-prettytable',
+ 'pyserial': 'pyserial',
+ 'pyyaml': 'PyYAML',
+ 'requests': 'python-requests',
+ },
+ 'suse': {
+ 'argparse': 'python-argparse',
+ 'boto': 'python-boto',
+ 'cheetah': 'python-cheetah',
+ 'configobj': 'python-configobj',
+ 'jsonpatch': 'python-jsonpatch',
+ 'oauth': 'python-oauth',
+ 'prettytable': 'python-prettytable',
+ 'pyserial': 'python-pyserial',
+ 'pyyaml': 'python-yaml',
+ 'requests': 'python-requests',
+ }
}
# Subdirectories of the ~/rpmbuild dir
@@ -119,7 +136,7 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn):
# Map to known packages
requires = []
for p in pkgs:
- tgt_pkg = PKG_MP.get(p)
+ tgt_pkg = PKG_MP[args.distro].get(p)
if not tgt_pkg:
raise RuntimeError(("Do not know how to translate pypi dependency"
" %r to a known package") % (p))
@@ -141,10 +158,11 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn):
missing_versions += 1
if missing_versions == 1:
# Must be using a new 'dev'/'trunk' release
- changelog_lines.append(format_change_line(datetime.now(), '??'))
+ changelog_lines.append(format_change_line(datetime.now(),
+ '??'))
else:
- sys.stderr.write(("Changelog version line %s "
- "does not have a corresponding tag!\n") % (line))
+ sys.stderr.write(("Changelog version line %s does not "
+ "have a corresponding tag!\n") % (line))
else:
changelog_lines.append(header)
else:
@@ -170,6 +188,10 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn):
def main():
parser = argparse.ArgumentParser()
+ parser.add_argument("-d", "--distro", dest="distro",
+ help="select distro (default: %(default)s)",
+ metavar="DISTRO", default='redhat',
+ choices=('redhat', 'suse'))
parser.add_argument("-b", "--boot", dest="boot",
help="select boot type (default: %(default)s)",
metavar="TYPE", default='sysvinit',
@@ -217,7 +239,7 @@ def main():
# Form the spec file to be used
tmpl_fn = util.abs_join(find_root(), 'packages',
- 'redhat', 'cloud-init.spec.in')
+ args.distro, 'cloud-init.spec.in')
contents = generate_spec_contents(args, tmpl_fn, root_dir,
os.path.basename(archive_fn))
spec_fn = util.abs_join(root_dir, 'cloud-init.spec')
@@ -236,13 +258,15 @@ def main():
globs.extend(glob.glob("%s/*.rpm" %
(util.abs_join(root_dir, 'RPMS', 'noarch'))))
globs.extend(glob.glob("%s/*.rpm" %
+ (util.abs_join(root_dir, 'RPMS', 'x86_64'))))
+ globs.extend(glob.glob("%s/*.rpm" %
(util.abs_join(root_dir, 'RPMS'))))
globs.extend(glob.glob("%s/*.rpm" %
(util.abs_join(root_dir, 'SRPMS'))))
for rpm_fn in globs:
tgt_fn = util.abs_join(os.getcwd(), os.path.basename(rpm_fn))
shutil.move(rpm_fn, tgt_fn)
- print("Wrote out redhat package %r" % (tgt_fn))
+ print("Wrote out %s package %r" % (args.distro, tgt_fn))
return 0
diff --git a/packages/debian/control.in b/packages/debian/control.in
index edb5aff5..b9352f5b 100644
--- a/packages/debian/control.in
+++ b/packages/debian/control.in
@@ -18,8 +18,7 @@ Standards-Version: 3.9.3
Package: cloud-init
Architecture: all
-Depends: cloud-utils,
- procps,
+Depends: procps,
python,
#for $r in $requires
${r},
diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in
new file mode 100644
index 00000000..296505c6
--- /dev/null
+++ b/packages/suse/cloud-init.spec.in
@@ -0,0 +1,162 @@
+## This is a cheetah template
+
+# See: http://www.zarb.org/~jasonc/macros.php
+# Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets
+# Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html
+
+#for $d in $defines
+%define ${d}
+#end for
+
+Name: cloud-init
+Version: ${version}
+Release: ${release}${subrelease}%{?dist}
+Summary: Cloud instance init scripts
+
+Group: System/Management
+License: GPLv3
+URL: http://launchpad.net/cloud-init
+
+Source0: ${archive_name}
+BuildRoot: %{_tmppath}/%{name}-%{version}-build
+
+%if 0%{?suse_version} && 0%{?suse_version} <= 1110
+%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
+%else
+BuildArch: noarch
+%endif
+
+BuildRequires: fdupes
+BuildRequires: filesystem
+BuildRequires: python-devel
+BuildRequires: python-setuptools
+BuildRequires: python-cheetah
+
+%if 0%{?suse_version} && 0%{?suse_version} <= 1210
+ %define initsys sysvinit
+%else
+ %define initsys systemd
+%endif
+
+# System util packages needed
+Requires: iproute2
+Requires: e2fsprogs
+Requires: net-tools
+Requires: procps
+
+# Install pypi 'dynamic' requirements
+#for $r in $requires
+Requires: ${r}
+#end for
+
+# Custom patches
+#set $size = 0
+#for $p in $patches
+Patch${size}: $p
+#set $size += 1
+#end for
+
+%description
+Cloud-init is a set of init scripts for cloud instances. Cloud instances
+need special scripts to run during initialization to retrieve and install
+ssh keys and to let the user run various scripts.
+
+%prep
+%setup -q -n %{name}-%{version}~${release}
+
+# Custom patches activation
+#set $size = 0
+#for $p in $patches
+%patch${size} -p1
+#set $size += 1
+#end for
+
+%build
+%{__python} setup.py build
+
+%install
+%{__python} setup.py install \
+ --skip-build --root=%{buildroot} --prefix=%{_prefix} \
+ --record-rpm=INSTALLED_FILES --install-lib=%{python_sitelib} \
+ --init-system=%{initsys}
+
+# Remove non-SUSE templates
+rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.debian.*
+rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.redhat.*
+rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.ubuntu.*
+
+# Remove cloud-init tests
+rm -r %{buildroot}/%{python_sitelib}/tests
+
+# Move sysvinit scripts to the correct place and create symbolic links
+%if %{initsys} == sysvinit
+ mkdir -p %{buildroot}/%{_initddir}
+ mv %{buildroot}%{_sysconfdir}/rc.d/init.d/* %{buildroot}%{_initddir}/
+ rmdir %{buildroot}%{_sysconfdir}/rc.d/init.d
+ rmdir %{buildroot}%{_sysconfdir}/rc.d
+
+ mkdir -p %{buildroot}/%{_sbindir}
+ pushd %{buildroot}/%{_initddir}
+ for file in * ; do
+ ln -s %{_initddir}/\${file} %{buildroot}/%{_sbindir}/rc\${file}
+ done
+ popd
+%endif
+
+# Move documentation
+mkdir -p %{buildroot}/%{_defaultdocdir}
+mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir}
+for doc in TODO LICENSE ChangeLog Requires ; do
+ cp \${doc} %{buildroot}/%{_defaultdocdir}/cloud-init
+done
+
+# Remove duplicate files
+%if 0%{?suse_version}
+ %fdupes %{buildroot}/%{python_sitelib}
+%endif
+
+mkdir -p %{buildroot}/var/lib/cloud
+
+%postun
+%insserv_cleanup
+
+%files
+
+# Sysvinit scripts
+%if %{initsys} == sysvinit
+ %attr(0755, root, root) %{_initddir}/cloud-config
+ %attr(0755, root, root) %{_initddir}/cloud-final
+ %attr(0755, root, root) %{_initddir}/cloud-init-local
+ %attr(0755, root, root) %{_initddir}/cloud-init
+
+ %{_sbindir}/rccloud-*
+%endif
+
+# Program binaries
+%{_bindir}/cloud-init*
+
+# There doesn't seem to be an agreed upon place for these
+# although it appears the standard says /usr/lib but rpmbuild
+# will try /usr/lib64 ??
+/usr/lib/%{name}/uncloud-init
+/usr/lib/%{name}/write-ssh-key-fingerprints
+
+# Docs
+%doc %{_defaultdocdir}/cloud-init/*
+
+# Configs
+%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg
+%dir %{_sysconfdir}/cloud/cloud.cfg.d
+%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg
+%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/README
+%dir %{_sysconfdir}/cloud/templates
+%config(noreplace) %{_sysconfdir}/cloud/templates/*
+
+# Python code is here...
+%{python_sitelib}/*
+
+/var/lib/cloud
+
+%changelog
+
+${changelog}
diff --git a/setup.py b/setup.py
index 24476681..8d18b97e 100755
--- a/setup.py
+++ b/setup.py
@@ -37,12 +37,14 @@ def is_f(p):
INITSYS_FILES = {
- 'sysvinit': [f for f in glob('sysvinit/*') if is_f(f)],
+ 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
+ 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)],
'systemd': [f for f in glob('systemd/*') if is_f(f)],
'upstart': [f for f in glob('upstart/*') if is_f(f)],
}
INITSYS_ROOTS = {
'sysvinit': '/etc/rc.d/init.d',
+ 'sysvinit_deb': '/etc/init.d',
'systemd': '/etc/systemd/system/',
'upstart': '/etc/init/',
}
@@ -59,9 +61,10 @@ def tiny_p(cmd, capture=True):
sp = subprocess.Popen(cmd, stdout=stdout,
stderr=stderr, stdin=None)
(out, err) = sp.communicate()
- if sp.returncode not in [0]:
+ ret = sp.returncode # pylint: disable=E1101
+ if ret not in [0]:
raise RuntimeError("Failed running %s [rc=%s] (%s, %s)"
- % (cmd, sp.returncode, out, err))
+ % (cmd, ret, out, err))
return (out, err)
diff --git a/sysvinit/debian/cloud-config b/sysvinit/debian/cloud-config
new file mode 100644
index 00000000..53322748
--- /dev/null
+++ b/sysvinit/debian/cloud-config
@@ -0,0 +1,64 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: cloud-config
+# Required-Start: cloud-init cloud-init-local
+# Required-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Cloud init modules --mode config
+# Description: Cloud configuration initialization
+### END INIT INFO
+
+# Authors: Julien Danjou <acid@debian.org>
+# Juerg Haefliger <juerg.haefliger@hp.com>
+# Thomas Goirand <zigo@debian.org>
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="Cloud service"
+NAME=cloud-init
+DAEMON=/usr/bin/$NAME
+DAEMON_ARGS="modules --mode config"
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
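+# Where upstart is PID 1 the matching upstart jobs run these phases
+# instead; let 'stop' succeed so maintainer scripts do not fail, and
+# refuse everything else.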
+if init_is_upstart; then
+ case "$1" in
+ stop)
+ exit 0
+ ;;
+ *)
+ exit 1
+ ;;
+ esac
+fi
+
+case "$1" in
+start)
+ log_daemon_msg "Starting $DESC" "$NAME"
+ $DAEMON ${DAEMON_ARGS}
+ case "$?" in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+;;
+stop|restart|force-reload)
+ echo "Error: argument '$1' not supported" >&2
+ exit 3
+;;
+*)
+ echo "Usage: $SCRIPTNAME {start}" >&2
+ exit 3
+;;
+esac
+
+:
diff --git a/sysvinit/debian/cloud-final b/sysvinit/debian/cloud-final
new file mode 100644
index 00000000..55afc8b0
--- /dev/null
+++ b/sysvinit/debian/cloud-final
@@ -0,0 +1,66 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: cloud-final
+# Required-Start: $all cloud-config
+# Required-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Cloud init modules final jobs
+# Description: This runs the cloud configuration initialization "final" jobs
+# and can be seen as the traditional "rc.local" time for the cloud.
+# It runs after all cloud-config jobs are run
+### END INIT INFO
+
+# Authors: Julien Danjou <acid@debian.org>
+# Juerg Haefliger <juerg.haefliger@hp.com>
+# Thomas Goirand <zigo@debian.org>
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="Cloud service"
+NAME=cloud-init
+DAEMON=/usr/bin/$NAME
+DAEMON_ARGS="modules --mode final"
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+if init_is_upstart; then
+ case "$1" in
+ stop)
+ exit 0
+ ;;
+ *)
+ exit 1
+ ;;
+ esac
+fi
+
+case "$1" in
+start)
+ log_daemon_msg "Starting $DESC" "$NAME"
+ $DAEMON ${DAEMON_ARGS}
+ case "$?" in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+;;
+stop|restart|force-reload)
+ echo "Error: argument '$1' not supported" >&2
+ exit 3
+;;
+*)
+ echo "Usage: $SCRIPTNAME {start}" >&2
+ exit 3
+;;
+esac
+
+:
diff --git a/sysvinit/debian/cloud-init b/sysvinit/debian/cloud-init
new file mode 100755
index 00000000..48fa0423
--- /dev/null
+++ b/sysvinit/debian/cloud-init
@@ -0,0 +1,64 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: cloud-init
+# Required-Start: $local_fs $remote_fs $syslog $network cloud-init-local
+# Required-Stop: $remote_fs
+# X-Start-Before: sshd
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Cloud init
+# Description: Cloud configuration initialization
+### END INIT INFO
+
+# Authors: Julien Danjou <acid@debian.org>
+# Thomas Goirand <zigo@debian.org>
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="Cloud service"
+NAME=cloud-init
+DAEMON=/usr/bin/$NAME
+DAEMON_ARGS="init"
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+if init_is_upstart; then
+ case "$1" in
+ stop)
+ exit 0
+ ;;
+ *)
+ exit 1
+ ;;
+ esac
+fi
+
+case "$1" in
+ start)
+ log_daemon_msg "Starting $DESC" "$NAME"
+ $DAEMON ${DAEMON_ARGS}
+ case "$?" in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+ ;;
+ stop|restart|force-reload)
+ echo "Error: argument '$1' not supported" >&2
+ exit 3
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start}" >&2
+ exit 3
+ ;;
+esac
+
+:
diff --git a/sysvinit/debian/cloud-init-local b/sysvinit/debian/cloud-init-local
new file mode 100644
index 00000000..802ee8e9
--- /dev/null
+++ b/sysvinit/debian/cloud-init-local
@@ -0,0 +1,63 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: cloud-init-local
+# Required-Start: $local_fs $remote_fs
+# Required-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Cloud init local
+# Description: Cloud configuration initialization
+### END INIT INFO
+
+# Authors: Julien Danjou <acid@debian.org>
+# Juerg Haefliger <juerg.haefliger@hp.com>
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="Cloud service"
+NAME=cloud-init
+DAEMON=/usr/bin/$NAME
+DAEMON_ARGS="init --local"
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+if init_is_upstart; then
+ case "$1" in
+ stop)
+ exit 0
+ ;;
+ *)
+ exit 1
+ ;;
+ esac
+fi
+
+case "$1" in
+start)
+ log_daemon_msg "Starting $DESC" "$NAME"
+ $DAEMON ${DAEMON_ARGS}
+ case "$?" in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+;;
+stop|restart|force-reload)
+ echo "Error: argument '$1' not supported" >&2
+ exit 3
+;;
+*)
+ echo "Usage: $SCRIPTNAME {start}" >&2
+ exit 3
+;;
+esac
+
+:
diff --git a/sysvinit/cloud-config b/sysvinit/redhat/cloud-config
index e587446d..ad8ed831 100755
--- a/sysvinit/cloud-config
+++ b/sysvinit/redhat/cloud-config
@@ -29,15 +29,13 @@
# Should-Start: $time
# Required-Stop:
# Should-Stop:
-# Default-Start: 3 5
-# Default-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
# Short-Description: The config cloud-init job
# Description: Start cloud-init and runs the config phase
# and any associated config modules as desired.
### END INIT INFO
-. /etc/init.d/functions
-
# Return values acc. to LSB for all commands but status:
# 0 - success
# 1 - generic or unspecified error
@@ -60,8 +58,9 @@ prog="cloud-init"
cloud_init="/usr/bin/cloud-init"
conf="/etc/cloud/cloud.cfg"
-# If there exists a sysconfig variable override file use it...
+# If sysconfig/default variable override files exist, source them...
[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init
+[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init
start() {
[ -x $cloud_init ] || return 5
@@ -80,8 +79,6 @@ stop() {
return $RETVAL
}
-. /etc/init.d/functions
-
case "$1" in
start)
start
diff --git a/sysvinit/cloud-final b/sysvinit/redhat/cloud-final
index 5deb8457..aeae8903 100755
--- a/sysvinit/cloud-final
+++ b/sysvinit/redhat/cloud-final
@@ -29,15 +29,13 @@
# Should-Start: $time
# Required-Stop:
# Should-Stop:
-# Default-Start: 3 5
-# Default-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
# Short-Description: The final cloud-init job
# Description: Start cloud-init and runs the final phase
# and any associated final modules as desired.
### END INIT INFO
-. /etc/init.d/functions
-
# Return values acc. to LSB for all commands but status:
# 0 - success
# 1 - generic or unspecified error
@@ -60,8 +58,9 @@ prog="cloud-init"
cloud_init="/usr/bin/cloud-init"
conf="/etc/cloud/cloud.cfg"
-# If there exists a sysconfig variable override file use it...
+# If sysconfig/default variable override files exist, source them...
[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init
+[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init
start() {
[ -x $cloud_init ] || return 5
@@ -80,8 +79,6 @@ stop() {
return $RETVAL
}
-. /etc/init.d/functions
-
case "$1" in
start)
start
diff --git a/sysvinit/cloud-init b/sysvinit/redhat/cloud-init
index f8ab5d5f..c1c92ad0 100755
--- a/sysvinit/cloud-init
+++ b/sysvinit/redhat/cloud-init
@@ -29,15 +29,13 @@
# Should-Start: $time
# Required-Stop:
# Should-Stop:
-# Default-Start: 3 5
-# Default-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
# Short-Description: The initial cloud-init job (net and fs contingent)
# Description: Start cloud-init and runs the initialization phase
# and any associated initial modules as desired.
### END INIT INFO
-. /etc/init.d/functions
-
# Return values acc. to LSB for all commands but status:
# 0 - success
# 1 - generic or unspecified error
@@ -60,8 +58,9 @@ prog="cloud-init"
cloud_init="/usr/bin/cloud-init"
conf="/etc/cloud/cloud.cfg"
-# If there exists a sysconfig variable override file use it...
+# If sysconfig/default variable override files exist, source them...
[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init
+[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init
start() {
[ -x $cloud_init ] || return 5
@@ -80,8 +79,6 @@ stop() {
return $RETVAL
}
-. /etc/init.d/functions
-
case "$1" in
start)
start
diff --git a/sysvinit/cloud-init-local b/sysvinit/redhat/cloud-init-local
index 0c63b9b0..b53e0db2 100755
--- a/sysvinit/cloud-init-local
+++ b/sysvinit/redhat/cloud-init-local
@@ -29,15 +29,13 @@
# Should-Start: $time
# Required-Stop:
# Should-Stop:
-# Default-Start: 3 5
-# Default-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
# Short-Description: The initial cloud-init job (local fs contingent)
# Description: Start cloud-init and runs the initialization phases
# and any associated initial modules as desired.
### END INIT INFO
-. /etc/init.d/functions
-
# Return values acc. to LSB for all commands but status:
# 0 - success
# 1 - generic or unspecified error
@@ -60,8 +58,9 @@ prog="cloud-init"
cloud_init="/usr/bin/cloud-init"
conf="/etc/cloud/cloud.cfg"
-# If there exists a sysconfig variable override file use it...
+# If sysconfig/default variable override files exist, source them...
[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init
+[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init
start() {
[ -x $cloud_init ] || return 5
@@ -80,8 +79,6 @@ stop() {
return $RETVAL
}
-. /etc/init.d/functions
-
case "$1" in
start)
start
diff --git a/templates/hosts.suse.tmpl b/templates/hosts.suse.tmpl
new file mode 100644
index 00000000..5d3d57e4
--- /dev/null
+++ b/templates/hosts.suse.tmpl
@@ -0,0 +1,24 @@
+#*
+ This file /etc/cloud/templates/hosts.suse.tmpl is only utilized
+ if enabled in cloud-config. Specifically, in order to enable it
+ you need to add the following to config:
+ manage_etc_hosts: True
+*#
+# Your system has configured 'manage_etc_hosts' as True.
+# As a result, if you wish for changes to this file to persist
+# then you will need to either
+# a.) make changes to the master file in /etc/cloud/templates/hosts.suse.tmpl
+# b.) change or remove the value of 'manage_etc_hosts' in
+# /etc/cloud/cloud.cfg or cloud-config from user-data
+#
+# The following lines are desirable for IPv4 capable hosts
+127.0.0.1 localhost
+
+# The following lines are desirable for IPv6 capable hosts
+::1 localhost ipv6-localhost ipv6-loopback
+fe00::0 ipv6-localnet
+
+ff00::0 ipv6-mcastprefix
+ff02::1 ipv6-allnodes
+ff02::2 ipv6-allrouters
+ff02::3 ipv6-allhosts
diff --git a/templates/sources.list.debian.tmpl b/templates/sources.list.debian.tmpl
new file mode 100644
index 00000000..609bc6bd
--- /dev/null
+++ b/templates/sources.list.debian.tmpl
@@ -0,0 +1,28 @@
+\## Note, this file is written by cloud-init on first boot of an instance
+\## modifications made here will not survive a re-bundle.
+\## if you wish to make changes you can:
+\## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
+\## or do the same in user-data
+\## b.) add sources in /etc/apt/sources.list.d
+\## c.) make changes to template file /etc/cloud/templates/sources.list.debian.tmpl
+\###
+
+# See http://www.debian.org/releases/stable/i386/release-notes/ch-upgrading.html
+# for how to upgrade to newer versions of the distribution.
+deb $mirror $codename main contrib non-free
+deb-src $mirror $codename main contrib non-free
+
+\## Major bug fix updates produced after the final release of the
+\## distribution.
+deb $security $codename/updates main contrib non-free
+deb-src $security $codename/updates main contrib non-free
+deb $mirror $codename-updates main contrib non-free
+deb-src $mirror $codename-updates main contrib non-free
+
+\## Uncomment the following two lines to add software from the 'backports'
+\## repository.
+\## N.B. software from this repository may not have been tested as
+\## extensively as that contained in the main release, although it includes
+\## newer versions of some applications which may provide useful features.
+# deb http://backports.debian.org/debian-backports $codename-backports main contrib non-free
+# deb-src http://backports.debian.org/debian-backports $codename-backports main contrib non-free
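
In the template above, \# escapes produce literal # characters in the rendered file, while $mirror, $security and $codename are substituted from the distro and apt configuration. A sketch of the rendering step, assuming it goes through cloudinit.templater with those three parameters (the values shown are illustrative):

    # Hedged sketch of rendering the Debian sources.list template; the
    # parameters cc_apt_configure actually passes may differ.
    from cloudinit import templater

    params = {
        'mirror': 'http://ftp.debian.org/debian',
        'security': 'http://security.debian.org/',
        'codename': 'wheezy',
    }
    templater.render_to_file(
        '/etc/cloud/templates/sources.list.debian.tmpl',
        '/etc/apt/sources.list', params)
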
diff --git a/templates/sources.list.tmpl b/templates/sources.list.ubuntu.tmpl
index ce395b3d..ce395b3d 100644
--- a/templates/sources.list.tmpl
+++ b/templates/sources.list.ubuntu.tmpl
diff --git a/tests/data/merge_sources/expected1.yaml b/tests/data/merge_sources/expected1.yaml
new file mode 100644
index 00000000..640d282b
--- /dev/null
+++ b/tests/data/merge_sources/expected1.yaml
@@ -0,0 +1 @@
+Blah: ['blah2', 'b']
diff --git a/tests/data/merge_sources/expected10.yaml b/tests/data/merge_sources/expected10.yaml
new file mode 100644
index 00000000..b865db16
--- /dev/null
+++ b/tests/data/merge_sources/expected10.yaml
@@ -0,0 +1,7 @@
+#cloud-config
+
+power_state:
+ delay: 30
+ mode: poweroff
+ message: [Bye, Bye, Pew, Pew]
+
diff --git a/tests/data/merge_sources/expected11.yaml b/tests/data/merge_sources/expected11.yaml
new file mode 100644
index 00000000..c0530dc3
--- /dev/null
+++ b/tests/data/merge_sources/expected11.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+
+a: 22
+b: 4
+c: 3
diff --git a/tests/data/merge_sources/expected12.yaml b/tests/data/merge_sources/expected12.yaml
new file mode 100644
index 00000000..0421d2c8
--- /dev/null
+++ b/tests/data/merge_sources/expected12.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+
+a:
+ e:
+ y: 2
diff --git a/tests/data/merge_sources/expected2.yaml b/tests/data/merge_sources/expected2.yaml
new file mode 100644
index 00000000..6eccc2cf
--- /dev/null
+++ b/tests/data/merge_sources/expected2.yaml
@@ -0,0 +1,3 @@
+Blah: 3
+Blah2: 2
+Blah3: [1]
diff --git a/tests/data/merge_sources/expected3.yaml b/tests/data/merge_sources/expected3.yaml
new file mode 100644
index 00000000..32d9ad48
--- /dev/null
+++ b/tests/data/merge_sources/expected3.yaml
@@ -0,0 +1 @@
+Blah: [blah2, 'blah1']
diff --git a/tests/data/merge_sources/expected4.yaml b/tests/data/merge_sources/expected4.yaml
new file mode 100644
index 00000000..d88d8f73
--- /dev/null
+++ b/tests/data/merge_sources/expected4.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+Blah: {}
diff --git a/tests/data/merge_sources/expected5.yaml b/tests/data/merge_sources/expected5.yaml
new file mode 100644
index 00000000..628f5878
--- /dev/null
+++ b/tests/data/merge_sources/expected5.yaml
@@ -0,0 +1,7 @@
+#cloud-config
+
+Blah: 3
+Blah2: 2
+Blah3: [1]
+
+
diff --git a/tests/data/merge_sources/expected6.yaml b/tests/data/merge_sources/expected6.yaml
new file mode 100644
index 00000000..7afe1d7c
--- /dev/null
+++ b/tests/data/merge_sources/expected6.yaml
@@ -0,0 +1,9 @@
+#cloud-config
+
+run_cmds:
+ - bash
+ - top
+ - ps
+ - vi
+ - emacs
+
diff --git a/tests/data/merge_sources/expected7.yaml b/tests/data/merge_sources/expected7.yaml
new file mode 100644
index 00000000..25284f04
--- /dev/null
+++ b/tests/data/merge_sources/expected7.yaml
@@ -0,0 +1,38 @@
+#cloud-config
+
+users:
+ - default
+ - name: foobar
+ gecos: Foo B. Bar
+ primary-group: foobar
+ groups: users
+ selinux-user: staff_u
+ expiredate: 2012-09-01
+ ssh-import-id: foobar
+ lock-passwd: false
+ passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
+ - name: barfoo
+ gecos: Bar B. Foo
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ groups: users, admin
+ ssh-import-id: None
+ lock-passwd: true
+ ssh-authorized-keys:
+ - <ssh pub key 1>
+ - <ssh pub key 2>
+ - name: cloudy
+ gecos: Magic Cloud App Daemon User
+ inactive: true
+ system: true
+ - bob
+ - joe
+ - sue
+ - name: foobar_jr
+ gecos: Foo B. Bar Jr
+ primary-group: foobar
+ groups: users
+ selinux-user: staff_u
+ expiredate: 2012-09-01
+ ssh-import-id: foobar
+ lock-passwd: false
+ passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
diff --git a/tests/data/merge_sources/expected8.yaml b/tests/data/merge_sources/expected8.yaml
new file mode 100644
index 00000000..69ca562d
--- /dev/null
+++ b/tests/data/merge_sources/expected8.yaml
@@ -0,0 +1,7 @@
+#cloud-config
+
+mounts:
+ - [ ephemeral22, /mnt, auto, "defaults,noexec" ]
+ - [ sdc, /opt/data ]
+ - [ xvdh, /opt/data, "auto", "defaults,nobootwait", "0", "0" ]
+ - [ dd, /dev/zero ]
diff --git a/tests/data/merge_sources/expected9.yaml b/tests/data/merge_sources/expected9.yaml
new file mode 100644
index 00000000..00f91ca0
--- /dev/null
+++ b/tests/data/merge_sources/expected9.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+
+phone_home:
+ url: http://my.example.com/$INSTANCE_ID/$BLAH_BLAH
+ post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
diff --git a/tests/data/merge_sources/source1-1.yaml b/tests/data/merge_sources/source1-1.yaml
new file mode 100644
index 00000000..38e4e5e0
--- /dev/null
+++ b/tests/data/merge_sources/source1-1.yaml
@@ -0,0 +1,3 @@
+#cloud-config
+Blah: ['blah2']
+
diff --git a/tests/data/merge_sources/source1-2.yaml b/tests/data/merge_sources/source1-2.yaml
new file mode 100644
index 00000000..2cd0e0e5
--- /dev/null
+++ b/tests/data/merge_sources/source1-2.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+
+Blah: ['b']
+
+merge_how: 'dict(recurse_array,no_replace)+list(append)'
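
expected1.yaml above is the result of merging source1-1.yaml with source1-2.yaml under merge_how 'dict(recurse_array,no_replace)+list(append)': dictionaries are recursed into, values already present are not replaced, and lists are appended, so ['blah2'] + ['b'] becomes ['blah2', 'b']. A deliberately simplified re-implementation of those semantics (not cloud-init's mergers code, which is pluggable and more general):

    # Toy version of dict(recurse_array,no_replace)+list(append).
    def merge(base, new):
        if isinstance(base, dict) and isinstance(new, dict):
            for k, v in new.items():
                base[k] = merge(base[k], v) if k in base else v
            return base
        if isinstance(base, list) and isinstance(new, list):
            return base + new   # list(append)
        return base             # no_replace: keep the earlier value

    print(merge({'Blah': ['blah2']}, {'Blah': ['b']}))
    # {'Blah': ['blah2', 'b']}
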
diff --git a/tests/data/merge_sources/source10-1.yaml b/tests/data/merge_sources/source10-1.yaml
new file mode 100644
index 00000000..6ae72a13
--- /dev/null
+++ b/tests/data/merge_sources/source10-1.yaml
@@ -0,0 +1,6 @@
+#cloud-config
+
+power_state:
+ delay: 30
+ mode: poweroff
+ message: [Bye, Bye]
diff --git a/tests/data/merge_sources/source10-2.yaml b/tests/data/merge_sources/source10-2.yaml
new file mode 100644
index 00000000..a38cf1c5
--- /dev/null
+++ b/tests/data/merge_sources/source10-2.yaml
@@ -0,0 +1,6 @@
+#cloud-config
+
+power_state:
+ message: [Pew, Pew]
+
+merge_how: 'dict(recurse_list)+list(append)'
diff --git a/tests/data/merge_sources/source11-1.yaml b/tests/data/merge_sources/source11-1.yaml
new file mode 100644
index 00000000..ee29d681
--- /dev/null
+++ b/tests/data/merge_sources/source11-1.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+
+a: 1
+b: 2
+c: 3
diff --git a/tests/data/merge_sources/source11-2.yaml b/tests/data/merge_sources/source11-2.yaml
new file mode 100644
index 00000000..a9914c34
--- /dev/null
+++ b/tests/data/merge_sources/source11-2.yaml
@@ -0,0 +1,3 @@
+#cloud-config
+
+b: 4
diff --git a/tests/data/merge_sources/source11-3.yaml b/tests/data/merge_sources/source11-3.yaml
new file mode 100644
index 00000000..8f2b8944
--- /dev/null
+++ b/tests/data/merge_sources/source11-3.yaml
@@ -0,0 +1,3 @@
+#cloud-config
+
+a: 22
diff --git a/tests/data/merge_sources/source12-1.yaml b/tests/data/merge_sources/source12-1.yaml
new file mode 100644
index 00000000..09e7c899
--- /dev/null
+++ b/tests/data/merge_sources/source12-1.yaml
@@ -0,0 +1,8 @@
+#cloud-config
+
+a:
+ c: 1
+ d: 2
+ e:
+ z: a
+ y: b
diff --git a/tests/data/merge_sources/source12-2.yaml b/tests/data/merge_sources/source12-2.yaml
new file mode 100644
index 00000000..0421d2c8
--- /dev/null
+++ b/tests/data/merge_sources/source12-2.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+
+a:
+ e:
+ y: 2
diff --git a/tests/data/merge_sources/source2-1.yaml b/tests/data/merge_sources/source2-1.yaml
new file mode 100644
index 00000000..c7a33aaa
--- /dev/null
+++ b/tests/data/merge_sources/source2-1.yaml
@@ -0,0 +1,6 @@
+#cloud-config
+
+
+Blah: 1
+Blah2: 2
+Blah3: 3
diff --git a/tests/data/merge_sources/source2-2.yaml b/tests/data/merge_sources/source2-2.yaml
new file mode 100644
index 00000000..8f2fdc1a
--- /dev/null
+++ b/tests/data/merge_sources/source2-2.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+
+Blah: 3
+Blah2: 2
+Blah3: [1]
diff --git a/tests/data/merge_sources/source3-1.yaml b/tests/data/merge_sources/source3-1.yaml
new file mode 100644
index 00000000..2303e906
--- /dev/null
+++ b/tests/data/merge_sources/source3-1.yaml
@@ -0,0 +1,4 @@
+#cloud-config
+Blah: ['blah1']
+
+
diff --git a/tests/data/merge_sources/source3-2.yaml b/tests/data/merge_sources/source3-2.yaml
new file mode 100644
index 00000000..dca2ad10
--- /dev/null
+++ b/tests/data/merge_sources/source3-2.yaml
@@ -0,0 +1,4 @@
+#cloud-config
+Blah: ['blah2']
+
+merge_how: 'dict(recurse_array,no_replace)+list(prepend)'
diff --git a/tests/data/merge_sources/source4-1.yaml b/tests/data/merge_sources/source4-1.yaml
new file mode 100644
index 00000000..e5b16872
--- /dev/null
+++ b/tests/data/merge_sources/source4-1.yaml
@@ -0,0 +1,3 @@
+#cloud-config
+Blah:
+ b: 1
diff --git a/tests/data/merge_sources/source4-2.yaml b/tests/data/merge_sources/source4-2.yaml
new file mode 100644
index 00000000..1844e0f8
--- /dev/null
+++ b/tests/data/merge_sources/source4-2.yaml
@@ -0,0 +1,6 @@
+#cloud-config
+Blah:
+ b: null
+
+
+merge_how: 'dict(allow_delete,no_replace)+list()'
diff --git a/tests/data/merge_sources/source5-1.yaml b/tests/data/merge_sources/source5-1.yaml
new file mode 100644
index 00000000..c7a33aaa
--- /dev/null
+++ b/tests/data/merge_sources/source5-1.yaml
@@ -0,0 +1,6 @@
+#cloud-config
+
+
+Blah: 1
+Blah2: 2
+Blah3: 3
diff --git a/tests/data/merge_sources/source5-2.yaml b/tests/data/merge_sources/source5-2.yaml
new file mode 100644
index 00000000..f61c96a2
--- /dev/null
+++ b/tests/data/merge_sources/source5-2.yaml
@@ -0,0 +1,8 @@
+#cloud-config
+
+Blah: 3
+Blah2: 2
+Blah3: [1]
+
+
+merge_how: 'dict(replace)+list(append)'
diff --git a/tests/data/merge_sources/source6-1.yaml b/tests/data/merge_sources/source6-1.yaml
new file mode 100644
index 00000000..519f7309
--- /dev/null
+++ b/tests/data/merge_sources/source6-1.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+
+run_cmds:
+ - bash
+ - top
diff --git a/tests/data/merge_sources/source6-2.yaml b/tests/data/merge_sources/source6-2.yaml
new file mode 100644
index 00000000..d8fac446
--- /dev/null
+++ b/tests/data/merge_sources/source6-2.yaml
@@ -0,0 +1,8 @@
+#cloud-config
+
+run_cmds:
+ - ps
+ - vi
+ - emacs
+
+merge_type: 'list(append)+dict(recurse_array)+str()'
diff --git a/tests/data/merge_sources/source7-1.yaml b/tests/data/merge_sources/source7-1.yaml
new file mode 100644
index 00000000..8fb9b32a
--- /dev/null
+++ b/tests/data/merge_sources/source7-1.yaml
@@ -0,0 +1,27 @@
+#cloud-config
+
+users:
+ - default
+ - name: foobar
+ gecos: Foo B. Bar
+ primary-group: foobar
+ groups: users
+ selinux-user: staff_u
+ expiredate: 2012-09-01
+ ssh-import-id: foobar
+ lock-passwd: false
+ passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
+ - name: barfoo
+ gecos: Bar B. Foo
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ groups: users, admin
+ ssh-import-id: None
+ lock-passwd: true
+ ssh-authorized-keys:
+ - <ssh pub key 1>
+ - <ssh pub key 2>
+ - name: cloudy
+ gecos: Magic Cloud App Daemon User
+ inactive: true
+ system: true
+
diff --git a/tests/data/merge_sources/source7-2.yaml b/tests/data/merge_sources/source7-2.yaml
new file mode 100644
index 00000000..1e26201b
--- /dev/null
+++ b/tests/data/merge_sources/source7-2.yaml
@@ -0,0 +1,17 @@
+#cloud-config
+
+users:
+ - bob
+ - joe
+ - sue
+ - name: foobar_jr
+ gecos: Foo B. Bar Jr
+ primary-group: foobar
+ groups: users
+ selinux-user: staff_u
+ expiredate: 2012-09-01
+ ssh-import-id: foobar
+ lock-passwd: false
+ passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
+
+merge_how: "dict(recurse_array)+list(append)"
diff --git a/tests/data/merge_sources/source8-1.yaml b/tests/data/merge_sources/source8-1.yaml
new file mode 100644
index 00000000..5ea51c2c
--- /dev/null
+++ b/tests/data/merge_sources/source8-1.yaml
@@ -0,0 +1,7 @@
+#cloud-config
+
+mounts:
+ - [ ephemeral0, /mnt, auto, "defaults,noexec" ]
+ - [ sdc, /opt/data ]
+ - [ xvdh, /opt/data, "auto", "defaults,nobootwait", "0", "0" ]
+ - [ dd, /dev/zero ]
diff --git a/tests/data/merge_sources/source8-2.yaml b/tests/data/merge_sources/source8-2.yaml
new file mode 100644
index 00000000..7fa3262b
--- /dev/null
+++ b/tests/data/merge_sources/source8-2.yaml
@@ -0,0 +1,6 @@
+#cloud-config
+
+mounts:
+ - [ ephemeral22, /mnt, auto, "defaults,noexec" ]
+
+merge_how: 'dict(recurse_array)+list(recurse_list,recurse_str)+str()'
diff --git a/tests/data/merge_sources/source9-1.yaml b/tests/data/merge_sources/source9-1.yaml
new file mode 100644
index 00000000..0b102ba6
--- /dev/null
+++ b/tests/data/merge_sources/source9-1.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+
+phone_home:
+ url: http://my.example.com/$INSTANCE_ID/
+ post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
diff --git a/tests/data/merge_sources/source9-2.yaml b/tests/data/merge_sources/source9-2.yaml
new file mode 100644
index 00000000..ac85afc6
--- /dev/null
+++ b/tests/data/merge_sources/source9-2.yaml
@@ -0,0 +1,6 @@
+#cloud-config
+
+phone_home:
+ url: $BLAH_BLAH
+
+merge_how: 'dict(recurse_str)+str(append)'
diff --git a/tests/data/mountinfo_precise_ext4.txt b/tests/data/mountinfo_precise_ext4.txt
new file mode 100644
index 00000000..a7a1db67
--- /dev/null
+++ b/tests/data/mountinfo_precise_ext4.txt
@@ -0,0 +1,24 @@
+15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
+16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
+17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=16422216k,nr_inodes=4105554,mode=755
+18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
+19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=6572812k,mode=755
+20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered
+21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs cgroup rw,mode=755
+22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw
+23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw
+25 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw
+26 19 0:19 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k
+27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw
+28 19 0:21 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755
+24 21 0:18 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset
+29 21 0:22 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu
+30 21 0:23 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct
+31 21 0:24 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory
+32 21 0:25 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices
+33 21 0:26 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer
+34 21 0:27 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio
+35 21 0:28 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event
+36 20 9:0 / /boot rw,relatime - ext4 /dev/md0 rw,data=ordered
+37 16 0:29 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw
+39 28 0:30 / /run/user/foobar/gvfs rw,nosuid,nodev,relatime - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000
diff --git a/tests/data/mountinfo_raring_btrfs.txt b/tests/data/mountinfo_raring_btrfs.txt
new file mode 100644
index 00000000..c5795636
--- /dev/null
+++ b/tests/data/mountinfo_raring_btrfs.txt
@@ -0,0 +1,13 @@
+15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
+16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
+17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=865556k,nr_inodes=216389,mode=755
+18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
+19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=348196k,mode=755
+20 1 0:16 /@ / rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache
+21 15 0:19 / /sys/fs/fuse/connections rw,relatime - fusectl none rw
+22 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw
+23 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw
+24 19 0:20 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k
+25 19 0:21 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw
+26 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755
+27 20 0:16 /@home /home rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache
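
The two fixtures above are real /proc/<pid>/mountinfo captures: a Precise system on ext4/LVM, and a Raring system on btrfs where / is the /@ subvolume, the case the resize code must special-case. Per the kernel documentation each line holds: mount ID, parent ID, major:minor, root, mount point, mount options, optional fields terminated by '-', then fs type, source and super options. An illustrative parser (field names follow the kernel docs; this is not cloud-init's implementation):

    # Split one mountinfo line into named fields.
    def parse_mountinfo_line(line):
        parts = line.split()
        sep = parts.index('-')      # optional fields end at '-'
        return {
            'mount_id': parts[0],
            'parent_id': parts[1],
            'major_minor': parts[2],
            'root': parts[3],       # e.g. '/@' for a btrfs subvolume
            'mount_point': parts[4],
            'options': parts[5],
            'fs_type': parts[sep + 1],
            'source': parts[sep + 2],
        }

    info = parse_mountinfo_line(
        '20 1 0:16 /@ / rw,relatime - btrfs /dev/vda1 rw,compress=lzo')
    assert info['fs_type'] == 'btrfs' and info['root'] == '/@'
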
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 904677f1..c0da0983 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -146,7 +146,8 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
('chmod', 1),
('delete_dir_contents', 1),
('del_file', 1),
- ('sym_link', -1)],
+ ('sym_link', -1),
+ ('copy', -1)],
}
for (mod, funcs) in patch_funcs.items():
for (f, am) in funcs:
@@ -175,6 +176,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
def patchOS(self, new_root):
patch_funcs = {
os.path: ['isfile', 'exists', 'islink', 'isdir'],
+ os: ['listdir'],
}
for (mod, funcs) in patch_funcs.items():
for f in funcs:
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index ac082076..b4b20e51 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -22,7 +22,8 @@ class FakeModule(handlers.Handler):
def list_types(self):
return self.types
- def _handle_part(self, data, ctype, filename, payload, frequency):
+ def handle_part(self, _data, ctype, filename, # pylint: disable=W0221
+ payload, frequency):
pass
@@ -103,6 +104,9 @@ class TestHandlerHandlePart(MockerTestCase):
self.filename = "fake filename"
self.payload = "fake payload"
self.frequency = settings.PER_INSTANCE
+ self.headers = {
+ 'Content-Type': self.ctype,
+ }
def test_normal_version_1(self):
"""
@@ -118,8 +122,8 @@ class TestHandlerHandlePart(MockerTestCase):
self.payload)
self.mocker.replay()
- handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
- self.payload, self.frequency)
+ handlers.run_part(mod_mock, self.data, self.filename,
+ self.payload, self.frequency, self.headers)
def test_normal_version_2(self):
"""
@@ -135,8 +139,8 @@ class TestHandlerHandlePart(MockerTestCase):
self.payload, self.frequency)
self.mocker.replay()
- handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
- self.payload, self.frequency)
+ handlers.run_part(mod_mock, self.data, self.filename,
+ self.payload, self.frequency, self.headers)
def test_modfreq_per_always(self):
"""
@@ -152,8 +156,8 @@ class TestHandlerHandlePart(MockerTestCase):
self.payload)
self.mocker.replay()
- handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
- self.payload, self.frequency)
+ handlers.run_part(mod_mock, self.data, self.filename,
+ self.payload, self.frequency, self.headers)
def test_no_handle_when_modfreq_once(self):
"""C{handle_part} is not called if frequency is once."""
@@ -163,8 +167,8 @@ class TestHandlerHandlePart(MockerTestCase):
self.mocker.result(settings.PER_ONCE)
self.mocker.replay()
- handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
- self.payload, self.frequency)
+ handlers.run_part(mod_mock, self.data, self.filename,
+ self.payload, self.frequency, self.headers)
def test_exception_is_caught(self):
"""Exceptions within C{handle_part} are caught and logged."""
@@ -178,8 +182,8 @@ class TestHandlerHandlePart(MockerTestCase):
self.mocker.throw(Exception())
self.mocker.replay()
- handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
- self.payload, self.frequency)
+ handlers.run_part(mod_mock, self.data, self.filename,
+ self.payload, self.frequency, self.headers)
class TestCmdlineUrl(MockerTestCase):
@@ -191,8 +195,8 @@ class TestCmdlineUrl(MockerTestCase):
mock_readurl = self.mocker.replace(url_helper.readurl,
passthrough=False)
- mock_readurl(url)
- self.mocker.result(url_helper.UrlResponse(200, payload))
+ mock_readurl(url, ARGS, KWARGS)
+ self.mocker.result(util.StringResponse(payload))
self.mocker.replay()
self.assertEqual((key, url, None),
@@ -207,8 +211,8 @@ class TestCmdlineUrl(MockerTestCase):
mock_readurl = self.mocker.replace(url_helper.readurl,
passthrough=False)
- mock_readurl(url)
- self.mocker.result(url_helper.UrlResponse(200, payload))
+ mock_readurl(url, ARGS, KWARGS)
+ self.mocker.result(util.StringResponse(payload))
self.mocker.replay()
self.assertEqual((key, url, payload),
@@ -221,7 +225,7 @@ class TestCmdlineUrl(MockerTestCase):
cmdline = "ro %s=%s bar=1" % (key, url)
self.mocker.replace(url_helper.readurl, passthrough=False)
- self.mocker.result(url_helper.UrlResponse(400))
+ self.mocker.result(util.StringResponse(""))
self.mocker.replay()
self.assertEqual((None, None, None),
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index 5f41cb3d..b387f13b 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -2,7 +2,7 @@
import os
-from mocker import MockerTestCase
+from tests.unittests import helpers as test_helpers
from cloudinit import handlers
from cloudinit import helpers
@@ -13,7 +13,7 @@ from cloudinit.handlers import upstart_job
from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE)
-class TestBuiltins(MockerTestCase):
+class TestBuiltins(test_helpers.FilesystemMockingTestCase):
def test_upstart_frequency_no_out(self):
c_root = self.makeDir()
@@ -36,13 +36,18 @@ class TestBuiltins(MockerTestCase):
def test_upstart_frequency_single(self):
# files should be written out when frequency is ! per-instance
- c_root = self.makeDir()
- up_root = self.makeDir()
+ new_root = self.makeDir()
+ freq = PER_INSTANCE
+
+ self.patchOS(new_root)
+ self.patchUtils(new_root)
paths = helpers.Paths({
- 'cloud_dir': c_root,
- 'upstart_dir': up_root,
+ 'upstart_dir': "/etc/upstart",
})
- freq = PER_INSTANCE
+
+ upstart_job.SUITABLE_UPSTART = True
+ util.ensure_dir("/run")
+ util.ensure_dir("/etc/upstart")
mock_subp = self.mocker.replace(util.subp, passthrough=False)
mock_subp(["initctl", "reload-configuration"], capture=False)
@@ -55,4 +60,5 @@ class TestBuiltins(MockerTestCase):
'test.conf', 'blah', freq)
h.handle_part('', handlers.CONTENT_END,
None, None, None)
- self.assertEquals(1, len(os.listdir(up_root)))
+
+ self.assertEquals(1, len(os.listdir('/etc/upstart')))
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
new file mode 100644
index 00000000..1ca6a79d
--- /dev/null
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -0,0 +1,331 @@
+from cloudinit import helpers
+from cloudinit.sources import DataSourceAzure
+from tests.unittests.helpers import populate_dir
+
+import crypt
+import base64
+from mocker import MockerTestCase
+import os
+import yaml
+
+
+def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
+ if data is None:
+ data = {'HostName': 'FOOHOST'}
+ if pubkeys is None:
+ pubkeys = {}
+
+ content = """<?xml version="1.0" encoding="utf-8"?>
+<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:wa="http://schemas.microsoft.com/windowsazure"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+ <wa:ProvisioningSection><wa:Version>1.0</wa:Version>
+ <LinuxProvisioningConfigurationSet
+ xmlns="http://schemas.microsoft.com/windowsazure"
+ xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
+ <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
+ """
+ for key, dval in data.items():
+ if isinstance(dval, dict):
+ val = dval.get('text')
+ attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v in dval.items()
+ if k != 'text'])
+ else:
+ val = dval
+ attrs = ""
+ content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
+
+ if userdata:
+ content += "<UserData>%s</UserData>\n" % (base64.b64encode(userdata))
+
+ if pubkeys:
+ content += "<SSH><PublicKeys>\n"
+ for fp, path in pubkeys:
+ content += " <PublicKey>"
+ content += ("<Fingerprint>%s</Fingerprint><Path>%s</Path>" %
+ (fp, path))
+ content += "</PublicKey>\n"
+ content += "</PublicKeys></SSH>"
+ content += """
+ </LinuxProvisioningConfigurationSet>
+ </wa:ProvisioningSection>
+ <wa:PlatformSettingsSection><wa:Version>1.0</wa:Version>
+ <PlatformSettings xmlns="http://schemas.microsoft.com/windowsazure"
+ xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
+ <KmsServerHostname>kms.core.windows.net</KmsServerHostname>
+ <ProvisionGuestAgent>false</ProvisionGuestAgent>
+ <GuestAgentPackageName i:nil="true" />
+ </PlatformSettings></wa:PlatformSettingsSection>
+</Environment>
+ """
+
+ return content
+
+
+class TestAzureDataSource(MockerTestCase):
+
+ def setUp(self):
+ # makeDir comes from MockerTestCase
+ self.tmp = self.makeDir()
+
+ # patch cloud_dir, so our 'seed_dir' is guaranteed empty
+ self.paths = helpers.Paths({'cloud_dir': self.tmp})
+
+ self.unapply = []
+ super(TestAzureDataSource, self).setUp()
+
+ def tearDown(self):
+ apply_patches([i for i in reversed(self.unapply)])
+ super(TestAzureDataSource, self).tearDown()
+
+ def apply_patches(self, patches):
+ ret = apply_patches(patches)
+ self.unapply += ret
+
+ def _get_ds(self, data):
+
+ def dsdevs():
+ return data.get('dsdevs', [])
+
+ def _invoke_agent(cmd):
+ data['agent_invoked'] = cmd
+
+ def _write_files(datadir, files, dirmode):
+ data['files'] = {}
+ data['datadir'] = datadir
+ data['datadir_mode'] = dirmode
+ for (fname, content) in files.items():
+ data['files'][fname] = content
+
+ def _wait_for_files(flist, _maxwait=None, _naplen=None):
+ data['waited'] = flist
+ return []
+
+ def _pubkeys_from_crt_files(flist):
+ data['pubkey_files'] = flist
+ return ["pubkey_from: %s" % f for f in flist]
+
+ def _iid_from_shared_config(path):
+ data['iid_from_shared_cfg'] = path
+ return 'i-my-azure-id'
+
+ def _apply_hostname_bounce(**kwargs):
+ data['apply_hostname_bounce'] = kwargs
+
+ if data.get('ovfcontent') is not None:
+ populate_dir(os.path.join(self.paths.seed_dir, "azure"),
+ {'ovf-env.xml': data['ovfcontent']})
+
+ mod = DataSourceAzure
+
+ if data.get('dsdevs'):
+ self.apply_patches([(mod, 'list_possible_azure_ds_devs', dsdevs)])
+
+ self.apply_patches([(mod, 'invoke_agent', _invoke_agent),
+ (mod, 'write_files', _write_files),
+ (mod, 'wait_for_files', _wait_for_files),
+ (mod, 'pubkeys_from_crt_files',
+ _pubkeys_from_crt_files),
+ (mod, 'iid_from_shared_config',
+ _iid_from_shared_config),
+ (mod, 'apply_hostname_bounce',
+ _apply_hostname_bounce), ])
+
+ dsrc = mod.DataSourceAzureNet(
+ data.get('sys_cfg', {}), distro=None, paths=self.paths)
+
+ return dsrc
+
+ def test_basic_seed_dir(self):
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': {}}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, "")
+ self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName'])
+ self.assertTrue('ovf-env.xml' in data['files'])
+ self.assertEqual(0700, data['datadir_mode'])
+ self.assertEqual(dsrc.metadata['instance-id'], 'i-my-azure-id')
+
+ def test_user_cfg_set_agent_command_plain(self):
+ # set dscfg in via plaintext
+ cfg = {'agent_command': "my_command"}
+ odata = {'HostName': "myhost", 'UserName': "myuser",
+ 'dscfg': {'text': yaml.dump(cfg), 'encoding': 'plain'}}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(data['agent_invoked'], cfg['agent_command'])
+
+ def test_user_cfg_set_agent_command(self):
+ # set dscfg in via base64 encoded yaml
+ cfg = {'agent_command': "my_command"}
+ odata = {'HostName': "myhost", 'UserName': "myuser",
+ 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)),
+ 'encoding': 'base64'}}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(data['agent_invoked'], cfg['agent_command'])
+
+ def test_sys_cfg_set_agent_command(self):
+ sys_cfg = {'datasource': {'Azure': {'agent_command': '_COMMAND'}}}
+ data = {'ovfcontent': construct_valid_ovf_env(data={}),
+ 'sys_cfg': sys_cfg}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(data['agent_invoked'], '_COMMAND')
+
+ def test_username_used(self):
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.cfg['system_info']['default_user']['name'],
+ "myuser")
+
+ def test_password_given(self):
+ odata = {'HostName': "myhost", 'UserName': "myuser",
+ 'UserPassword': "mypass"}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertTrue('default_user' in dsrc.cfg['system_info'])
+ defuser = dsrc.cfg['system_info']['default_user']
+
+        # the default user should be renamed to the provided username
+        # and should not be locked.
+ self.assertEqual(defuser['name'], odata['UserName'])
+ self.assertFalse(defuser['lock_passwd'])
+        # passwd is a crypt-formatted string: $id$salt$encrypted.
+        # Re-encrypting the plaintext with everything up to the final
+        # '$' (i.e. the salt) should reproduce the part after the '$'.
+ pos = defuser['passwd'].rfind("$") + 1
+ self.assertEqual(defuser['passwd'],
+ crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos]))
+
+ def test_userdata_found(self):
+ mydata = "FOOBAR"
+ odata = {'UserData': base64.b64encode(mydata)}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, mydata)
+
+ def test_no_datasource_expected(self):
+        # no source should be found if there is no seed_dir and no devs
+        data = {}
+        dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertFalse(ret)
+ self.assertFalse('agent_invoked' in data)
+
+ def test_cfg_has_pubkeys(self):
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}]
+ pubkeys = [(x['fingerprint'], x['path']) for x in mypklist]
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata,
+ pubkeys=pubkeys)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ for mypk in mypklist:
+ self.assertIn(mypk, dsrc.cfg['_pubkeys'])
+
+ def test_disabled_bounce(self):
+ pass
+
+ def test_apply_bounce_call_1(self):
+ # hostname needs to get through to apply_hostname_bounce
+ odata = {'HostName': 'my-random-hostname'}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+ self._get_ds(data).get_data()
+ self.assertIn('hostname', data['apply_hostname_bounce'])
+ self.assertEqual(data['apply_hostname_bounce']['hostname'],
+ odata['HostName'])
+
+ def test_apply_bounce_call_configurable(self):
+ # hostname_bounce should be configurable in datasource cfg
+ cfg = {'hostname_bounce': {'interface': 'eth1', 'policy': 'off',
+ 'command': 'my-bounce-command',
+ 'hostname_command': 'my-hostname-command'}}
+ odata = {'HostName': "xhost",
+ 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)),
+ 'encoding': 'base64'}}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+ self._get_ds(data).get_data()
+
+ for k in cfg['hostname_bounce']:
+ self.assertIn(k, data['apply_hostname_bounce'])
+
+ for k, v in cfg['hostname_bounce'].items():
+ self.assertEqual(data['apply_hostname_bounce'][k], v)
+
+ def test_set_hostname_disabled(self):
+ # config specifying set_hostname off should not bounce
+ cfg = {'set_hostname': False}
+ odata = {'HostName': "xhost",
+ 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)),
+ 'encoding': 'base64'}}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+ self._get_ds(data).get_data()
+
+ self.assertEqual(data.get('apply_hostname_bounce', "N/A"), "N/A")
+
+
+class TestReadAzureOvf(MockerTestCase):
+ def test_invalid_xml_raises_non_azure_ds(self):
+ invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
+ self.assertRaises(DataSourceAzure.NonAzureDataSource,
+ DataSourceAzure.read_azure_ovf, invalid_xml)
+
+ def test_load_with_pubkeys(self):
+ mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}]
+ pubkeys = [(x['fingerprint'], x['path']) for x in mypklist]
+ content = construct_valid_ovf_env(pubkeys=pubkeys)
+ (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content)
+ for mypk in mypklist:
+ self.assertIn(mypk, cfg['_pubkeys'])
+
+
+class TestReadAzureSharedConfig(MockerTestCase):
+ def test_valid_content(self):
+ xml = """<?xml version="1.0" encoding="utf-8"?>
+ <SharedConfig>
+ <Deployment name="MY_INSTANCE_ID">
+ <Service name="myservice"/>
+ <ServiceInstance name="INSTANCE_ID.0" guid="{abcd-uuid}" />
+ </Deployment>
+ <Incarnation number="1"/>
+ </SharedConfig>"""
+ ret = DataSourceAzure.iid_from_shared_config_content(xml)
+ self.assertEqual("MY_INSTANCE_ID", ret)
+
+
+def apply_patches(patches):
+ ret = []
+ for (ref, name, replace) in patches:
+ if replace is None:
+ continue
+ orig = getattr(ref, name)
+ setattr(ref, name, replace)
+ ret.append((ref, name, orig))
+ return ret
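
apply_patches above is a minimal monkey-patching helper: it swaps attributes on the given objects and returns (ref, name, original) tuples so tearDown can undo the swaps in reverse order. Typical use within these tests:

    # Illustrative use of the apply_patches helper defined above.
    def fake_invoke_agent(cmd):
        pass   # avoid shelling out during tests

    undo = apply_patches(
        [(DataSourceAzure, 'invoke_agent', fake_invoke_agent)])
    try:
        pass   # ... exercise the datasource ...
    finally:
        apply_patches([i for i in reversed(undo)])  # restore originals
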
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 930086db..d5935294 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -259,8 +259,9 @@ class TestConfigDriveDataSource(MockerTestCase):
def test_find_candidates(self):
devs_with_answers = {}
- def my_devs_with(criteria):
- return devs_with_answers[criteria]
+ def my_devs_with(*args, **kwargs):
+ criteria = args[0] if len(args) else kwargs.pop('criteria', None)
+ return devs_with_answers.get(criteria, [])
def my_is_partition(dev):
return dev[-1] in "0123456789" and not dev.startswith("sr")
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index b56fea82..2007a6df 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -3,12 +3,13 @@ import os
from cloudinit.sources import DataSourceMAAS
from cloudinit import url_helper
+from cloudinit import util
from tests.unittests.helpers import populate_dir
-from mocker import MockerTestCase
+import mocker
-class TestMAASDataSource(MockerTestCase):
+class TestMAASDataSource(mocker.MockerTestCase):
def setUp(self):
super(TestMAASDataSource, self).setUp()
@@ -115,9 +116,12 @@ class TestMAASDataSource(MockerTestCase):
for key in valid_order:
url = "%s/%s/%s" % (my_seed, my_ver, key)
- mock_request(url, headers=my_headers, timeout=None)
+ mock_request(url, headers=None, timeout=mocker.ANY,
+ data=mocker.ANY, sec_between=mocker.ANY,
+ ssl_details=mocker.ANY, retries=mocker.ANY,
+ headers_cb=my_headers_cb)
resp = valid.get(key)
- self.mocker.result(url_helper.UrlResponse(200, resp))
+ self.mocker.result(util.StringResponse(resp))
self.mocker.replay()
(userdata, metadata) = DataSourceMAAS.read_maas_seed_url(my_seed,
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 28e0a472..7328b240 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -1,7 +1,7 @@
from cloudinit import helpers
-from tests.unittests.helpers import populate_dir
from cloudinit.sources import DataSourceNoCloud
from cloudinit import util
+from tests.unittests.helpers import populate_dir
from mocker import MockerTestCase
import os
@@ -22,7 +22,7 @@ class TestNoCloudDataSource(MockerTestCase):
def tearDown(self):
apply_patches([i for i in reversed(self.unapply)])
- super(TestNoCloudDataSource, self).setUp()
+ super(TestNoCloudDataSource, self).tearDown()
def apply_patches(self, patches):
ret = apply_patches(patches)
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
new file mode 100644
index 00000000..f53715b0
--- /dev/null
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -0,0 +1,273 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2013 Canonical Ltd.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# This is a test case for the SmartOS datasource. It mocks the serial
+# console and responds the way the SmartOS metadata console would, in
+# order to validate the datasource's handling of the responses.
+#
+
+import base64
+from cloudinit import helpers
+from cloudinit.sources import DataSourceSmartOS
+
+from mocker import MockerTestCase
+import uuid
+
+MOCK_RETURNS = {
+ 'hostname': 'test-host',
+ 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname',
+ 'disable_iptables_flag': None,
+ 'enable_motd_sys_info': None,
+ 'test-var1': 'some data',
+ 'user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
+}
+
+DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc')
+
+
+class MockSerial(object):
+    """Fake a serial terminal for testing the code that
+    interfaces with the serial console."""
+
+ port = None
+
+ def __init__(self, mockdata):
+        self.last = None
+ self.new = True
+ self.count = 0
+ self.mocked_out = []
+ self.mockdata = mockdata
+
+ def open(self):
+ return True
+
+ def close(self):
+ return True
+
+ def isOpen(self):
+ return True
+
+ def write(self, line):
+ line = line.replace('GET ', '')
+ self.last = line.rstrip()
+
+ def readline(self):
+ if self.new:
+ self.new = False
+ if self.last in self.mockdata:
+ return 'SUCCESS\n'
+ else:
+ return 'NOTFOUND %s\n' % self.last
+
+ if self.last in self.mockdata:
+ if not self.mocked_out:
+ self.mocked_out = [x for x in self._format_out()]
+
+ if len(self.mocked_out) > self.count:
+ self.count += 1
+ return self.mocked_out[self.count - 1]
+
+ def _format_out(self):
+ if self.last in self.mockdata:
+ _mret = self.mockdata[self.last]
+ try:
+ for l in _mret.splitlines():
+ yield "%s\n" % l.rstrip()
+            except AttributeError:  # value was not a string
+ yield "%s\n" % _mret.rstrip()
+
+ yield '.'
+ yield '\n'
+
+
+class TestSmartOSDataSource(MockerTestCase):
+ def setUp(self):
+ # makeDir comes from MockerTestCase
+ self.tmp = self.makeDir()
+
+ # patch cloud_dir, so our 'seed_dir' is guaranteed empty
+ self.paths = helpers.Paths({'cloud_dir': self.tmp})
+
+ self.unapply = []
+ super(TestSmartOSDataSource, self).setUp()
+
+ def tearDown(self):
+ apply_patches([i for i in reversed(self.unapply)])
+ super(TestSmartOSDataSource, self).tearDown()
+
+ def apply_patches(self, patches):
+ ret = apply_patches(patches)
+ self.unapply += ret
+
+ def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None):
+ mod = DataSourceSmartOS
+
+ if mockdata is None:
+ mockdata = MOCK_RETURNS
+
+ if dmi_data is None:
+ dmi_data = DMI_DATA_RETURN
+
+ def _get_serial(*_):
+ return MockSerial(mockdata)
+
+ def _dmi_data():
+ return dmi_data
+
+ if sys_cfg is None:
+ sys_cfg = {}
+
+ if ds_cfg is not None:
+ sys_cfg['datasource'] = sys_cfg.get('datasource', {})
+ sys_cfg['datasource']['SmartOS'] = ds_cfg
+
+ self.apply_patches([(mod, 'get_serial', _get_serial)])
+ self.apply_patches([(mod, 'dmi_data', _dmi_data)])
+ dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None,
+ paths=self.paths)
+ return dsrc
+
+ def test_seed(self):
+ # default seed should be /dev/ttyS1
+ dsrc = self._get_ds()
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals('/dev/ttyS1', dsrc.seed)
+
+ def test_issmartdc(self):
+ dsrc = self._get_ds()
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertTrue(dsrc.is_smartdc)
+
+ def test_no_base64(self):
+ ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True}
+ dsrc = self._get_ds(ds_cfg=ds_cfg)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ def test_uuid(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals(DMI_DATA_RETURN[0], dsrc.metadata['instance-id'])
+
+ def test_root_keys(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals(MOCK_RETURNS['root_authorized_keys'],
+ dsrc.metadata['public-keys'])
+
+    def test_hostname_b64(self):
+        my_returns = MOCK_RETURNS.copy()
+        my_returns['b64-hostname'] = "true"
+        my_returns['hostname'] = base64.b64encode(my_returns['hostname'])
+
+        dsrc = self._get_ds(mockdata=my_returns)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEquals(MOCK_RETURNS['hostname'],
+                          dsrc.metadata['local-hostname'])
+
+ def test_hostname(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals(MOCK_RETURNS['hostname'],
+ dsrc.metadata['local-hostname'])
+
+ def test_base64_all(self):
+ # metadata provided base64_all of true
+ my_returns = MOCK_RETURNS.copy()
+ my_returns['base64_all'] = "true"
+ for k in ('hostname', 'user-data'):
+ my_returns[k] = base64.b64encode(my_returns[k])
+
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals(MOCK_RETURNS['hostname'],
+ dsrc.metadata['local-hostname'])
+ self.assertEquals(MOCK_RETURNS['user-data'],
+ dsrc.userdata_raw)
+ self.assertEquals(MOCK_RETURNS['root_authorized_keys'],
+ dsrc.metadata['public-keys'])
+ self.assertEquals(MOCK_RETURNS['disable_iptables_flag'],
+ dsrc.metadata['iptables_disable'])
+ self.assertEquals(MOCK_RETURNS['enable_motd_sys_info'],
+ dsrc.metadata['motd_sys_info'])
+
+ def test_b64_userdata(self):
+ my_returns = MOCK_RETURNS.copy()
+ my_returns['b64-user-data'] = "true"
+ my_returns['b64-hostname'] = "true"
+ for k in ('hostname', 'user-data'):
+ my_returns[k] = base64.b64encode(my_returns[k])
+
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals(MOCK_RETURNS['hostname'],
+ dsrc.metadata['local-hostname'])
+ self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw)
+ self.assertEquals(MOCK_RETURNS['root_authorized_keys'],
+ dsrc.metadata['public-keys'])
+
+ def test_b64_keys(self):
+ my_returns = MOCK_RETURNS.copy()
+ my_returns['base64_keys'] = 'hostname,ignored'
+ for k in ('hostname',):
+ my_returns[k] = base64.b64encode(my_returns[k])
+
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals(MOCK_RETURNS['hostname'],
+ dsrc.metadata['local-hostname'])
+ self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw)
+
+ def test_userdata(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw)
+
+ def test_disable_iptables_flag(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals(MOCK_RETURNS['disable_iptables_flag'],
+ dsrc.metadata['iptables_disable'])
+
+ def test_motd_sys_info(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals(MOCK_RETURNS['enable_motd_sys_info'],
+ dsrc.metadata['motd_sys_info'])
+
+
+def apply_patches(patches):
+ ret = []
+ for (ref, name, replace) in patches:
+ if replace is None:
+ continue
+ orig = getattr(ref, name)
+ setattr(ref, name, replace)
+ ret.append((ref, name, orig))
+ return ret
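
MockSerial above fakes the SmartOS metadata protocol spoken over the guest's second serial port (the default seed, /dev/ttyS1, per test_seed): the client writes 'GET <key>\n', reads a 'SUCCESS' or 'NOTFOUND <key>' status line, and on success reads value lines until a lone '.' terminator. A hedged client-side sketch of that exchange (the real logic lives in DataSourceSmartOS; names here are illustrative):

    # Assumes a pyserial-like object with write()/readline(), which is
    # exactly the surface MockSerial fakes.
    def query_key(ser, key):
        ser.write('GET %s\n' % key)
        status = ser.readline().rstrip()
        if status.startswith('NOTFOUND'):
            return None
        lines = []
        while True:
            line = ser.readline()
            if line.rstrip() == '.':    # lone '.' ends the value
                return ''.join(lines).rstrip('\n')
            lines.append(line)
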
diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py
new file mode 100644
index 00000000..203dd2aa
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_apt_configure.py
@@ -0,0 +1,106 @@
+from mocker import MockerTestCase
+
+from cloudinit import util
+
+from cloudinit.config import cc_apt_configure
+
+import os
+import re
+
+
+class TestAptProxyConfig(MockerTestCase):
+ def setUp(self):
+ super(TestAptProxyConfig, self).setUp()
+ self.tmp = self.makeDir()
+ self.pfile = os.path.join(self.tmp, "proxy.cfg")
+ self.cfile = os.path.join(self.tmp, "config.cfg")
+
+ def _search_apt_config(self, contents, ptype, value):
+        return re.search(
+            r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value),
+            contents, flags=re.IGNORECASE)
+
+ def test_apt_proxy_written(self):
+ cfg = {'apt_proxy': 'myproxy'}
+ cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
+
+ self.assertTrue(os.path.isfile(self.pfile))
+ self.assertFalse(os.path.isfile(self.cfile))
+
+ contents = str(util.read_file_or_url(self.pfile))
+ self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
+
+ def test_apt_http_proxy_written(self):
+ cfg = {'apt_http_proxy': 'myproxy'}
+ cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
+
+ self.assertTrue(os.path.isfile(self.pfile))
+ self.assertFalse(os.path.isfile(self.cfile))
+
+ contents = str(util.read_file_or_url(self.pfile))
+ self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
+
+ def test_apt_all_proxy_written(self):
+ cfg = {'apt_http_proxy': 'myproxy_http_proxy',
+ 'apt_https_proxy': 'myproxy_https_proxy',
+ 'apt_ftp_proxy': 'myproxy_ftp_proxy'}
+
+ values = {'http': cfg['apt_http_proxy'],
+ 'https': cfg['apt_https_proxy'],
+ 'ftp': cfg['apt_ftp_proxy'],
+ }
+
+ cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
+
+ self.assertTrue(os.path.isfile(self.pfile))
+ self.assertFalse(os.path.isfile(self.cfile))
+
+ contents = str(util.read_file_or_url(self.pfile))
+
+ for ptype, pval in values.iteritems():
+ self.assertTrue(self._search_apt_config(contents, ptype, pval))
+
+ def test_proxy_deleted(self):
+ util.write_file(self.cfile, "content doesnt matter")
+ cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile)
+ self.assertFalse(os.path.isfile(self.pfile))
+ self.assertFalse(os.path.isfile(self.cfile))
+
+ def test_proxy_replaced(self):
+ util.write_file(self.cfile, "content doesnt matter")
+ cc_apt_configure.apply_apt_config({'apt_proxy': "foo"},
+ self.pfile, self.cfile)
+ self.assertTrue(os.path.isfile(self.pfile))
+ contents = str(util.read_file_or_url(self.pfile))
+ self.assertTrue(self._search_apt_config(contents, "http", "foo"))
+
+ def test_config_written(self):
+ payload = 'this is my apt config'
+ cfg = {'apt_config': payload}
+
+ cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
+
+ self.assertTrue(os.path.isfile(self.cfile))
+ self.assertFalse(os.path.isfile(self.pfile))
+
+ self.assertEqual(str(util.read_file_or_url(self.cfile)), payload)
+
+ def test_config_replaced(self):
+ util.write_file(self.pfile, "content doesnt matter")
+ cc_apt_configure.apply_apt_config({'apt_config': "foo"},
+ self.pfile, self.cfile)
+ self.assertTrue(os.path.isfile(self.cfile))
+ self.assertEqual(str(util.read_file_or_url(self.cfile)), "foo")
+
+ def test_config_deleted(self):
+ # if no 'apt_config' is provided, delete any previously written file
+ util.write_file(self.pfile, "content doesnt matter")
+ cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile)
+ self.assertFalse(os.path.isfile(self.pfile))
+ self.assertFalse(os.path.isfile(self.cfile))
+
+
+# vi: ts=4 expandtab
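
The _search_apt_config() pattern in the tests above implies that apply_apt_config() renders proxy settings as apt configuration stanzas of the form Acquire::<type>::Proxy "<value>";. A hedged sketch of just that rendering step (render_proxy_snippet is a made-up name for illustration, not the cc_apt_configure API):

    def render_proxy_snippet(proxies):
        # proxies: e.g. {'http': 'myproxy', 'ftp': 'ftpproxy'}
        lines = ['Acquire::%s::Proxy "%s";' % (ptype, value)
                 for (ptype, value) in sorted(proxies.items())]
        return '\n'.join(lines) + '\n'

    # produces the line the http proxy tests grep for
    assert 'Acquire::http::Proxy "myproxy";' in render_proxy_snippet(
        {'http': 'myproxy'})
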
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
new file mode 100644
index 00000000..c0497e08
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_growpart.py
@@ -0,0 +1,258 @@
+from mocker import MockerTestCase
+
+from cloudinit import cloud
+from cloudinit import util
+
+from cloudinit.config import cc_growpart
+
+import errno
+import logging
+import os
+import re
+import unittest
+
+# growpart:
+# mode: auto # off, on, auto, 'growpart', 'parted'
+# devices: ['root']
+
+HELP_PARTED_NO_RESIZE = """
+Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...]
+Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in
+interactive mode.
+
+OPTIONs:
+<SNIP>
+
+COMMANDs:
+<SNIP>
+ quit exit program
+ rescue START END rescue a lost partition near START
+ and END
+ resize NUMBER START END resize partition NUMBER and its file
+ system
+ rm NUMBER delete partition NUMBER
+<SNIP>
+Report bugs to bug-parted@gnu.org
+"""
+
+HELP_PARTED_RESIZE = """
+Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...]
+Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in
+interactive mode.
+
+OPTIONs:
+<SNIP>
+
+COMMANDs:
+<SNIP>
+ quit exit program
+ rescue START END rescue a lost partition near START
+ and END
+ resize NUMBER START END resize partition NUMBER and its file
+ system
+ resizepart NUMBER END resize partition NUMBER
+ rm NUMBER delete partition NUMBER
+<SNIP>
+Report bugs to bug-parted@gnu.org
+"""
+
+HELP_GROWPART_RESIZE = """
+growpart disk partition
+ rewrite partition table so that partition takes up all the space it can
+ options:
+ -h | --help print Usage and exit
+<SNIP>
+ -u | --update R update the kernel partition table info after growing
+ this requires kernel support and 'partx --update'
+ R is one of:
+ - 'auto' : [default] update partition if possible
+<SNIP>
+ Example:
+ - growpart /dev/sda 1
+ Resize partition 1 on /dev/sda
+"""
+
+HELP_GROWPART_NO_RESIZE = """
+growpart disk partition
+ rewrite partition table so that partition takes up all the space it can
+ options:
+ -h | --help print Usage and exit
+<SNIP>
+ Example:
+ - growpart /dev/sda 1
+ Resize partition 1 on /dev/sda
+"""
+
+
+class TestDisabled(MockerTestCase):
+ def setUp(self):
+ super(TestDisabled, self).setUp()
+ self.name = "growpart"
+ self.cloud_init = None
+ self.log = logging.getLogger("TestDisabled")
+ self.args = []
+
+ self.handle = cc_growpart.handle
+
+ def test_mode_off(self):
+ # Test that nothing is done if mode is off.
+
+ # this really only verifies that resizer_factory isn't called
+ config = {'growpart': {'mode': 'off'}}
+ self.mocker.replace(cc_growpart.resizer_factory,
+ passthrough=False)
+ self.mocker.replay()
+
+ self.handle(self.name, config, self.cloud_init, self.log, self.args)
+
+
+class TestConfig(MockerTestCase):
+ def setUp(self):
+ super(TestConfig, self).setUp()
+ self.name = "growpart"
+ self.paths = None
+ self.cloud = cloud.Cloud(None, self.paths, None, None, None)
+ self.log = logging.getLogger("TestConfig")
+ self.args = []
+ os.environ = {}
+
+ self.cloud_init = None
+ self.handle = cc_growpart.handle
+
+ # Order must be correct
+ self.mocker.order()
+
+ @unittest.skip("until LP: #1212444 fixed")
+ def test_no_resizers_auto_is_fine(self):
+ subp = self.mocker.replace(util.subp, passthrough=False)
+ subp(['parted', '--help'], env={'LANG': 'C'})
+ self.mocker.result((HELP_PARTED_NO_RESIZE, ""))
+ subp(['growpart', '--help'], env={'LANG': 'C'})
+ self.mocker.result((HELP_GROWPART_NO_RESIZE, ""))
+ self.mocker.replay()
+
+ config = {'growpart': {'mode': 'auto'}}
+ self.handle(self.name, config, self.cloud_init, self.log, self.args)
+
+ def test_no_resizers_mode_growpart_is_exception(self):
+ subp = self.mocker.replace(util.subp, passthrough=False)
+ subp(['growpart', '--help'], env={'LANG': 'C'})
+ self.mocker.result((HELP_GROWPART_NO_RESIZE, ""))
+ self.mocker.replay()
+
+ config = {'growpart': {'mode': "growpart"}}
+ self.assertRaises(ValueError, self.handle, self.name, config,
+ self.cloud_init, self.log, self.args)
+
+ @unittest.skip("until LP: #1212444 fixed")
+ def test_mode_auto_prefers_parted(self):
+ subp = self.mocker.replace(util.subp, passthrough=False)
+ subp(['parted', '--help'], env={'LANG': 'C'})
+ self.mocker.result((HELP_PARTED_RESIZE, ""))
+ self.mocker.replay()
+
+ ret = cc_growpart.resizer_factory(mode="auto")
+ self.assertTrue(isinstance(ret, cc_growpart.ResizeParted))
+
+ def test_handle_with_no_growpart_entry(self):
+ # if no 'growpart' entry in config, then mode=auto should be used
+
+ myresizer = object()
+
+ factory = self.mocker.replace(cc_growpart.resizer_factory,
+ passthrough=False)
+ rsdevs = self.mocker.replace(cc_growpart.resize_devices,
+ passthrough=False)
+ factory("auto")
+ self.mocker.result(myresizer)
+ rsdevs(myresizer, ["/"])
+ self.mocker.result((("/", cc_growpart.RESIZE.CHANGED, "my-message",),))
+ self.mocker.replay()
+
+ try:
+ orig_resizers = cc_growpart.RESIZERS
+ cc_growpart.RESIZERS = (('mysizer', object),)
+ self.handle(self.name, {}, self.cloud_init, self.log, self.args)
+ finally:
+ cc_growpart.RESIZERS = orig_resizers
+
+
+class TestResize(MockerTestCase):
+ def setUp(self):
+ super(TestResize, self).setUp()
+ self.name = "growpart"
+ self.log = logging.getLogger("TestResize")
+
+ # Order must be correct
+ self.mocker.order()
+
+ def test_simple_devices(self):
+ # test a simple device list
+ # this patches out devent2dev, os.stat, and device_part_info,
+ # so in the end it doesn't test a lot
+ devs = ["/dev/XXda1", "/dev/YYda2"]
+ devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5L,
+ st_nlink=1, st_uid=0, st_gid=6, st_size=0,
+ st_atime=0, st_mtime=0, st_ctime=0)
+ enoent = ["/dev/NOENT"]
+ real_stat = os.stat
+ resize_calls = []
+
+ class myresizer(object):
+ def resize(self, diskdev, partnum, partdev):
+ resize_calls.append((diskdev, partnum, partdev))
+ if partdev == "/dev/YYda2":
+ return (1024, 2048)
+ return (1024, 1024) # old size, new size
+
+ def mystat(path):
+ if path in devs:
+ return devstat_ret
+ if path in enoent:
+ e = OSError("%s: does not exist" % path)
+ e.errno = errno.ENOENT
+ raise e
+ return real_stat(path)
+
+ try:
+ opinfo = cc_growpart.device_part_info
+ cc_growpart.device_part_info = simple_device_part_info
+ os.stat = mystat
+
+ resized = cc_growpart.resize_devices(myresizer(), devs + enoent)
+
+ def find(name, res):
+ for f in res:
+ if f[0] == name:
+ return f
+ return None
+
+ self.assertEqual(cc_growpart.RESIZE.NOCHANGE,
+ find("/dev/XXda1", resized)[1])
+ self.assertEqual(cc_growpart.RESIZE.CHANGED,
+ find("/dev/YYda2", resized)[1])
+ self.assertEqual(cc_growpart.RESIZE.SKIPPED,
+ find(enoent[0], resized)[1])
+ #self.assertEqual(resize_calls,
+ #[("/dev/XXda", "1", "/dev/XXda1"),
+ #("/dev/YYda", "2", "/dev/YYda2")])
+ finally:
+ cc_growpart.device_part_info = opinfo
+ os.stat = real_stat
+
+
+def simple_device_part_info(devpath):
+ # simple stupid return of ('/dev/vda', '1') for '/dev/vda1'
+ ret = re.search("([^0-9]*)([0-9]*)$", devpath)
+ x = (ret.group(1), ret.group(2))
+ return x
+
+
+class Bunch:
+ st_mode = None # fix pylint complaint
+
+ def __init__(self, **kwds):
+ self.__dict__.update(kwds)
+
+
+# vi: ts=4 expandtab
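
simple_device_part_info() above leans on a single regex: trailing digits are the partition number and everything before them is the disk device. A quick standalone check of what that pattern returns (plain re behaviour, nothing cloud-init specific):

    import re

    def split_part(devpath):
        # trailing digits = partition number, prefix = disk device
        m = re.search("([^0-9]*)([0-9]*)$", devpath)
        return (m.group(1), m.group(2))

    assert split_part("/dev/XXda1") == ("/dev/XXda", "1")
    assert split_part("/dev/YYda2") == ("/dev/YYda", "2")
    # no trailing digits: the partition number comes back empty
    assert split_part("/dev/vda") == ("/dev/vda", "")
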
diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py
new file mode 100644
index 00000000..72ad00fd
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_locale.py
@@ -0,0 +1,64 @@
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# Based on test_handler_set_hostname.py
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit.config import cc_locale
+
+from cloudinit import cloud
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import util
+
+from cloudinit.sources import DataSourceNoCloud
+
+from tests.unittests import helpers as t_help
+
+from configobj import ConfigObj
+
+from StringIO import StringIO
+
+import logging
+
+LOG = logging.getLogger(__name__)
+
+
+class TestLocale(t_help.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestLocale, self).setUp()
+ self.new_root = self.makeDir(prefix="unittest_")
+
+ def _get_cloud(self, distro):
+ self.patchUtils(self.new_root)
+ paths = helpers.Paths({})
+
+ cls = distros.fetch(distro)
+ d = cls(distro, {}, paths)
+ ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
+ cc = cloud.Cloud(ds, paths, {}, d, None)
+ return cc
+
+ def test_set_locale_sles(self):
+
+ cfg = {
+ 'locale': 'My.Locale',
+ }
+ cc = self._get_cloud('sles')
+ cc_locale.handle('cc_locale', cfg, cc, LOG, [])
+
+ contents = util.load_file('/etc/sysconfig/language')
+ n_cfg = ConfigObj(StringIO(contents))
+ self.assertEquals({'RC_LANG': cfg['locale']}, dict(n_cfg))
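
The final assertion treats /etc/sysconfig/language as shell-style KEY="value" pairs and lets ConfigObj strip the quotes. The same parse in isolation, for reference (same ConfigObj/StringIO usage as the test itself):

    from StringIO import StringIO
    from configobj import ConfigObj

    contents = 'RC_LANG="My.Locale"\n'
    n_cfg = ConfigObj(StringIO(contents))
    # ConfigObj unquotes the value, so a plain dict compares cleanly
    assert dict(n_cfg) == {'RC_LANG': 'My.Locale'}
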
diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py
index a1aba62f..6344ec0c 100644
--- a/tests/unittests/test_handler/test_handler_set_hostname.py
+++ b/tests/unittests/test_handler/test_handler_set_hostname.py
@@ -35,7 +35,6 @@ class TestHostname(t_help.FilesystemMockingTestCase):
ds = None
cc = cloud.Cloud(ds, paths, {}, distro, None)
self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
cc_set_hostname.handle('cc_set_hostname',
cfg, cc, LOG, [])
contents = util.load_file("/etc/sysconfig/network")
@@ -56,3 +55,16 @@ class TestHostname(t_help.FilesystemMockingTestCase):
cfg, cc, LOG, [])
contents = util.load_file("/etc/hostname")
self.assertEquals('blah', contents.strip())
+
+ def test_write_hostname_sles(self):
+ cfg = {
+ 'hostname': 'blah.blah.blah.suse.com',
+ }
+ distro = self._fetch_distro('sles')
+ paths = helpers.Paths({})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, [])
+ contents = util.load_file("/etc/HOSTNAME")
+ self.assertEquals('blah', contents.strip())
diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/test_handler/test_handler_timezone.py
new file mode 100644
index 00000000..40b69773
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_timezone.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# Based on test_handler_set_hostname.py
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit.config import cc_timezone
+
+from cloudinit import cloud
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import util
+
+from cloudinit.sources import DataSourceNoCloud
+
+from tests.unittests import helpers as t_help
+
+from configobj import ConfigObj
+
+from StringIO import StringIO
+
+import logging
+
+LOG = logging.getLogger(__name__)
+
+
+class TestTimezone(t_help.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestTimezone, self).setUp()
+ self.new_root = self.makeDir(prefix="unittest_")
+
+ def _get_cloud(self, distro):
+ self.patchUtils(self.new_root)
+ self.patchOS(self.new_root)
+
+ paths = helpers.Paths({})
+
+ cls = distros.fetch(distro)
+ d = cls(distro, {}, paths)
+ ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
+ cc = cloud.Cloud(ds, paths, {}, d, None)
+ return cc
+
+ def test_set_timezone_sles(self):
+
+ cfg = {
+ 'timezone': 'Tatooine/Bestine',
+ }
+ cc = self._get_cloud('sles')
+
+ # Create a dummy timezone file
+ dummy_contents = '0123456789abcdefgh'
+ util.write_file('/usr/share/zoneinfo/%s' % cfg['timezone'],
+ dummy_contents)
+
+ cc_timezone.handle('cc_timezone', cfg, cc, LOG, [])
+
+ contents = util.load_file('/etc/sysconfig/clock')
+ n_cfg = ConfigObj(StringIO(contents))
+ self.assertEquals({'TIMEZONE': cfg['timezone']}, dict(n_cfg))
+
+ contents = util.load_file('/etc/localtime')
+ self.assertEquals(dummy_contents, contents.strip())
diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
index 0037b966..486b9158 100644
--- a/tests/unittests/test_merging.py
+++ b/tests/unittests/test_merging.py
@@ -1,62 +1,255 @@
-from mocker import MockerTestCase
+from tests.unittests import helpers
+from cloudinit.handlers import cloud_config
+from cloudinit.handlers import (CONTENT_START, CONTENT_END)
+
+from cloudinit import helpers as c_helpers
from cloudinit import util
+import collections
+import glob
+import os
+import random
+import re
+import string # pylint: disable=W0402
+
+SOURCE_PAT = "source*.*yaml"
+EXPECTED_PAT = "expected%s.yaml"
+TYPES = [long, int, dict, str, list, tuple, None]
+
+
+def _old_mergedict(src, cand):
+ """
+ Merge values from C{cand} into C{src}.
+ If C{src} has a key C{cand} will not override.
+ Nested dictionaries are merged recursively.
+ """
+ if isinstance(src, dict) and isinstance(cand, dict):
+ for (k, v) in cand.iteritems():
+ if k not in src:
+ src[k] = v
+ else:
+ src[k] = _old_mergedict(src[k], v)
+ return src
+
+
+def _old_mergemanydict(*args):
+ out = {}
+ for a in args:
+ out = _old_mergedict(out, a)
+ return out
+
+
+def _random_str(rand):
+ base = ''
+ for _i in xrange(rand.randint(1, 2 ** 8)):
+ base += rand.choice(string.letters + string.digits)
+ return base
+
+
+class _NoMoreException(Exception):
+ pass
+
+
+def _make_dict(current_depth, max_depth, rand):
+ if current_depth >= max_depth:
+ raise _NoMoreException()
+ if current_depth == 0:
+ t = dict
+ else:
+ t = rand.choice(TYPES)
+ base = None
+ if t in [None]:
+ return base
+ if t in [dict, list, tuple]:
+ if t in [dict]:
+ amount = rand.randint(0, 5)
+ keys = [_random_str(rand) for _i in xrange(0, amount)]
+ base = {}
+ for k in keys:
+ try:
+ base[k] = _make_dict(current_depth + 1, max_depth, rand)
+ except _NoMoreException:
+ pass
+ elif t in [list, tuple]:
+ base = []
+ amount = rand.randint(0, 5)
+ for _i in xrange(0, amount):
+ try:
+ base.append(_make_dict(current_depth + 1, max_depth, rand))
+ except _NoMoreException:
+ pass
+ if t in [tuple]:
+ base = tuple(base)
+ elif t in [long, int]:
+ base = rand.randint(0, 2 ** 8)
+ elif t in [str]:
+ base = _random_str(rand)
+ return base
+
+
+def make_dict(max_depth, seed=None):
+ max_depth = max(1, max_depth)
+ rand = random.Random(seed)
+ return _make_dict(0, max_depth, rand)
+
+
+class TestSimpleRun(helpers.ResourceUsingTestCase):
+ def _load_merge_files(self):
+ merge_root = self.resourceLocation('merge_sources')
+ tests = []
+ source_ids = collections.defaultdict(list)
+ expected_files = {}
+ for fn in glob.glob(os.path.join(merge_root, SOURCE_PAT)):
+ base_fn = os.path.basename(fn)
+ file_id = re.match(r"source(\d+)\-(\d+)[.]yaml", base_fn)
+ if not file_id:
+ raise IOError("File %s does not have a numeric identifier"
+ % (fn))
+ file_id = int(file_id.group(1))
+ source_ids[file_id].append(fn)
+ expected_fn = os.path.join(merge_root, EXPECTED_PAT % (file_id))
+ if not os.path.isfile(expected_fn):
+ raise IOError("No expected file found at %s" % (expected_fn))
+ expected_files[file_id] = expected_fn
+ for i in sorted(source_ids.keys()):
+ source_file_contents = []
+ for fn in sorted(source_ids[i]):
+ source_file_contents.append([fn, util.load_file(fn)])
+ expected = util.load_yaml(util.load_file(expected_files[i]))
+ entry = [source_file_contents, [expected, expected_files[i]]]
+ tests.append(entry)
+ return tests
+
+ def test_seed_runs(self):
+ test_dicts = []
+ for i in range(1, 50):
+ base_dicts = []
+ for j in range(1, 50):
+ base_dicts.append(make_dict(5, i * j))
+ test_dicts.append(base_dicts)
+ for test in test_dicts:
+ c = _old_mergemanydict(*test)
+ d = util.mergemanydict(test)
+ self.assertEquals(c, d)
+
+ def test_merge_cc_samples(self):
+ tests = self._load_merge_files()
+ paths = c_helpers.Paths({})
+ cc_handler = cloud_config.CloudConfigPartHandler(paths)
+ cc_handler.cloud_fn = None
+ for (payloads, (expected_merge, expected_fn)) in tests:
+ cc_handler.handle_part(None, CONTENT_START, None,
+ None, None, None)
+ merging_fns = []
+ for (fn, contents) in payloads:
+ cc_handler.handle_part(None, None, "%s.yaml" % (fn),
+ contents, None, {})
+ merging_fns.append(fn)
+ merged_buf = cc_handler.cloud_buf
+ cc_handler.handle_part(None, CONTENT_END, None,
+ None, None, None)
+ fail_msg = "Equality failure on checking %s with %s: %s != %s"
+ fail_msg = fail_msg % (expected_fn,
+ ",".join(merging_fns), merged_buf,
+ expected_merge)
+ self.assertEquals(expected_merge, merged_buf, msg=fail_msg)
+
+ def test_compat_merges_dict(self):
+ a = {
+ '1': '2',
+ 'b': 'c',
+ }
+ b = {
+ 'b': 'e',
+ }
+ c = _old_mergedict(a, b)
+ d = util.mergemanydict([a, b])
+ self.assertEquals(c, d)
+
+ def test_compat_merges_dict2(self):
+ a = {
+ 'Blah': 1,
+ 'Blah2': 2,
+ 'Blah3': 3,
+ }
+ b = {
+ 'Blah': 1,
+ 'Blah2': 2,
+ 'Blah3': [1],
+ }
+ c = _old_mergedict(a, b)
+ d = util.mergemanydict([a, b])
+ self.assertEquals(c, d)
+
+ def test_compat_merges_list(self):
+ a = {'b': [1, 2, 3]}
+ b = {'b': [4, 5]}
+ c = {'b': [6, 7]}
+ e = _old_mergemanydict(a, b, c)
+ f = util.mergemanydict([a, b, c])
+ self.assertEquals(e, f)
+
+ def test_compat_merges_str(self):
+ a = {'b': "hi"}
+ b = {'b': "howdy"}
+ c = {'b': "hallo"}
+ e = _old_mergemanydict(a, b, c)
+ f = util.mergemanydict([a, b, c])
+ self.assertEquals(e, f)
+
+ def test_compat_merge_sub_dict(self):
+ a = {
+ '1': '2',
+ 'b': {
+ 'f': 'g',
+ 'e': 'c',
+ 'h': 'd',
+ 'hh': {
+ '1': 2,
+ },
+ }
+ }
+ b = {
+ 'b': {
+ 'e': 'c',
+ 'hh': {
+ '3': 4,
+ }
+ }
+ }
+ c = _old_mergedict(a, b)
+ d = util.mergemanydict([a, b])
+ self.assertEquals(c, d)
+
+ def test_compat_merge_sub_dict2(self):
+ a = {
+ '1': '2',
+ 'b': {
+ 'f': 'g',
+ }
+ }
+ b = {
+ 'b': {
+ 'e': 'c',
+ }
+ }
+ c = _old_mergedict(a, b)
+ d = util.mergemanydict([a, b])
+ self.assertEquals(c, d)
-class TestMergeDict(MockerTestCase):
- def test_simple_merge(self):
- """Test simple non-conflict merge."""
- source = {"key1": "value1"}
- candidate = {"key2": "value2"}
- result = util.mergedict(source, candidate)
- self.assertEqual({"key1": "value1", "key2": "value2"}, result)
-
- def test_nested_merge(self):
- """Test nested merge."""
- source = {"key1": {"key1.1": "value1.1"}}
- candidate = {"key1": {"key1.2": "value1.2"}}
- result = util.mergedict(source, candidate)
- self.assertEqual(
- {"key1": {"key1.1": "value1.1", "key1.2": "value1.2"}}, result)
-
- def test_merge_does_not_override(self):
- """Test that candidate doesn't override source."""
- source = {"key1": "value1", "key2": "value2"}
- candidate = {"key1": "value2", "key2": "NEW VALUE"}
- result = util.mergedict(source, candidate)
- self.assertEqual(source, result)
-
- def test_empty_candidate(self):
- """Test empty candidate doesn't change source."""
- source = {"key": "value"}
- candidate = {}
- result = util.mergedict(source, candidate)
- self.assertEqual(source, result)
-
- def test_empty_source(self):
- """Test empty source is replaced by candidate."""
- source = {}
- candidate = {"key": "value"}
- result = util.mergedict(source, candidate)
- self.assertEqual(candidate, result)
-
- def test_non_dict_candidate(self):
- """Test non-dict candidate is discarded."""
- source = {"key": "value"}
- candidate = "not a dict"
- result = util.mergedict(source, candidate)
- self.assertEqual(source, result)
-
- def test_non_dict_source(self):
- """Test non-dict source is not modified with a dict candidate."""
- source = "not a dict"
- candidate = {"key": "value"}
- result = util.mergedict(source, candidate)
- self.assertEqual(source, result)
-
- def test_neither_dict(self):
- """Test if neither candidate or source is dict source wins."""
- source = "source"
- candidate = "candidate"
- result = util.mergedict(source, candidate)
- self.assertEqual(source, result)
+ def test_compat_merge_sub_list(self):
+ a = {
+ '1': '2',
+ 'b': {
+ 'f': ['1'],
+ }
+ }
+ b = {
+ 'b': {
+ 'f': [],
+ }
+ }
+ c = _old_mergedict(a, b)
+ d = util.mergemanydict([a, b])
+ self.assertEquals(c, d)
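
All of the compat tests above hinge on the old merge rule spelled out in _old_mergedict's docstring: keys already present in the source win, and nested dictionaries merge recursively. A self-contained rerun of that rule on a concrete input (old_merge is a local re-statement for illustration, not the cloudinit.util code):

    def old_merge(src, cand):
        # existing keys win; nested dicts merge recursively
        if isinstance(src, dict) and isinstance(cand, dict):
            for (k, v) in cand.items():
                src[k] = old_merge(src[k], v) if k in src else v
        return src

    a = {'k': 'keep', 'sub': {'x': 1}}
    b = {'k': 'clobber', 'sub': {'y': 2}, 'new': 3}
    assert old_merge(a, b) == {'k': 'keep',
                               'sub': {'x': 1, 'y': 2},
                               'new': 3}
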
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
new file mode 100644
index 00000000..d8662cac
--- /dev/null
+++ b/tests/unittests/test_sshutil.py
@@ -0,0 +1,101 @@
+from cloudinit import ssh_util
+from unittest import TestCase
+
+
+VALID_CONTENT = {
+ 'dsa': (
+ "AAAAB3NzaC1kc3MAAACBAIrjOQSlSea19bExXBMBKBvcLhBoVvNBjCppNzllipF"
+ "W4jgIOMcNanULRrZGjkOKat6MWJNetSbV1E6IOFDQ16rQgsh/OvYU9XhzM8seLa"
+ "A21VszZuhIV7/2DE3vxu7B54zVzueG1O1Deq6goQCRGWBUnqO2yluJiG4HzrnDa"
+ "jzRAAAAFQDMPO96qXd4F5A+5b2f2MO7SpVomQAAAIBpC3K2zIbDLqBBs1fn7rsv"
+ "KcJvwihdlVjG7UXsDB76P2GNqVG+IlYPpJZ8TO/B/fzTMtrdXp9pSm9OY1+BgN4"
+ "REsZ2WNcvfgY33aWaEM+ieCcQigvxrNAF2FTVcbUIIxAn6SmHuQSWrLSfdHc8H7"
+ "hsrgeUPPdzjBD/cv2ZmqwZ1AAAAIAplIsScrJut5wJMgyK1JG0Kbw9JYQpLe95P"
+ "obB069g8+mYR8U0fysmTEdR44mMu0VNU5E5OhTYoTGfXrVrkR134LqFM2zpVVbE"
+ "JNDnIqDHxTkc6LY2vu8Y2pQ3/bVnllZZOda2oD5HQ7ovygQa6CH+fbaZHbdDUX/"
+ "5z7u2rVAlDw=="
+ ),
+ 'ecdsa': (
+ "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBITrGBB3cgJ"
+ "J7fPxvtMW9H3oRisNpJ3OAslxZeyP7I0A9BPAW0RQIwHVtVnM7zrp4nI+JLZov/"
+ "Ql7lc2leWL7CY="
+ ),
+ 'rsa': (
+ "AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5oz"
+ "emNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbD"
+ "c1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q"
+ "7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhT"
+ "YWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07"
+ "/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw=="
+ ),
+}
+
+TEST_OPTIONS = ("no-port-forwarding,no-agent-forwarding,no-X11-forwarding,"
+ 'command="echo \'Please login as the user \"ubuntu\" rather than the'
+ 'user \"root\".\';echo;sleep 10"')
+
+
+class TestAuthKeyLineParser(TestCase):
+ def test_simple_parse(self):
+ # test key line with common 3 fields (keytype, base64, comment)
+ parser = ssh_util.AuthKeyLineParser()
+ for ktype in ['rsa', 'ecdsa', 'dsa']:
+ content = VALID_CONTENT[ktype]
+ comment = 'user-%s@host' % ktype
+ line = ' '.join((ktype, content, comment,))
+ key = parser.parse(line)
+
+ self.assertEqual(key.base64, content)
+ self.assertFalse(key.options)
+ self.assertEqual(key.comment, comment)
+ self.assertEqual(key.keytype, ktype)
+
+ def test_parse_no_comment(self):
+ # test key line with key type and base64 only
+ parser = ssh_util.AuthKeyLineParser()
+ for ktype in ['rsa', 'ecdsa', 'dsa']:
+ content = VALID_CONTENT[ktype]
+ line = ' '.join((ktype, content,))
+ key = parser.parse(line)
+
+ self.assertEqual(key.base64, content)
+ self.assertFalse(key.options)
+ self.assertFalse(key.comment)
+ self.assertEqual(key.keytype, ktype)
+
+ def test_parse_with_keyoptions(self):
+ # test key line with options in it
+ parser = ssh_util.AuthKeyLineParser()
+ options = TEST_OPTIONS
+ for ktype in ['rsa', 'ecdsa', 'dsa']:
+ content = VALID_CONTENT[ktype]
+ comment = 'user-%s@host' % ktype
+ line = ' '.join((options, ktype, content, comment,))
+ key = parser.parse(line)
+
+ self.assertEqual(key.base64, content)
+ self.assertEqual(key.options, options)
+ self.assertEqual(key.comment, comment)
+ self.assertEqual(key.keytype, ktype)
+
+ def test_parse_with_options_passed_in(self):
+ # test that options passed to parse() override options on the line
+ parser = ssh_util.AuthKeyLineParser()
+
+ baseline = ' '.join(("rsa", VALID_CONTENT['rsa'], "user@host"))
+ myopts = "no-port-forwarding,no-agent-forwarding"
+
+ key = parser.parse("allowedopt" + " " + baseline)
+ self.assertEqual(key.options, "allowedopt")
+
+ key = parser.parse("overridden_opt " + baseline, options=myopts)
+ self.assertEqual(key.options, myopts)
+
+ def test_parse_invalid_keytype(self):
+ parser = ssh_util.AuthKeyLineParser()
+ key = parser.parse(' '.join(["badkeytype", VALID_CONTENT['rsa']]))
+
+ self.assertFalse(key.valid())
+
+
+# vi: ts=4 expandtab
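
The parser exercised in these tests splits an authorized_keys line into at most four fields: optional options, keytype, base64 body, optional comment. A rough standalone illustration of that layout (deliberately naive; the real AuthKeyLineParser also has to handle whitespace and quoted commas inside the options field, as in TEST_OPTIONS above, which this sketch does not):

    KEYTYPES = ('rsa', 'dsa', 'ecdsa')

    def naive_fields(line):
        # assumes any options field contains no whitespace
        parts = line.split(None, 1)
        options = None
        if parts[0] not in KEYTYPES:
            options, line = parts[0], parts[1]
        fields = line.split(None, 2) + [None, None]
        return (options, fields[0], fields[1], fields[2])

    assert naive_fields('rsa AAAA user@host') == \
        (None, 'rsa', 'AAAA', 'user@host')
    assert naive_fields('no-pty rsa AAAA') == ('no-pty', 'rsa', 'AAAA', None)
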
diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py
index 82a4c555..b227616c 100644
--- a/tests/unittests/test_userdata.py
+++ b/tests/unittests/test_userdata.py
@@ -2,19 +2,25 @@
import StringIO
+import gzip
import logging
import os
from email.mime.base import MIMEBase
+from email.mime.multipart import MIMEMultipart
+from email.mime.application import MIMEApplication
-from mocker import MockerTestCase
-
+from cloudinit import handlers
+from cloudinit import helpers as c_helpers
from cloudinit import log
from cloudinit import sources
from cloudinit import stages
+from cloudinit import util
INSTANCE_ID = "i-testing"
+from tests.unittests import helpers
+
class FakeDataSource(sources.DataSource):
@@ -26,22 +32,16 @@ class FakeDataSource(sources.DataSource):
# FIXME: these tests shouldn't be checking log output??
# Weirddddd...
-
-
-class TestConsumeUserData(MockerTestCase):
+class TestConsumeUserData(helpers.FilesystemMockingTestCase):
def setUp(self):
- MockerTestCase.setUp(self)
- # Replace the write so no actual files
- # get written out...
- self.mock_write = self.mocker.replace("cloudinit.util.write_file",
- passthrough=False)
+ helpers.FilesystemMockingTestCase.setUp(self)
self._log = None
self._log_file = None
self._log_handler = None
def tearDown(self):
- MockerTestCase.tearDown(self)
+ helpers.FilesystemMockingTestCase.tearDown(self)
if self._log_handler and self._log:
self._log.removeHandler(self._log_handler)
@@ -53,13 +53,134 @@ class TestConsumeUserData(MockerTestCase):
self._log.addHandler(self._log_handler)
return log_file
+ def test_simple_jsonp(self):
+ blob = '''
+#cloud-config-jsonp
+[
+ { "op": "add", "path": "/baz", "value": "qux" },
+ { "op": "add", "path": "/bar", "value": "qux2" }
+]
+'''
+
+ ci = stages.Init()
+ ci.datasource = FakeDataSource(blob)
+ new_root = self.makeDir()
+ self.patchUtils(new_root)
+ self.patchOS(new_root)
+ ci.fetch()
+ ci.consume_userdata()
+ cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
+ cc = util.load_yaml(cc_contents)
+ self.assertEquals(2, len(cc))
+ self.assertEquals('qux', cc['baz'])
+ self.assertEquals('qux2', cc['bar'])
+
+ def test_mixed_cloud_config(self):
+ blob_cc = '''
+#cloud-config
+a: b
+c: d
+'''
+ message_cc = MIMEBase("text", "cloud-config")
+ message_cc.set_payload(blob_cc)
+
+ blob_jp = '''
+#cloud-config-jsonp
+[
+ { "op": "replace", "path": "/a", "value": "c" },
+ { "op": "remove", "path": "/c" }
+]
+'''
+
+ message_jp = MIMEBase('text', "cloud-config-jsonp")
+ message_jp.set_payload(blob_jp)
+
+ message = MIMEMultipart()
+ message.attach(message_cc)
+ message.attach(message_jp)
+
+ ci = stages.Init()
+ ci.datasource = FakeDataSource(str(message))
+ new_root = self.makeDir()
+ self.patchUtils(new_root)
+ self.patchOS(new_root)
+ ci.fetch()
+ ci.consume_userdata()
+ cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
+ cc = util.load_yaml(cc_contents)
+ self.assertEquals(1, len(cc))
+ self.assertEquals('c', cc['a'])
+
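
test_simple_jsonp and test_mixed_cloud_config both depend on #cloud-config-jsonp parts being a list of JSON-patch operations applied against the merged cloud-config. A toy applier covering just the add/replace/remove ops and the flat '/key' paths these tests use (an illustration of the expected semantics only; the real handler presumably delegates to a full JSON patch implementation):

    import json

    def apply_jsonp(cfg, blob):
        # toy JSON patch: flat '/key' paths, three op types only
        for op in json.loads(blob):
            key = op['path'].lstrip('/')
            if op['op'] in ('add', 'replace'):
                cfg[key] = op['value']
            elif op['op'] == 'remove':
                del cfg[key]
        return cfg

    cfg = apply_jsonp({'a': 'b', 'c': 'd'},
                      '[{"op": "replace", "path": "/a", "value": "c"},'
                      ' {"op": "remove", "path": "/c"}]')
    assert cfg == {'a': 'c'}  # matches the test_mixed_cloud_config result
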
+ def test_merging_cloud_config(self):
+ blob = '''
+#cloud-config
+a: b
+e: f
+run:
+ - b
+ - c
+'''
+ message1 = MIMEBase("text", "cloud-config")
+ message1.set_payload(blob)
+
+ blob2 = '''
+#cloud-config
+a: e
+e: g
+run:
+ - stuff
+ - morestuff
+'''
+ message2 = MIMEBase("text", "cloud-config")
+ message2['X-Merge-Type'] = ('dict(recurse_array,'
+ 'recurse_str)+list(append)+str(append)')
+ message2.set_payload(blob2)
+
+ blob3 = '''
+#cloud-config
+e:
+ - 1
+ - 2
+ - 3
+p: 1
+'''
+ message3 = MIMEBase("text", "cloud-config")
+ message3.set_payload(blob3)
+
+ messages = [message1, message2, message3]
+
+ paths = c_helpers.Paths({}, ds=FakeDataSource(''))
+ cloud_cfg = handlers.cloud_config.CloudConfigPartHandler(paths)
+
+ new_root = self.makeDir()
+ self.patchUtils(new_root)
+ self.patchOS(new_root)
+ cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None,
+ None)
+ for i, m in enumerate(messages):
+ headers = dict(m)
+ fn = "part-%s" % (i + 1)
+ payload = m.get_payload(decode=True)
+ cloud_cfg.handle_part(None, headers['Content-Type'],
+ fn, payload, None, headers)
+ cloud_cfg.handle_part(None, handlers.CONTENT_END, None, None, None,
+ None)
+ contents = util.load_file(paths.get_ipath('cloud_config'))
+ contents = util.load_yaml(contents)
+ self.assertEquals(contents['run'], ['b', 'c', 'stuff', 'morestuff'])
+ self.assertEquals(contents['a'], 'be')
+ self.assertEquals(contents['e'], [1, 2, 3])
+ self.assertEquals(contents['p'], 1)
+
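
The X-Merge-Type header on message2 declares per-type merge rules: recurse into dicts, append lists, append strings. That is why the test expects 'a' to come out as 'be' ('b' with 'e' appended) and 'run' to be the concatenation of both lists. A toy reduction of those rules to the two-part case (illustrative only, not the cloudinit.mergers implementation, and ignoring the third part, which merges with default replace semantics):

    def toy_merge(base, new):
        # dict(recurse_*) + list(append) + str(append), reduced
        if isinstance(base, dict) and isinstance(new, dict):
            for (k, v) in new.items():
                base[k] = toy_merge(base[k], v) if k in base else v
            return base
        if isinstance(base, list) and isinstance(new, list):
            return base + new
        if isinstance(base, str) and isinstance(new, str):
            return base + new
        return new

    merged = toy_merge({'a': 'b', 'run': ['b', 'c']},
                       {'a': 'e', 'run': ['stuff', 'morestuff']})
    assert merged == {'a': 'be', 'run': ['b', 'c', 'stuff', 'morestuff']}
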
def test_unhandled_type_warning(self):
"""Raw text without magic is ignored but shows warning."""
ci = stages.Init()
data = "arbitrary text\n"
ci.datasource = FakeDataSource(data)
- self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
+ mock_write = self.mocker.replace("cloudinit.util.write_file",
+ passthrough=False)
+ mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
self.mocker.replay()
log_file = self.capture_log(logging.WARNING)
@@ -69,6 +190,46 @@ class TestConsumeUserData(MockerTestCase):
"Unhandled non-multipart (text/x-not-multipart) userdata:",
log_file.getvalue())
+ def test_mime_gzip_compressed(self):
+ """Tests that individual message gzip encoding works."""
+
+ def gzip_part(text):
+ contents = StringIO.StringIO()
+ f = gzip.GzipFile(fileobj=contents, mode='w')
+ f.write(str(text))
+ f.flush()
+ f.close()
+ return MIMEApplication(contents.getvalue(), 'gzip')
+
+ base_content1 = '''
+#cloud-config
+a: 2
+'''
+
+ base_content2 = '''
+#cloud-config
+b: 3
+c: 4
+'''
+
+ message = MIMEMultipart('test')
+ message.attach(gzip_part(base_content1))
+ message.attach(gzip_part(base_content2))
+ ci = stages.Init()
+ ci.datasource = FakeDataSource(str(message))
+ new_root = self.makeDir()
+ self.patchUtils(new_root)
+ self.patchOS(new_root)
+ ci.fetch()
+ ci.consume_userdata()
+ contents = util.load_file(ci.paths.get_ipath("cloud_config"))
+ contents = util.load_yaml(contents)
+ self.assertTrue(isinstance(contents, dict))
+ self.assertEquals(3, len(contents))
+ self.assertEquals(2, contents['a'])
+ self.assertEquals(3, contents['b'])
+ self.assertEquals(4, contents['c'])
+
def test_mime_text_plain(self):
"""Mime message of type text/plain is ignored but shows warning."""
ci = stages.Init()
@@ -76,7 +237,9 @@ class TestConsumeUserData(MockerTestCase):
message.set_payload("Just text")
ci.datasource = FakeDataSource(message.as_string())
- self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
+ mock_write = self.mocker.replace("cloudinit.util.write_file",
+ passthrough=False)
+ mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
self.mocker.replay()
log_file = self.capture_log(logging.WARNING)
@@ -93,8 +256,10 @@ class TestConsumeUserData(MockerTestCase):
ci.datasource = FakeDataSource(script)
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
- self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
- self.mock_write(outpath, script, 0700)
+ mock_write = self.mocker.replace("cloudinit.util.write_file",
+ passthrough=False)
+ mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
+ mock_write(outpath, script, 0700)
self.mocker.replay()
log_file = self.capture_log(logging.WARNING)
@@ -111,8 +276,10 @@ class TestConsumeUserData(MockerTestCase):
ci.datasource = FakeDataSource(message.as_string())
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
- self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
- self.mock_write(outpath, script, 0700)
+ mock_write = self.mocker.replace("cloudinit.util.write_file",
+ passthrough=False)
+ mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
+ mock_write(outpath, script, 0700)
self.mocker.replay()
log_file = self.capture_log(logging.WARNING)
@@ -129,8 +296,10 @@ class TestConsumeUserData(MockerTestCase):
ci.datasource = FakeDataSource(message.as_string())
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
- self.mock_write(outpath, script, 0700)
- self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
+ mock_write = self.mocker.replace("cloudinit.util.write_file",
+ passthrough=False)
+ mock_write(outpath, script, 0700)
+ mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
self.mocker.replay()
log_file = self.capture_log(logging.WARNING)
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 02611581..87415cb5 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,9 +1,12 @@
+# pylint: disable=C0301
+# the mountinfo data lines are too long
import os
import stat
import yaml
from mocker import MockerTestCase
from unittest import TestCase
+from tests.unittests import helpers
from cloudinit import importer
from cloudinit import util
@@ -248,4 +251,60 @@ class TestLoadYaml(TestCase):
myobj)
+class TestMountinfoParsing(helpers.ResourceUsingTestCase):
+ def test_invalid_mountinfo(self):
+ line = ("20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root"
+ "rw,errors=remount-ro,data=ordered")
+ elements = line.split()
+ for i in range(len(elements) + 1):
+ lines = [' '.join(elements[0:i])]
+ if i < 10:
+ expected = None
+ else:
+ expected = ('/dev/mapper/vg0-root', 'ext4', '/')
+ self.assertEqual(expected, util.parse_mount_info('/', lines))
+
+ def test_precise_ext4_root(self):
+
+ lines = self.readResource('mountinfo_precise_ext4.txt').splitlines()
+
+ expected = ('/dev/mapper/vg0-root', 'ext4', '/')
+ self.assertEqual(expected, util.parse_mount_info('/', lines))
+ self.assertEqual(expected, util.parse_mount_info('/usr', lines))
+ self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines))
+
+ expected = ('/dev/md0', 'ext4', '/boot')
+ self.assertEqual(expected, util.parse_mount_info('/boot', lines))
+ self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines))
+
+ expected = ('/dev/mapper/vg0-root', 'ext4', '/')
+ self.assertEqual(expected, util.parse_mount_info('/home', lines))
+ self.assertEqual(expected, util.parse_mount_info('/home/me', lines))
+
+ expected = ('tmpfs', 'tmpfs', '/run')
+ self.assertEqual(expected, util.parse_mount_info('/run', lines))
+
+ expected = ('none', 'tmpfs', '/run/lock')
+ self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
+
+ def test_raring_btrfs_root(self):
+ lines = self.readResource('mountinfo_raring_btrfs.txt').splitlines()
+
+ expected = ('/dev/vda1', 'btrfs', '/')
+ self.assertEqual(expected, util.parse_mount_info('/', lines))
+ self.assertEqual(expected, util.parse_mount_info('/usr', lines))
+ self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines))
+ self.assertEqual(expected, util.parse_mount_info('/boot', lines))
+ self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines))
+
+ expected = ('/dev/vda1', 'btrfs', '/home')
+ self.assertEqual(expected, util.parse_mount_info('/home', lines))
+ self.assertEqual(expected, util.parse_mount_info('/home/me', lines))
+
+ expected = ('tmpfs', 'tmpfs', '/run')
+ self.assertEqual(expected, util.parse_mount_info('/run', lines))
+
+ expected = ('none', 'tmpfs', '/run/lock')
+ self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
+
# vi: ts=4 expandtab
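
parse_mount_info() reads /proc/<pid>/mountinfo lines, which per proc(5) carry at least ten whitespace-separated fields with a lone '-' separating the mount fields from the filesystem fields; the invalid-line test above confirms anything shorter yields None. A stripped-down read of a single line (a sketch of the field layout only; the real function additionally picks the longest mount point that prefixes the queried path):

    def read_mountinfo_line(line):
        # field 4 (0-based) is the mount point; fs type and source
        # device follow the lone '-' separator
        parts = line.split()
        if len(parts) < 10 or '-' not in parts:
            return None
        sep = parts.index('-')
        return (parts[sep + 2], parts[sep + 1], parts[4])

    line = ('20 1 252:1 / / rw,relatime - ext4 '
            '/dev/mapper/vg0-root rw,errors=remount-ro,data=ordered')
    assert read_mountinfo_line(line) == \
        ('/dev/mapper/vg0-root', 'ext4', '/')
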
diff --git a/tools/ccfg-merge-debug b/tools/ccfg-merge-debug
new file mode 100755
index 00000000..5b6b050a
--- /dev/null
+++ b/tools/ccfg-merge-debug
@@ -0,0 +1,89 @@
+#!/usr/bin/python
+
+from cloudinit import handlers
+from cloudinit.handlers import cloud_config as cc_part
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import user_data as ud
+
+import argparse
+import os
+import shutil
+import tempfile
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description='test cloud-config merging')
+ parser.add_argument("--output", "-o", metavar="file",
+ help="specify output file", default="-")
+ parser.add_argument('--verbose', '-v', action='count', default=0)
+ parser.add_argument('files', nargs='+')
+
+ args = parser.parse_args()
+
+ if args.verbose:
+ level = (logging.WARN, logging.INFO,
+ logging.DEBUG)[min(args.verbose, 2)]
+ logging.setupBasicLogging(level)
+
+ outfile = args.output
+ if args.output == "-":
+ outfile = "/dev/stdout"
+
+ tempd = tempfile.mkdtemp()
+ handler_dir = os.path.join(tempd, "hdir")
+ data = None # the 'init' object
+ frequency = PER_INSTANCE
+
+ paths = helpers.Paths({})
+
+ # make an '#include <f1>' style user-data message
+ udproc = ud.UserDataProcessor(paths=paths)
+ user_data_msg = udproc.process("#include\n" +
+ '\n'.join([os.path.abspath(f) for f in args.files]))
+
+ ccph = cc_part.CloudConfigPartHandler(paths=paths)
+ ccph.cloud_fn = outfile
+
+ c_handlers = helpers.ContentHandlers()
+ c_handlers.register_defaults([ccph])
+
+ called = []
+ for (_ctype, mod) in c_handlers.iteritems():
+ if mod in called:
+ continue
+ handlers.call_begin(mod, data, frequency)
+ called.append(mod)
+
+ # Walk the user data
+ part_data = {
+ 'handlers': c_handlers,
+ # Any new handlers that are encountered get written here
+ 'handlerdir': handler_dir,
+ 'data': data,
+ # The default frequency if handlers don't have one
+ 'frequency': frequency,
+ # This will be used when new handlers are found
+ # to help write their contents to files with numbered
+ # names...
+ 'handlercount': 0,
+ }
+
+ handlers.walk(user_data_msg, handlers.walker_callback, data=part_data)
+
+ # Give callbacks opportunity to finalize
+ called = []
+ for (_ctype, mod) in c_handlers.iteritems():
+ if mod in called:
+ continue
+ handlers.call_end(mod, data, frequency)
+ called.append(mod)
+
+ shutil.rmtree(tempd)
+
+if __name__ == "__main__":
+ main()
+
+# vi: ts=4 expandtab
diff --git a/tools/make-dist-tarball b/tools/make-dist-tarball
index 7742caea..5b078515 100755
--- a/tools/make-dist-tarball
+++ b/tools/make-dist-tarball
@@ -10,16 +10,12 @@ EOF
}
topdir="$PWD"
-tag=${1}
+tag="$1"
[ -n "$tag" ] || { Usage 1>&2 ; exit 1; }
-tmpd=$(mktemp -d );
-trap "rm -Rf '${tmpd}'" 0
+out="${topdir}/cloud-init-${tag}.tar.gz"
-out=${topdir}/cloud-init-${tag}.tar.gz
-
-cd ${tmpd} &&
- bzr branch -r "tag:${tag}" "${topdir}" ./cloud-init-${tag} &&
- tar czf "${out}" cloud-init-${tag}/ --exclude cloud-init-${tag}/.bzr &&
- echo "Wrote ${out}"
+bzr export --format=tgz --root="cloud-init-$tag" \
+ "--revision=tag:${tag}" "$out" "$topdir" &&
+ echo "Wrote ${out}"
diff --git a/tools/make-tarball b/tools/make-tarball
index 47979f5b..27f5f374 100755
--- a/tools/make-tarball
+++ b/tools/make-tarball
@@ -18,18 +18,16 @@ if ! find_root; then
exit 1;
fi
+REVNO=$(bzr revno "$ROOT_DIR")
+
if [ ! -z "$1" ]; then
ARCHIVE_FN="$1"
else
- REVNO=$(bzr revno $ROOT_DIR)
- VERSION=$($ROOT_DIR/tools/read-version)
+ VERSION=$("$ROOT_DIR/tools/read-version")
ARCHIVE_FN="$PWD/cloud-init-$VERSION~bzr$REVNO.tar.gz"
fi
-FILES=$(cd $ROOT_DIR && bzr ls --versioned --recursive)
-echo "$FILES" | tar czf $ARCHIVE_FN \
- -C "$ROOT_DIR" \
- --transform "s,^,cloud-init-$VERSION~bzr$REVNO/," \
- --no-recursion --files-from -
+bzr export --format=tgz --root="cloud-init-$VERSION~bzr$REVNO" \
+ "--revision=${REVNO}" "${ARCHIVE_FN}" "$ROOT_DIR"
echo "$ARCHIVE_FN"
diff --git a/tools/read-dependencies b/tools/read-dependencies
index 4c88aa87..cadb09a8 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -21,15 +21,11 @@ fi
REQUIRES="$ROOT_DIR/Requires"
-if [ ! -e "$REQUIRES" ]
-then
+if [ ! -e "$REQUIRES" ]; then
echo "Unable to find 'Requires' file located at $REQUIRES"
exit 1
fi
# Filter out comments and empty lines
-DEPS=$(cat $REQUIRES | grep -Pv "^\s*#" | grep -Pv '^\s*$')
+DEPS=$(grep -Pv "^\s*#" "$REQUIRES" | grep -Pv '^\s*$')
echo "$DEPS" | sort -d -f
-
-
-
diff --git a/tools/read-version b/tools/read-version
index 323357fe..c76b24a9 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -27,5 +27,5 @@ then
exit 1
fi
-VERSION=$(grep -P "\d+.\d+.\d+:" $CHNG_LOG | cut -f1 -d ":" | head -n 1)
-echo $VERSION
+VERSION=$(grep -P "\d+.\d+.\d+:" "$CHNG_LOG" | cut -f1 -d ":" | head -n 1)
+echo "$VERSION"
diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf
index 118ffc1c..a94b1474 100644
--- a/upstart/cloud-init-nonet.conf
+++ b/upstart/cloud-init-nonet.conf
@@ -10,19 +10,60 @@ task
console output
script
- # /run/network/static-network-up-emitted is written by
- # upstart (via /etc/network/if-up.d/upstart). its presense would
- # indicate that static-network-up has already fired.
- EMITTED="/run/network/static-network-up-emitted"
- [ -e "$EMITTED" -o -e "/var/$EMITTED" ] && exit 0
+ set +e # you cannot trap TERM reliably with 'set -e'
+ SLEEP_CHILD=""
+ static_network_up() {
+ local emitted="/run/network/static-network-up-emitted"
+ # /run/network/static-network-up-emitted is written by
+ # upstart (via /etc/network/if-up.d/upstart). its presence would
+ # indicate that static-network-up has already fired.
+ [ -e "$emitted" -o -e "/var/$emitted" ]
+ }
+ msg() {
+ local uptime="" idle=""
+ if [ -r /proc/uptime ]; then
+ read uptime idle < /proc/uptime
+ fi
+ echo "$UPSTART_JOB${uptime:+[${uptime}]}:" "$1"
+ }
+
+ handle_sigterm() {
+ # if we received sigterm and static networking is up then it probably
+ # came from upstart as a result of 'stop on static-network-up'
+ if [ -n "$SLEEP_CHILD" ]; then
+ if ! kill $SLEEP_CHILD 2>/dev/null; then
+ [ ! -d "/proc/$SLEEP_CHILD" ] ||
+ msg "hm.. failed to kill sleep pid $SLEEP_CHILD"
+ fi
+ fi
+ if static_network_up; then
+ msg "static networking is now up"
+ exit 0
+ fi
+ msg "recieved SIGTERM, networking not up"
+ exit 2
+ }
+
+ dowait() {
+ msg "waiting $1 seconds for network device"
+ sleep "$1" &
+ SLEEP_CHILD=$!
+ wait $SLEEP_CHILD
+ SLEEP_CHILD=""
+ }
+
+ trap handle_sigterm TERM
+
+ # static_network_up already occurred
+ static_network_up && exit 0
+
+ # obj.pkl comes from cloud-init-local (or previous boot and
+ # manual_cache_clean)
[ -f /var/lib/cloud/instance/obj.pkl ] && exit 0
- short=10; long=120;
- sleep ${short}
- echo $UPSTART_JOB "waiting ${long} seconds for a network device."
- sleep ${long}
- echo $UPSTART_JOB "gave up waiting for a network device."
+ dowait 10
+ dowait 120
+ msg "gave up waiting for a network device."
: > /var/lib/cloud/data/no-net
end script
-# EOF