-rw-r--r--  ChangeLog                                             |    9
-rw-r--r--  Makefile                                              |   23
-rwxr-xr-x  bin/cloud-init                                        |    6
-rw-r--r--  cloudinit/config/cc_debug.py                          |    4
-rw-r--r--  cloudinit/config/cc_growpart.py                       |   45
-rw-r--r--  cloudinit/config/cc_power_state_change.py             |   29
-rw-r--r--  cloudinit/config/cc_resizefs.py                       |   40
-rw-r--r--  cloudinit/config/cc_scripts_vendor.py                 |   43
-rw-r--r--  cloudinit/distros/__init__.py                         |    1
-rw-r--r--  cloudinit/distros/freebsd.py                          |  246
-rw-r--r--  cloudinit/distros/net_util.py                         |  163
-rw-r--r--  cloudinit/distros/rhel.py                             |    3
-rw-r--r--  cloudinit/distros/rhel_util.py                        |   88
-rw-r--r--  cloudinit/distros/sles.py                             |    3
-rw-r--r--  cloudinit/ec2_utils.py                                |  202
-rw-r--r--  cloudinit/handlers/__init__.py                        |    4
-rw-r--r--  cloudinit/handlers/cloud_config.py                    |    2
-rw-r--r--  cloudinit/handlers/shell_script.py                    |    2
-rw-r--r--  cloudinit/helpers.py                                  |   28
-rw-r--r--  cloudinit/netinfo.py                                  |   48
-rw-r--r--  cloudinit/settings.py                                 |    1
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py            |    6
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py                |    2
-rw-r--r--  cloudinit/sources/__init__.py                         |   16
-rw-r--r--  cloudinit/stages.py                                   |  118
-rw-r--r--  cloudinit/user_data.py                                |    6
-rw-r--r--  cloudinit/util.py                                     |   83
-rw-r--r--  config/cloud.cfg                                      |    1
-rw-r--r--  doc/examples/cloud-config-landscape.txt               |    7
-rw-r--r--  doc/examples/cloud-config-vendor-data.txt             |   16
-rw-r--r--  doc/rtd/topics/datasources.rst                        |    4
-rw-r--r--  doc/vendordata.txt                                    |   53
-rwxr-xr-x  packages/bddeb                                        |    1
-rwxr-xr-x  packages/brpm                                         |    2
-rw-r--r--  packages/debian/control.in                            |    1
-rw-r--r--  packages/debian/copyright                             |   22
-rw-r--r--  packages/redhat/cloud-init.spec.in                    |    1
-rw-r--r--  packages/suse/cloud-init.spec.in                      |    1
-rw-r--r--  requirements.txt (renamed from Requires)              |    3
-rw-r--r--  test-requirements.txt                                 |    6
-rw-r--r--  tests/unittests/test_data.py (renamed from tests/unittests/test_userdata.py) |  176
-rw-r--r--  tests/unittests/test_datasource/test_configdrive.py   |    5
-rw-r--r--  tests/unittests/test_ec2_util.py                      |  130
-rw-r--r--  tests/unittests/test_runs/test_merge_run.py           |    4
-rw-r--r--  tests/unittests/test_runs/test_simple_run.py          |    4
-rwxr-xr-x  tools/read-dependencies                               |   45
-rwxr-xr-x  tools/read-version                                    |   46
47 files changed, 1443 insertions, 306 deletions
diff --git a/ChangeLog b/ChangeLog
index 46a27df3..e547426e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -13,6 +13,15 @@
   redirect cloud-init stderr and stdout to /var/log/cloud-init-output.log.
- drop support for resizing partitions with parted entirely (LP: #1212492).
This was broken as it was anyway.
+ - add support for vendordata.
+ - drop dependency on boto for crawling ec2 metadata service.
+ - add 'Requires' on sudo (for OpenNebula datasource) in rpm specs, and
+ 'Recommends' in the debian/control.in [Vlastimil Holer]
+ - if mount_info reports /dev/root is a device path for /, then convert
+ that to a device via help of kernel cmdline.
+ - configdrive: consider partitions as possible datasources if they have
+     the correct filesystem label. [Paul Querna]
+ - initial freebsd support [Harm Weites]
0.7.4:
- fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a
partitioned block device with target filesystem on ephemeral0.1.
diff --git a/Makefile b/Makefile
index 8cf1659a..c8b75e73 100644
--- a/Makefile
+++ b/Makefile
@@ -8,6 +8,8 @@ YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )
CHANGELOG_VERSION=$(shell $(CWD)/tools/read-version)
CODE_VERSION=$(shell python -c "from cloudinit import version; print version.version_string()")
+PIP_INSTALL := pip install
+
ifeq ($(distro),)
distro = redhat
endif
@@ -23,7 +25,16 @@ pylint:
pyflakes:
pyflakes $(PY_FILES)
-test:
+pip-requirements:
+ @echo "Installing cloud-init dependencies..."
+	$(PIP_INSTALL) -r "$(subst pip-,,$@).txt" -q
+
+pip-test-requirements:
+ @echo "Installing cloud-init test dependencies..."
+	$(PIP_INSTALL) -r "$(subst pip-,,$@).txt" -q
+
+test: clean_pyc
+ @echo "Running tests..."
@nosetests $(noseopts) tests/
check_version:
@@ -32,12 +43,14 @@ check_version:
"not equal to code version $(CODE_VERSION)"; exit 2; \
else true; fi
+clean_pyc:
+ @find . -type f -name "*.pyc" -delete
+
2to3:
2to3 $(PY_FILES)
-clean:
- rm -rf /var/log/cloud-init.log \
- /var/lib/cloud/
+clean: clean_pyc
+ rm -rf /var/log/cloud-init.log /var/lib/cloud/
yaml:
@$(CWD)/tools/validate-yaml.py $(YAML_FILES)
@@ -49,4 +62,4 @@ deb:
./packages/bddeb
.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb yaml check_version
-
+.PHONY: pip-test-requirements pip-requirements clean_pyc
diff --git a/bin/cloud-init b/bin/cloud-init
index b4f9fd07..80a1df05 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -261,8 +261,8 @@ def main_init(name, args):
# Attempt to consume the data per instance.
# This may run user-data handlers and/or perform
# url downloads and such as needed.
- (ran, _results) = init.cloudify().run('consume_userdata',
- init.consume_userdata,
+ (ran, _results) = init.cloudify().run('consume_data',
+ init.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
if not ran:
@@ -271,7 +271,7 @@ def main_init(name, args):
#
# See: https://bugs.launchpad.net/bugs/819507 for a little
# reason behind this...
- init.consume_userdata(PER_ALWAYS)
+ init.consume_data(PER_ALWAYS)
except Exception:
util.logexc(LOG, "Consuming user data failed!")
return 1
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index cfd31fa1..7219b0f8 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -14,10 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from StringIO import StringIO
-from cloudinit import util
from cloudinit import type_utils
+from cloudinit import util
import copy
+from StringIO import StringIO
def _make_header(text):
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 6bddf847..b81951ad 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -114,6 +114,41 @@ class ResizeGrowPart(object):
return (before, get_size(partdev))
+class ResizeGpart(object):
+ def available(self):
+ if not util.which('gpart'):
+ return False
+ return True
+
+ def resize(self, diskdev, partnum, partdev):
+ """
+ GPT disks store metadata at the beginning (primary) and at the
+        end (secondary) of the disk. When launching an image with a
+        larger disk than the original image, the secondary copy is
+        lost. The metadata is then marked CORRUPT and needs to be
+        recovered.
+ """
+ try:
+ util.subp(["gpart", "recover", diskdev])
+ except util.ProcessExecutionError as e:
+ if e.exit_code != 0:
+ util.logexc(LOG, "Failed: gpart recover %s", diskdev)
+ raise ResizeFailedException(e)
+
+ before = get_size(partdev)
+ try:
+ util.subp(["gpart", "resize", "-i", partnum, diskdev])
+ except util.ProcessExecutionError as e:
+ util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
+ raise ResizeFailedException(e)
+
+        # Growing the filesystem requires a reboot, so flag that a reboot
+        # is required once this module has finished.
+ open('/var/run/reboot-required', 'a').close()
+
+ return (before, get_size(partdev))
+
+
def get_size(filename):
fd = os.open(filename, os.O_RDONLY)
try:
@@ -132,6 +167,12 @@ def device_part_info(devpath):
bname = os.path.basename(rpath)
syspath = "/sys/class/block/%s" % bname
+    # FreeBSD has no sysfs, so derive everything we need from the
+    # device path itself, e.g. /dev/vtbd0p2.
+ if util.system_info()["platform"].startswith('FreeBSD'):
+        m = re.search('^(/dev/.+)p([0-9]+)$', devpath)
+        if not m:
+            raise ValueError("Unable to parse device path %s" % devpath)
+        return (m.group(1), m.group(2))
+
if not os.path.exists(syspath):
raise ValueError("%s had no syspath (%s)" % (devpath, syspath))
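As a rough illustration of the FreeBSD branch above, the regex simply splits
a partition device path into its disk and partition number (device name
hypothetical):

    >>> import re
    >>> m = re.search('^(/dev/.+)p([0-9]+)$', '/dev/vtbd0p2')
    >>> (m.group(1), m.group(2))
    ('/dev/vtbd0', '2')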
@@ -182,7 +223,7 @@ def resize_devices(resizer, devices):
"stat of '%s' failed: %s" % (blockdev, e),))
continue
- if not stat.S_ISBLK(statret.st_mode):
+ if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
info.append((devent, RESIZE.SKIPPED,
"device '%s' not a block device" % blockdev,))
continue
@@ -255,4 +296,4 @@ def handle(_name, cfg, _cloud, log, _args):
else:
log.debug("'%s' %s: %s" % (entry, action, msg))
-RESIZERS = (('growpart', ResizeGrowPart),)
+RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart))
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index e3150808..561c5abd 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -22,6 +22,7 @@ from cloudinit import util
import errno
import os
import re
+import signal
import subprocess
import time
@@ -30,6 +31,24 @@ frequency = PER_INSTANCE
EXIT_FAIL = 254
+def givecmdline(pid):
+    # Returns the cmdline for the given process id. On Linux we can use
+    # procfs for this, but on BSD there is /usr/bin/procstat.
+ try:
+ # Example output from procstat -c 1
+ # PID COMM ARGS
+ # 1 init /bin/init --
+ if util.system_info()["platform"].startswith('FreeBSD'):
+ (output, _err) = util.subp(['procstat', '-c', str(pid)])
+ line = output.splitlines()[1]
+ m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line)
+ return m.group(2)
+ else:
+ return util.load_file("/proc/%s/cmdline" % pid)
+ except IOError:
+ return None
+
+
def handle(_name, cfg, _cloud, log, _args):
try:
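For reference, a minimal sketch of what givecmdline() extracts from the
procstat output quoted in its comment (sample line hypothetical):

    >>> import re
    >>> line = '  1 init             /bin/init --'
    >>> m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line)
    >>> m.group(2)
    '/bin/init --'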
@@ -42,8 +61,8 @@ def handle(_name, cfg, _cloud, log, _args):
return
mypid = os.getpid()
- cmdline = util.load_file("/proc/%s/cmdline" % mypid)
+ cmdline = givecmdline(mypid)
if not cmdline:
log.warn("power_state: failed to get cmdline of current process")
return
@@ -119,8 +138,6 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
msg = None
end_time = time.time() + timeout
- cmdline_f = "/proc/%s/cmdline" % pid
-
def fatal(msg):
if log:
log.warn(msg)
@@ -134,16 +151,14 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
break
try:
- cmdline = ""
- with open(cmdline_f) as fp:
- cmdline = fp.read()
+ cmdline = givecmdline(pid)
if cmdline != pidcmdline:
msg = "cmdline changed for %s [now: %s]" % (pid, cmdline)
break
except IOError as ioerr:
if ioerr.errno in known_errnos:
- msg = "pidfile '%s' gone [%d]" % (cmdline_f, ioerr.errno)
+ msg = "pidfile gone [%d]" % ioerr.errno
else:
fatal("IOError during wait: %s" % ioerr)
break
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 56040fdd..be406034 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -39,6 +39,10 @@ def _resize_ext(mount_point, devpth): # pylint: disable=W0613
def _resize_xfs(mount_point, devpth): # pylint: disable=W0613
return ('xfs_growfs', devpth)
+
+def _resize_ufs(mount_point, devpth): # pylint: disable=W0613
+ return ('growfs', devpth)
+
# Do not use a dictionary as these commands should be able to be used
# for multiple filesystem types if possible, e.g. one command for
# ext2, ext3 and ext4.
@@ -46,11 +50,31 @@ RESIZE_FS_PREFIXES_CMDS = [
('btrfs', _resize_btrfs),
('ext', _resize_ext),
('xfs', _resize_xfs),
+ ('ufs', _resize_ufs),
]
NOBLOCK = "noblock"
+def rootdev_from_cmdline(cmdline):
+ found = None
+ for tok in cmdline.split():
+ if tok.startswith("root="):
+ found = tok[5:]
+ break
+ if found is None:
+ return None
+
+ if found.startswith("/dev/"):
+ return found
+ if found.startswith("LABEL="):
+ return "/dev/disk/by-label/" + found[len("LABEL="):]
+ if found.startswith("UUID="):
+ return "/dev/disk/by-uuid/" + found[len("UUID="):]
+
+ return "/dev/" + found
+
+
def handle(name, cfg, _cloud, log, args):
if len(args) != 0:
resize_root = args[0]
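A few sample translations through rootdev_from_cmdline(), with hypothetical
kernel command lines:

    >>> rootdev_from_cmdline('BOOT_IMAGE=/vmlinuz root=/dev/sda1 ro')
    '/dev/sda1'
    >>> rootdev_from_cmdline('root=LABEL=cloudimg-rootfs ro')
    '/dev/disk/by-label/cloudimg-rootfs'
    >>> print rootdev_from_cmdline('ro quiet')
    None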
@@ -78,10 +102,20 @@ def handle(name, cfg, _cloud, log, args):
info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
log.debug("resize_info: %s" % info)
+ container = util.is_container()
+
+ if (devpth == "/dev/root" and not os.path.exists(devpth) and
+ not container):
+ devpth = rootdev_from_cmdline(util.get_cmdline())
+ if devpth is None:
+ log.warn("Unable to find device '/dev/root'")
+ return
+ log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)
+
try:
statret = os.stat(devpth)
except OSError as exc:
- if util.is_container() and exc.errno == errno.ENOENT:
+ if container and exc.errno == errno.ENOENT:
log.debug("Device '%s' did not exist in container. "
"cannot resize: %s" % (devpth, info))
elif exc.errno == errno.ENOENT:
@@ -91,8 +125,8 @@ def handle(name, cfg, _cloud, log, args):
raise exc
return
- if not stat.S_ISBLK(statret.st_mode):
- if util.is_container():
+ if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
+ if container:
log.debug("device '%s' not a block device in container."
" cannot resize: %s" % (devpth, info))
else:
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
new file mode 100644
index 00000000..0c9e504e
--- /dev/null
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -0,0 +1,43 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2014 Canonical Ltd.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit import util
+
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+
+SCRIPT_SUBDIR = 'vendor'
+
+
+def handle(name, cfg, cloud, log, _args):
+    # This is written to by the vendor data handlers;
+    # any vendor data shell scripts get placed in runparts_path.
+ runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts',
+ SCRIPT_SUBDIR)
+
+ prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
+
+ try:
+ util.runparts(runparts_path, exe_prefix=prefix)
+ except:
+ log.warn("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
+ raise
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 74e95797..46b67fa3 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -39,6 +39,7 @@ from cloudinit.distros.parsers import hosts
OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
'redhat': ['fedora', 'rhel'],
+ 'freebsd': ['freebsd'],
'suse': ['sles']
}
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
new file mode 100644
index 00000000..f1650a77
--- /dev/null
+++ b/cloudinit/distros/freebsd.py
@@ -0,0 +1,246 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2014 Harm Weites
+#
+# Author: Harm Weites <harm@weites.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+from shutil import copyfile
+
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import netinfo
+from cloudinit import ssh_util
+from cloudinit import util
+
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(distros.Distro):
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+        # This will be used to restrict certain
+        # calls from repeatedly happening (when they
+        # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+ self.osfamily = 'freebsd'
+
+ # Updates a key in /etc/rc.conf.
+    def updatercconf(self, key, value):
+        LOG.debug("updatercconf: %s => %s", key, value)
+        conf = self.loadrcconf()
+        configchanged = False
+        if conf.get(key) != value:
+            LOG.debug("[rc.conf]: Value %s for key %s needs to be changed",
+                      value, key)
+            conf[key] = value
+            configchanged = True
+
+ if configchanged:
+ LOG.debug("Writing new /etc/rc.conf file")
+ with open('/etc/rc.conf', 'w') as file:
+ for keyval in conf.items():
+ file.write("%s=%s\n" % keyval)
+
+ # Load the contents of /etc/rc.conf and store all keys in a dict.
+ def loadrcconf(self):
+ conf = {}
+ with open("/etc/rc.conf") as file:
+ for line in file:
+ tok = line.split('=')
+ conf[tok[0]] = tok[1].rstrip()
+ return conf
+
+ def readrcconf(self, key):
+ conf = self.loadrcconf()
+ try:
+ val = conf[key]
+ except KeyError:
+ val = None
+ return val
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname()
+ return ('rc.conf', sys_hostname)
+
+ def _read_hostname(self, default=None):
+ hostname = None
+ try:
+ hostname = self.readrcconf('hostname')
+ except IOError:
+ pass
+ if not hostname:
+ return default
+ return hostname
+
+ def _select_hostname(self, hostname, fqdn):
+ if not hostname:
+ return fqdn
+ return hostname
+
+ def _write_hostname(self, your_hostname, out_fn):
+ self.updatercconf('hostname', your_hostname)
+
+ def create_group(self, name, members):
+        group_add_cmd = ['pw', 'groupadd', '-n', name]
+ if util.is_group(name):
+ LOG.warn("Skipping creation of existing group '%s'" % name)
+ else:
+ try:
+ util.subp(group_add_cmd)
+ LOG.info("Created new group %s" % name)
+ except Exception:
+                util.logexc(LOG, "Failed to create group %s", name)
+
+ if len(members) > 0:
+ for member in members:
+ if not util.is_user(member):
+ LOG.warn("Unable to add group member '%s' to group '%s'"
+ "; user does not exist.", member, name)
+ continue
+                util.subp(['pw', 'groupmod', name, '-m', member])
+ LOG.info("Added user '%s' to group '%s'" % (member, name))
+
+ def add_user(self, name, **kwargs):
+ if util.is_user(name):
+ LOG.info("User %s already exists, skipping." % name)
+ return False
+
+ adduser_cmd = ['pw', 'useradd', '-n', name]
+ log_adduser_cmd = ['pw', 'useradd', '-n', name]
+
+ adduser_opts = {
+ "homedir": '-d',
+ "gecos": '-c',
+ "primary_group": '-g',
+ "groups": '-G',
+ "passwd": '-h',
+ "shell": '-s',
+ "inactive": '-E',
+ }
+ adduser_flags = {
+ "no_user_group": '--no-user-group',
+ "system": '--system',
+ "no_log_init": '--no-log-init',
+ }
+
+ redact_opts = ['passwd']
+
+ for key, val in kwargs.iteritems():
+ if key in adduser_opts and val and isinstance(val, str):
+ adduser_cmd.extend([adduser_opts[key], val])
+
+ # Redact certain fields from the logs
+ if key in redact_opts:
+ log_adduser_cmd.extend([adduser_opts[key], 'REDACTED'])
+ else:
+ log_adduser_cmd.extend([adduser_opts[key], val])
+
+ elif key in adduser_flags and val:
+ adduser_cmd.append(adduser_flags[key])
+ log_adduser_cmd.append(adduser_flags[key])
+
+ if 'no_create_home' in kwargs or 'system' in kwargs:
+ adduser_cmd.append('-d/nonexistent')
+ log_adduser_cmd.append('-d/nonexistent')
+ else:
+ adduser_cmd.append('-d/usr/home/%s' % name)
+ adduser_cmd.append('-m')
+ log_adduser_cmd.append('-d/usr/home/%s' % name)
+ log_adduser_cmd.append('-m')
+
+ # Run the command
+ LOG.info("Adding user %s", name)
+ try:
+ util.subp(adduser_cmd, logstring=log_adduser_cmd)
+ except Exception as e:
+ util.logexc(LOG, "Failed to create user %s", name)
+ raise e
+
+ # TODO:
+ def set_passwd(self, name, **kwargs):
+ return False
+
+ def lock_passwd(self, name):
+ try:
+ util.subp(['pw', 'usermod', name, '-h', '-'])
+ except Exception as e:
+ util.logexc(LOG, "Failed to lock user %s", name)
+ raise e
+
+ # TODO:
+ def write_sudo_rules(self, name, rules, sudo_file=None):
+ LOG.debug("[write_sudo_rules] Name: %s" % name)
+
+ def create_user(self, name, **kwargs):
+ self.add_user(name, **kwargs)
+
+ # Set password if plain-text password provided and non-empty
+ if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
+ self.set_passwd(name, kwargs['plain_text_passwd'])
+
+ # Default locking down the account. 'lock_passwd' defaults to True.
+ # lock account unless lock_password is False.
+ if kwargs.get('lock_passwd', True):
+ self.lock_passwd(name)
+
+ # Configure sudo access
+ if 'sudo' in kwargs:
+ self.write_sudo_rules(name, kwargs['sudo'])
+
+ # Import SSH keys
+ if 'ssh_authorized_keys' in kwargs:
+ keys = set(kwargs['ssh_authorized_keys']) or []
+ ssh_util.setup_user_keys(keys, name, options=None)
+
+ def _write_network(self, settings):
+ return
+
+ def apply_locale(self, locale, out_fn=None):
+ loginconf = '/etc/login.conf'
+ newloginconf = '/tmp/login.conf.new'
+ backupconf = '/etc/login.conf.orig'
+
+ newconf = open(newloginconf, 'w')
+ origconf = open(loginconf, 'r')
+
+ for line in origconf:
+ newconf.write(re.sub('^default:', r'default:lang=%s:' % locale, line))
+ newconf.close()
+ origconf.close()
+ # Make a backup of login.conf.
+ copyfile(loginconf, backupconf)
+ # And copy the new login.conf.
+ copyfile(newloginconf, loginconf)
+
+ try:
+            LOG.debug("Running cap_mkdb for %s", locale)
+ util.subp(['cap_mkdb', '/etc/login.conf'])
+ except:
+ # cap_mkdb failed, so restore the backup.
+            util.logexc(LOG, "Failed to apply locale %s", locale)
+ copyfile(backupconf, loginconf)
+
+    # TODO: the stubs below still need implementing; they at least need
+    # 'self' (and the arguments the base class passes) to be callable.
+    def install_packages(self, pkglist):
+        return
+
+    def package_command(self, cmd, args=None):
+        return
+
+    def set_timezone(self, tz):
+        return
+
+    def update_package_sources(self):
+        return
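For context, the rc.conf helpers above amount to a simple key=value
round-trip; a hypothetical session (cfg and paths elided):

    d = Distro('freebsd', cfg, paths)
    d.updatercconf('hostname', 'cloud-host')
    # /etc/rc.conf is rewritten to contain: hostname=cloud-host
    d.readrcconf('hostname')   # -> 'cloud-host'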
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
new file mode 100644
index 00000000..5f60666d
--- /dev/null
+++ b/cloudinit/distros/net_util.py
@@ -0,0 +1,163 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+# This is a util function to translate debian based distro interface blobs as
+# given in /etc/network/interfaces to a *somewhat* agnostic format for
+# distributions that use other formats.
+#
+# TODO(harlowja) remove when we have python-netcf active...
+#
+# The format is the following:
+# {
+# <device-name>: {
+# # All optional (if not existent in original format)
+# "netmask": <ip>,
+# "broadcast": <ip>,
+# "gateway": <ip>,
+# "address": <ip>,
+# "bootproto": "static"|"dhcp",
+# "dns-search": <hostname>,
+# "hwaddress": <mac-address>,
+# "auto": True (or non-existent),
+# "dns-nameservers": [<ip/hostname>, ...],
+# }
+# }
+#
+# Things to note: comments are removed, and if a ubuntu/debian interface
+# is marked as auto then only the first segment is retained, i.e.
+# 'auto eth0 eth0:1' just marks eth0 as auto (not eth0:1).
+#
+# Example input:
+#
+# auto lo
+# iface lo inet loopback
+#
+# auto eth0
+# iface eth0 inet static
+# address 10.0.0.1
+# netmask 255.255.252.0
+# broadcast 10.0.0.255
+# gateway 10.0.0.2
+# dns-nameservers 98.0.0.1 98.0.0.2
+#
+# Example output:
+# {
+# "lo": {
+# "auto": true
+# },
+# "eth0": {
+# "auto": true,
+# "dns-nameservers": [
+# "98.0.0.1",
+# "98.0.0.2"
+# ],
+# "broadcast": "10.0.0.255",
+# "netmask": "255.255.252.0",
+# "bootproto": "static",
+# "address": "10.0.0.1",
+# "gateway": "10.0.0.2"
+# }
+# }
+
+def translate_network(settings):
+ # Get the standard cmd, args from the ubuntu format
+ entries = []
+ for line in settings.splitlines():
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+ split_up = line.split(None, 1)
+ if len(split_up) <= 1:
+ continue
+ entries.append(split_up)
+ # Figure out where each iface section is
+ ifaces = []
+ consume = {}
+ for (cmd, args) in entries:
+ if cmd == 'iface':
+ if consume:
+ ifaces.append(consume)
+ consume = {}
+ consume[cmd] = args
+ else:
+ consume[cmd] = args
+ # Check if anything left over to consume
+ absorb = False
+ for (cmd, args) in consume.iteritems():
+ if cmd == 'iface':
+ absorb = True
+ if absorb:
+ ifaces.append(consume)
+ # Now translate
+ real_ifaces = {}
+ for info in ifaces:
+ if 'iface' not in info:
+ continue
+ iface_details = info['iface'].split(None)
+ dev_name = None
+ if len(iface_details) >= 1:
+ dev = iface_details[0].strip().lower()
+ if dev:
+ dev_name = dev
+ if not dev_name:
+ continue
+ iface_info = {}
+ if len(iface_details) >= 3:
+ proto_type = iface_details[2].strip().lower()
+ # Seems like this can be 'loopback' which we don't
+ # really care about
+ if proto_type in ['dhcp', 'static']:
+ iface_info['bootproto'] = proto_type
+ # These can just be copied over
+ for k in ['netmask', 'address', 'gateway', 'broadcast']:
+ if k in info:
+ val = info[k].strip().lower()
+ if val:
+ iface_info[k] = val
+ # Name server info provided??
+ if 'dns-nameservers' in info:
+ iface_info['dns-nameservers'] = info['dns-nameservers'].split()
+ # Name server search info provided??
+ if 'dns-search' in info:
+ iface_info['dns-search'] = info['dns-search'].split()
+ # Is any mac address spoofing going on??
+ if 'hwaddress' in info:
+ hw_info = info['hwaddress'].lower().strip()
+ hw_split = hw_info.split(None, 1)
+ if len(hw_split) == 2 and hw_split[0].startswith('ether'):
+ hw_addr = hw_split[1]
+ if hw_addr:
+ iface_info['hwaddress'] = hw_addr
+ real_ifaces[dev_name] = iface_info
+ # Check for those that should be started on boot via 'auto'
+ for (cmd, args) in entries:
+ if cmd == 'auto':
+ # Seems like auto can be like 'auto eth0 eth0:1' so just get the
+ # first part out as the device name
+ args = args.split(None)
+ if not args:
+ continue
+ dev_name = args[0].strip().lower()
+ if dev_name in real_ifaces:
+ real_ifaces[dev_name]['auto'] = True
+ return real_ifaces
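A sketch of how distro code drives this helper, matching the example in the
header comment:

    settings = '''
    auto eth0
    iface eth0 inet static
        address 10.0.0.1
        netmask 255.255.252.0
    '''
    ifaces = translate_network(settings)
    # ifaces['eth0'] -> {'auto': True, 'bootproto': 'static',
    #                    'address': '10.0.0.1', 'netmask': '255.255.252.0'}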
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 30195384..e8abf111 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -25,6 +25,7 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.distros import net_util
from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
@@ -63,7 +64,7 @@ class Distro(distros.Distro):
def _write_network(self, settings):
# TODO(harlowja) fix this... since this is the ubuntu format
- entries = rhel_util.translate_network(settings)
+ entries = net_util.translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
# Make the intermediate format as the rhel format...
diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py
index 1aba58b8..063d536e 100644
--- a/cloudinit/distros/rhel_util.py
+++ b/cloudinit/distros/rhel_util.py
@@ -30,94 +30,6 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
-# This is a util function to translate Debian based distro interface blobs as
-# given in /etc/network/interfaces to an equivalent format for distributions
-# that use ifcfg-* style (Red Hat and SUSE).
-# TODO(harlowja) remove when we have python-netcf active...
-def translate_network(settings):
- # Get the standard cmd, args from the ubuntu format
- entries = []
- for line in settings.splitlines():
- line = line.strip()
- if not line or line.startswith("#"):
- continue
- split_up = line.split(None, 1)
- if len(split_up) <= 1:
- continue
- entries.append(split_up)
- # Figure out where each iface section is
- ifaces = []
- consume = {}
- for (cmd, args) in entries:
- if cmd == 'iface':
- if consume:
- ifaces.append(consume)
- consume = {}
- consume[cmd] = args
- else:
- consume[cmd] = args
- # Check if anything left over to consume
- absorb = False
- for (cmd, args) in consume.iteritems():
- if cmd == 'iface':
- absorb = True
- if absorb:
- ifaces.append(consume)
- # Now translate
- real_ifaces = {}
- for info in ifaces:
- if 'iface' not in info:
- continue
- iface_details = info['iface'].split(None)
- dev_name = None
- if len(iface_details) >= 1:
- dev = iface_details[0].strip().lower()
- if dev:
- dev_name = dev
- if not dev_name:
- continue
- iface_info = {}
- if len(iface_details) >= 3:
- proto_type = iface_details[2].strip().lower()
- # Seems like this can be 'loopback' which we don't
- # really care about
- if proto_type in ['dhcp', 'static']:
- iface_info['bootproto'] = proto_type
- # These can just be copied over
- for k in ['netmask', 'address', 'gateway', 'broadcast']:
- if k in info:
- val = info[k].strip().lower()
- if val:
- iface_info[k] = val
- # Name server info provided??
- if 'dns-nameservers' in info:
- iface_info['dns-nameservers'] = info['dns-nameservers'].split()
- # Name server search info provided??
- if 'dns-search' in info:
- iface_info['dns-search'] = info['dns-search'].split()
- # Is any mac address spoofing going on??
- if 'hwaddress' in info:
- hw_info = info['hwaddress'].lower().strip()
- hw_split = hw_info.split(None, 1)
- if len(hw_split) == 2 and hw_split[0].startswith('ether'):
- hw_addr = hw_split[1]
- if hw_addr:
- iface_info['hwaddress'] = hw_addr
- real_ifaces[dev_name] = iface_info
- # Check for those that should be started on boot via 'auto'
- for (cmd, args) in entries:
- if cmd == 'auto':
- # Seems like auto can be like 'auto eth0 eth0:1' so just get the
- # first part out as the device name
- args = args.split(None)
- if not args:
- continue
- dev_name = args[0].strip().lower()
- if dev_name in real_ifaces:
- real_ifaces[dev_name]['auto'] = True
- return real_ifaces
-
-
# Helper function to update a RHEL/SUSE /etc/sysconfig/* file
def update_sysconfig_file(fn, adjustments, allow_empty=False):
if not adjustments:
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
index f2ac4efc..9788a1ba 100644
--- a/cloudinit/distros/sles.py
+++ b/cloudinit/distros/sles.py
@@ -26,6 +26,7 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.distros import net_util
from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
@@ -54,7 +55,7 @@ class Distro(distros.Distro):
def _write_network(self, settings):
# Convert debian settings to ifcfg format
- entries = rhel_util.translate_network(settings)
+ entries = net_util.translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
# Make the intermediate format as the suse format...
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index fcd511c5..92a22747 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -16,48 +16,160 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import boto.utils as boto_utils
-
-# Versions of boto >= 2.6.0 (and possibly 2.5.2)
-# try to lazily load the metadata backing, which
-# doesn't work so well in cloud-init especially
-# since the metadata is serialized and actions are
-# performed where the metadata server may be blocked
-# (thus the datasource will start failing) resulting
-# in url exceptions when fields that do exist (or
-# would have existed) do not exist due to the blocking
-# that occurred.
-
-# TODO(harlowja): https://github.com/boto/boto/issues/1401
-# When boto finally moves to using requests, we should be able
-# to provide it ssl details, it does not yet, so we can't provide them...
-
-
-def _unlazy_dict(mp):
- if not isinstance(mp, (dict)):
- return mp
- # Walk over the keys/values which
- # forces boto to unlazy itself and
- # has no effect on dictionaries that
- # already have there items.
- for (_k, v) in mp.items():
- _unlazy_dict(v)
- return mp
-
-
-def get_instance_userdata(api_version, metadata_address):
- # Note: boto.utils.get_instance_metadata returns '' for empty string
- # so the change from non-true to '' is not specifically necessary, but
- # this way cloud-init will get consistent behavior even if boto changed
- # in the future to return a None on "no user-data provided".
- ud = boto_utils.get_instance_userdata(api_version, None, metadata_address)
- if not ud:
- ud = ''
- return ud
-
-
-def get_instance_metadata(api_version, metadata_address):
- metadata = boto_utils.get_instance_metadata(api_version, metadata_address)
- if not isinstance(metadata, (dict)):
- metadata = {}
- return _unlazy_dict(metadata)
+from urlparse import (urlparse, urlunparse)
+
+import functools
+import json
+import urllib
+
+from cloudinit import log as logging
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+def maybe_json_object(text):
+ if not text:
+ return False
+ text = text.strip()
+ if text.startswith("{") and text.endswith("}"):
+ return True
+ return False
+
+
+def combine_url(base, add_on):
+ base_parsed = list(urlparse(base))
+ path = base_parsed[2]
+ if path and not path.endswith("/"):
+ path += "/"
+ path += urllib.quote(str(add_on), safe="/:")
+ base_parsed[2] = path
+ return urlunparse(base_parsed)
+
+
+# See: http://bit.ly/TyoUQs
+#
+class MetadataMaterializer(object):
+ def __init__(self, blob, base_url, caller):
+ self._blob = blob
+ self._md = None
+ self._base_url = base_url
+ self._caller = caller
+
+ def _parse(self, blob):
+ leaves = {}
+ children = []
+ if not blob:
+ return (leaves, children)
+
+ def has_children(item):
+ if item.endswith("/"):
+ return True
+ else:
+ return False
+
+ def get_name(item):
+ if item.endswith("/"):
+ return item.rstrip("/")
+ return item
+
+ for field in blob.splitlines():
+ field = field.strip()
+ field_name = get_name(field)
+ if not field or not field_name:
+ continue
+ if has_children(field):
+ if field_name not in children:
+ children.append(field_name)
+ else:
+ contents = field.split("=", 1)
+ resource = field_name
+ if len(contents) > 1:
+ # What a PITA...
+ (ident, sub_contents) = contents
+ ident = util.safe_int(ident)
+ if ident is not None:
+ resource = "%s/openssh-key" % (ident)
+ field_name = sub_contents
+ leaves[field_name] = resource
+ return (leaves, children)
+
+ def materialize(self):
+ if self._md is not None:
+ return self._md
+ self._md = self._materialize(self._blob, self._base_url)
+ return self._md
+
+ def _decode_leaf_blob(self, field, blob):
+ if not blob:
+ return blob
+ if maybe_json_object(blob):
+ try:
+ # Assume it's json, unless it fails parsing...
+ return json.loads(blob)
+ except (ValueError, TypeError) as e:
+ LOG.warn("Field %s looked like a json object, but it was"
+ " not: %s", field, e)
+ if blob.find("\n") != -1:
+ return blob.splitlines()
+ return blob
+
+ def _materialize(self, blob, base_url):
+ (leaves, children) = self._parse(blob)
+ child_contents = {}
+ for c in children:
+ child_url = combine_url(base_url, c)
+ if not child_url.endswith("/"):
+ child_url += "/"
+ child_blob = str(self._caller(child_url))
+ child_contents[c] = self._materialize(child_blob, child_url)
+ leaf_contents = {}
+ for (field, resource) in leaves.items():
+ leaf_url = combine_url(base_url, resource)
+ leaf_blob = str(self._caller(leaf_url))
+ leaf_contents[field] = self._decode_leaf_blob(field, leaf_blob)
+ joined = {}
+ joined.update(child_contents)
+ for field in leaf_contents.keys():
+ if field in joined:
+ LOG.warn("Duplicate key found in results from %s", base_url)
+ else:
+ joined[field] = leaf_contents[field]
+ return joined
+
+
+def get_instance_userdata(api_version='latest',
+ metadata_address='http://169.254.169.254',
+ ssl_details=None, timeout=5, retries=5):
+ ud_url = combine_url(metadata_address, api_version)
+ ud_url = combine_url(ud_url, 'user-data')
+ try:
+ response = util.read_file_or_url(ud_url,
+ ssl_details=ssl_details,
+ timeout=timeout,
+ retries=retries)
+ return str(response)
+ except Exception:
+ util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
+ return ''
+
+
+def get_instance_metadata(api_version='latest',
+ metadata_address='http://169.254.169.254',
+ ssl_details=None, timeout=5, retries=5):
+ md_url = combine_url(metadata_address, api_version)
+ md_url = combine_url(md_url, 'meta-data')
+ caller = functools.partial(util.read_file_or_url,
+ ssl_details=ssl_details, timeout=timeout,
+ retries=retries)
+
+ try:
+ response = caller(md_url)
+ materializer = MetadataMaterializer(str(response), md_url, caller)
+ md = materializer.materialize()
+ if not isinstance(md, (dict)):
+ md = {}
+ return md
+ except Exception:
+ util.logexc(LOG, "Failed fetching metadata from url %s", md_url)
+ return {}
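With boto gone, these helpers crawl the metadata service directly; a minimal
sketch of the new entry points (the api_version value is illustrative):

    from cloudinit import ec2_utils

    md = ec2_utils.get_instance_metadata(api_version='2009-04-04')
    ud = ec2_utils.get_instance_userdata(api_version='2009-04-04')
    # md is a nested dict built by recursively walking the meta-data/
    # listing; ud is the raw user-data string ('' on failure).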
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 2ddc75f4..059d7495 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -187,6 +187,10 @@ def _escape_string(text):
def walker_callback(data, filename, payload, headers):
content_type = headers['Content-Type']
+    if content_type in data.get('excluded', []):
+ LOG.debug('content_type "%s" is excluded', content_type)
+ return
+
if content_type in PART_CONTENT_TYPES:
walker_handle_handler(data, content_type, filename, payload)
return
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 34a73115..4232700f 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -66,6 +66,8 @@ class CloudConfigPartHandler(handlers.Handler):
handlers.Handler.__init__(self, PER_ALWAYS, version=3)
self.cloud_buf = None
self.cloud_fn = paths.get_ipath("cloud_config")
+ if 'cloud_config_path' in _kwargs:
+ self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"])
self.file_names = []
def list_types(self):
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index 62289d98..30c1ed89 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -36,6 +36,8 @@ class ShellScriptPartHandler(handlers.Handler):
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
self.script_dir = paths.get_ipath_cur('scripts')
+ if 'script_path' in _kwargs:
+ self.script_dir = paths.get_ipath_cur(_kwargs['script_path'])
def list_types(self):
return [
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index e5eac6a7..e701126e 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -200,11 +200,13 @@ class Runners(object):
class ConfigMerger(object):
def __init__(self, paths=None, datasource=None,
- additional_fns=None, base_cfg=None):
+ additional_fns=None, base_cfg=None,
+ include_vendor=True):
self._paths = paths
self._ds = datasource
self._fns = additional_fns
self._base_cfg = base_cfg
+ self._include_vendor = include_vendor
# Created on first use
self._cfg = None
@@ -237,13 +239,19 @@ class ConfigMerger(object):
# a configuration file to use when running...
if not self._paths:
return i_cfgs
- cc_fn = self._paths.get_ipath_cur('cloud_config')
- if cc_fn and os.path.isfile(cc_fn):
- try:
- i_cfgs.append(util.read_conf(cc_fn))
- except:
- util.logexc(LOG, 'Failed loading of cloud-config from %s',
- cc_fn)
+
+ cc_paths = ['cloud_config']
+ if self._include_vendor:
+ cc_paths.append('vendor_cloud_config')
+
+ for cc_p in cc_paths:
+ cc_fn = self._paths.get_ipath_cur(cc_p)
+ if cc_fn and os.path.isfile(cc_fn):
+ try:
+ i_cfgs.append(util.read_conf(cc_fn))
+ except:
+ util.logexc(LOG, 'Failed loading of cloud-config from %s',
+ cc_fn)
return i_cfgs
def _read_cfg(self):
@@ -331,13 +339,17 @@ class Paths(object):
self.lookups = {
"handlers": "handlers",
"scripts": "scripts",
+ "vendor_scripts": "scripts/vendor",
"sem": "sem",
"boothooks": "boothooks",
"userdata_raw": "user-data.txt",
"userdata": "user-data.txt.i",
"obj_pkl": "obj.pkl",
"cloud_config": "cloud-config.txt",
+ "vendor_cloud_config": "vendor-cloud-config.txt",
"data": "data",
+ "vendordata_raw": "vendor-data.txt",
+ "vendordata": "vendor-data.txt.i",
}
# Set when a datasource becomes active
self.datasource = ds
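The new lookup keys resolve instance-relative names to concrete files, e.g.
(paths shown assume the usual /var/lib/cloud layout):

    # paths.get_ipath('vendordata_raw')     -> <instance-dir>/vendor-data.txt
    # paths.get_ipath('vendordata')         -> <instance-dir>/vendor-data.txt.i
    # paths.get_ipath_cur('vendor_scripts') -> <instance-dir>/scripts/vendor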
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index feba5a62..63f720e4 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -21,6 +21,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cloudinit.util as util
+import re
from prettytable import PrettyTable
@@ -40,27 +41,43 @@ def netdev_info(empty=""):
toks = line.lower().strip().split()
if toks[0] == "up":
devs[curdev]['up'] = True
+ # If the output of ifconfig doesn't contain the required info in the
+ # obvious place, use a regex filter to be sure.
+ elif len(toks) > 1:
+ if re.search("flags=\d+<up,", toks[1]):
+ devs[curdev]['up'] = True
fieldpost = ""
if toks[0] == "inet6":
fieldpost = "6"
for i in range(len(toks)):
- if toks[i] == "hwaddr":
+ if toks[i] == "hwaddr" or toks[i] == "ether":
try:
devs[curdev]["hwaddr"] = toks[i + 1]
except IndexError:
pass
- for field in ("addr", "bcast", "mask"):
+
+        # Couple the different items we're interested in with the correct
+        # field, since FreeBSD/CentOS/Fedora differ in their output.
+
+ ifconfigfields = {
+ "addr:": "addr", "inet": "addr",
+ "bcast:": "bcast", "broadcast": "bcast",
+ "mask:": "mask", "netmask": "mask"
+ }
+ for origfield, field in ifconfigfields.items():
target = "%s%s" % (field, fieldpost)
if devs[curdev].get(target, ""):
continue
- if toks[i] == "%s:" % field:
+ if toks[i] == "%s" % origfield:
try:
devs[curdev][target] = toks[i + 1]
except IndexError:
pass
- elif toks[i].startswith("%s:" % field):
+ elif toks[i].startswith("%s" % origfield):
devs[curdev][target] = toks[i][len(field) + 1:]
if empty != "":
@@ -73,15 +90,33 @@ def netdev_info(empty=""):
def route_info():
- (route_out, _err) = util.subp(["route", "-n"])
+ (route_out, _err) = util.subp(["netstat", "-rn"])
routes = []
entries = route_out.splitlines()[1:]
for line in entries:
if not line:
continue
toks = line.split()
- if len(toks) < 8 or toks[0] == "Kernel" or toks[0] == "Destination":
+
+        # FreeBSD shows 6 items in the routing table:
+        #   Destination Gateway Flags Refs Use Netif Expire
+        #   default     10.65.0.1 UGS  0   34920 vtnet0
+        #
+        # Linux netstat shows 2 more:
+        #   Destination Gateway Genmask Flags MSS Window irtt Iface
+        #   0.0.0.0     10.65.0.1 0.0.0.0 UG  0   0      0    eth0
+
+        if len(toks) < 6 or toks[0] in ("Kernel", "Destination", "Internet",
+                                        "Internet6", "Routing"):
continue
+
+        # BSD's netstat output lacks the Genmask/MSS/Window/irtt columns;
+        # pad the row so the interface name lands at index 7 as on Linux.
+        if len(toks) < 8:
+            toks.append("-")
+            toks.append("-")
+            toks[7] = toks[5]
+            toks[5] = "-"
+
entry = {
'destination': toks[0],
'gateway': toks[1],
@@ -92,6 +127,7 @@ def route_info():
'use': toks[6],
'iface': toks[7],
}
+
routes.append(entry)
return routes
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 5df7f557..7be2199a 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -52,6 +52,7 @@ CFG_BUILTIN = {
},
'distro': 'ubuntu',
},
+ 'vendor_data': {'enabled': True, 'prefix': []},
}
# Valid frequencies of handlers/modules
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 4f437244..2a244496 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -284,8 +284,10 @@ def find_candidate_devs():
# followed by fstype items, but with dupes removed
combined = (by_label + [d for d in by_fstype if d not in by_label])
- # We are looking for block device (sda, not sda1), ignore partitions
- combined = [d for d in combined if not util.is_partition(d)]
+ # We are looking for a block device or partition with necessary label or
+ # an unpartitioned block device.
+ combined = [d for d in combined
+ if d in by_label or not util.is_partition(d)]
return combined
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 551b20c4..6593ce6e 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -47,6 +47,7 @@ SMARTOS_ATTRIB_MAP = {
'iptables_disable': ('iptables_disable', True),
'motd_sys_info': ('motd_sys_info', True),
'availability_zone': ('datacenter_name', True),
+ 'vendordata': ('sdc:operator-script', False),
}
DS_NAME = 'SmartOS'
@@ -154,6 +155,7 @@ class DataSourceSmartOS(sources.DataSource):
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
+ self.vendordata_raw = md['vendordata']
return True
def device_name_to_device(self, name):
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 7dc1fbde..fef4d460 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -53,6 +53,8 @@ class DataSource(object):
self.userdata = None
self.metadata = None
self.userdata_raw = None
+ self.vendordata = None
+ self.vendordata_raw = None
# find the datasource config name.
# remove 'DataSource' from classname on front, and remove 'Net' on end.
@@ -77,9 +79,14 @@ class DataSource(object):
if self.userdata is None:
self.userdata = self.ud_proc.process(self.get_userdata_raw())
if apply_filter:
- return self._filter_userdata(self.userdata)
+ return self._filter_xdata(self.userdata)
return self.userdata
+ def get_vendordata(self):
+ if self.vendordata is None:
+ self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
+ return self.vendordata
+
@property
def launch_index(self):
if not self.metadata:
@@ -88,7 +95,7 @@ class DataSource(object):
return self.metadata['launch-index']
return None
- def _filter_userdata(self, processed_ud):
+ def _filter_xdata(self, processed_ud):
filters = [
launch_index.Filter(util.safe_int(self.launch_index)),
]
@@ -104,6 +111,9 @@ class DataSource(object):
def get_userdata_raw(self):
return self.userdata_raw
+ def get_vendordata_raw(self):
+ return self.vendordata_raw
+
# the data sources' config_obj is a cloud-config formated
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
@@ -119,7 +129,7 @@ class DataSource(object):
# when the kernel named them 'vda' or 'xvda'
# we want to return the correct value for what will actually
# exist in this instance
- mappings = {"sd": ("vd", "xvd")}
+ mappings = {"sd": ("vd", "xvd", "vtb")}
for (nfrom, tlist) in mappings.iteritems():
if not short_name.startswith(nfrom):
continue
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 07c55802..593b72a2 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -123,6 +123,7 @@ class Init(object):
os.path.join(c_dir, 'scripts', 'per-instance'),
os.path.join(c_dir, 'scripts', 'per-once'),
os.path.join(c_dir, 'scripts', 'per-boot'),
+ os.path.join(c_dir, 'scripts', 'vendor'),
os.path.join(c_dir, 'seed'),
os.path.join(c_dir, 'instances'),
os.path.join(c_dir, 'handlers'),
@@ -319,6 +320,7 @@ class Init(object):
if not self._write_to_cache():
return
self._store_userdata()
+ self._store_vendordata()
def _store_userdata(self):
raw_ud = "%s" % (self.datasource.get_userdata_raw())
@@ -326,11 +328,20 @@ class Init(object):
processed_ud = "%s" % (self.datasource.get_userdata())
util.write_file(self._get_ipath('userdata'), processed_ud, 0600)
- def _default_userdata_handlers(self):
- opts = {
+ def _store_vendordata(self):
+ raw_vd = "%s" % (self.datasource.get_vendordata_raw())
+ util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0600)
+ processed_vd = "%s" % (self.datasource.get_vendordata())
+ util.write_file(self._get_ipath('vendordata'), processed_vd, 0600)
+
+ def _default_handlers(self, opts=None):
+ if opts is None:
+ opts = {}
+
+ opts.update({
'paths': self.paths,
'datasource': self.datasource,
- }
+ })
# TODO(harlowja) Hmmm, should we dynamically import these??
def_handlers = [
cc_part.CloudConfigPartHandler(**opts),
@@ -340,7 +351,23 @@ class Init(object):
]
return def_handlers
- def consume_userdata(self, frequency=PER_INSTANCE):
+ def _default_userdata_handlers(self):
+ return self._default_handlers()
+
+ def _default_vendordata_handlers(self):
+ return self._default_handlers(
+ opts={'script_path': 'vendor_scripts',
+ 'cloud_config_path': 'vendor_cloud_config'})
+
+ def _do_handlers(self, data_msg, c_handlers_list, frequency,
+ excluded=None):
+ """
+ Generalized handlers suitable for use with either vendordata
+ or userdata
+ """
+ if excluded is None:
+ excluded = []
+
cdir = self.paths.get_cpath("handlers")
idir = self._get_ipath("handlers")
@@ -352,12 +379,6 @@ class Init(object):
if d and d not in sys.path:
sys.path.insert(0, d)
- # Ensure datasource fetched before activation (just incase)
- user_data_msg = self.datasource.get_userdata(True)
-
- # This keeps track of all the active handlers
- c_handlers = helpers.ContentHandlers()
-
def register_handlers_in_dir(path):
# Attempts to register any handler modules under the given path.
if not path or not os.path.isdir(path):
@@ -382,13 +403,16 @@ class Init(object):
util.logexc(LOG, "Failed to register handler from %s",
fname)
+ # This keeps track of all the active handlers
+ c_handlers = helpers.ContentHandlers()
+
# Add any handlers in the cloud-dir
register_handlers_in_dir(cdir)
# Register any other handlers that come from the default set. This
# is done after the cloud-dir handlers so that the cdir modules can
# take over the default user-data handler content-types.
- for mod in self._default_userdata_handlers():
+ for mod in c_handlers_list:
types = c_handlers.register(mod, overwrite=False)
if types:
LOG.debug("Added default handler for %s from %s", types, mod)
@@ -406,7 +430,7 @@ class Init(object):
handlers.call_begin(mod, data, frequency)
c_handlers.initialized.append(mod)
- def walk_handlers():
+ def walk_handlers(excluded):
# Walk the user data
part_data = {
'handlers': c_handlers,
@@ -419,9 +443,9 @@ class Init(object):
            # to help write their contents to files with numbered
# names...
'handlercount': 0,
+ 'excluded': excluded,
}
- handlers.walk(user_data_msg, handlers.walker_callback,
- data=part_data)
+ handlers.walk(data_msg, handlers.walker_callback, data=part_data)
def finalize_handlers():
# Give callbacks opportunity to finalize
@@ -438,10 +462,16 @@ class Init(object):
try:
init_handlers()
- walk_handlers()
+ walk_handlers(excluded)
finally:
finalize_handlers()
+ def consume_data(self, frequency=PER_INSTANCE):
+        # Consume the userdata first, because we want to let the part
+        # handlers run first (for merging stuff)
+ self._consume_userdata(frequency)
+ self._consume_vendordata(frequency)
+
# Perform post-consumption adjustments so that
# modules that run during the init stage reflect
# this consumed set.
@@ -453,6 +483,64 @@ class Init(object):
# objects before the load of the userdata happened,
# this is expected.
+ def _consume_vendordata(self, frequency=PER_INSTANCE):
+ """
+ Consume the vendordata and run the part handlers on it
+ """
+ # User-data should have been consumed first.
+        # So we merge the other available cloud-configs (everything except
+        # the vendor-provided one), and check whether or not we should
+        # consume vendor data at all. That gives the user or the system a
+        # chance to override.
+ if not self.datasource.get_vendordata_raw():
+ LOG.debug("no vendordata from datasource")
+ return
+
+ _cc_merger = helpers.ConfigMerger(paths=self._paths,
+ datasource=self.datasource,
+ additional_fns=[],
+ base_cfg=self.cfg,
+ include_vendor=False)
+ vdcfg = _cc_merger.cfg.get('vendor_data', {})
+
+ if not isinstance(vdcfg, dict):
+ vdcfg = {'enabled': False}
+ LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg)
+
+ enabled = vdcfg.get('enabled')
+ no_handlers = vdcfg.get('disabled_handlers', None)
+
+ if not util.is_true(enabled):
+ LOG.debug("vendordata consumption is disabled.")
+ return
+
+ LOG.debug("vendor data will be consumed. disabled_handlers=%s",
+ no_handlers)
+
+        # Ensure vendordata source fetched before activation (just in case)
+ vendor_data_msg = self.datasource.get_vendordata()
+
+        # This keeps track of all the active handlers, while excluding what
+        # the user doesn't want run, e.g. boot_hook, cloud_config, shell_script
+ c_handlers_list = self._default_vendordata_handlers()
+
+ # Run the handlers
+ self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
+ excluded=no_handlers)
+
+ def _consume_userdata(self, frequency=PER_INSTANCE):
+ """
+ Consume the userdata and run the part handlers
+ """
+
+        # Ensure datasource fetched before activation (just in case)
+ user_data_msg = self.datasource.get_userdata(True)
+
+ # This keeps track of all the active handlers
+ c_handlers_list = self._default_handlers()
+
+ # Run the handlers
+ self._do_handlers(user_data_msg, c_handlers_list, frequency)
+
class Modules(object):
def __init__(self, init, cfg_files=None):
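Because the merge in _consume_vendordata() above deliberately excludes the
vendor-supplied cloud-config, a user can veto vendordata from their own
cloud-config; a hypothetical snippet:

    #cloud-config
    vendor_data:
      enabled: False
      # or keep it enabled but skip selected part handlers:
      # disabled_handlers: ['text/cloud-config']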
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index d49ea094..3032ef70 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -88,7 +88,11 @@ class UserDataProcessor(object):
def process(self, blob):
accumulating_msg = MIMEMultipart()
- self._process_msg(convert_string(blob), accumulating_msg)
+ if isinstance(blob, list):
+ for b in blob:
+ self._process_msg(convert_string(b), accumulating_msg)
+ else:
+ self._process_msg(convert_string(blob), accumulating_msg)
return accumulating_msg
def _process_msg(self, base_msg, append_msg):
diff --git a/cloudinit/util.py b/cloudinit/util.py
index a37172dc..ce8dacbe 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -26,6 +26,7 @@ from StringIO import StringIO
import contextlib
import copy as obj_copy
+import ctypes
import errno
import glob
import grp
@@ -36,6 +37,7 @@ import os.path
import platform
import pwd
import random
+import re
import shutil
import socket
import stat
@@ -608,18 +610,28 @@ def del_dir(path):
shutil.rmtree(path)
-def runparts(dirp, skip_no_exist=True):
+def runparts(dirp, skip_no_exist=True, exe_prefix=None):
if skip_no_exist and not os.path.isdir(dirp):
return
failed = []
attempted = []
+
+ if exe_prefix is None:
+ prefix = []
+ elif isinstance(exe_prefix, str):
+ prefix = [str(exe_prefix)]
+ elif isinstance(exe_prefix, list):
+ prefix = exe_prefix
+ else:
+ raise TypeError("exe_prefix must be None, str, or list")
+
for exe_name in sorted(os.listdir(dirp)):
exe_path = os.path.join(dirp, exe_name)
if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
attempted.append(exe_path)
try:
- subp([exe_path], capture=False)
+ subp(prefix + [exe_path], capture=False)
except ProcessExecutionError as e:
logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
failed.append(e)
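A sketch of the new exe_prefix argument in action; with a prefix, every
executable in the directory is wrapped rather than run directly (paths
hypothetical):

    from cloudinit import util

    # effectively runs: /usr/bin/ltrace <script>  for each vendor script
    util.runparts('/var/lib/cloud/instance/scripts/vendor',
                  exe_prefix=['/usr/bin/ltrace'])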
@@ -865,8 +877,8 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
IP_address canonical_hostname [aliases...]
Fields of the entry are separated by any number of blanks and/or tab
- characters. Text from a "#" character until the end of the line is a
- comment, and is ignored. Host names may contain only alphanumeric
+ characters. Text from a "#" character until the end of the line is a
+ comment, and is ignored. Host names may contain only alphanumeric
characters, minus signs ("-"), and periods ("."). They must begin with
an alphabetic character and end with an alphanumeric character.
Optional aliases provide for name changes, alternate spellings, shorter
@@ -1302,11 +1314,25 @@ def mounts():
mounted = {}
try:
# Go through mounts to see what is already mounted
- mount_locs = load_file("/proc/mounts").splitlines()
+ if os.path.exists("/proc/mounts"):
+ mount_locs = load_file("/proc/mounts").splitlines()
+ method = 'proc'
+ else:
+ (mountoutput, _err) = subp("mount")
+ mount_locs = mountoutput.splitlines()
+ method = 'mount'
for mpline in mount_locs:
- # Format at: man fstab
+ # Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
+ # FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
try:
- (dev, mp, fstype, opts, _freq, _passno) = mpline.split()
+ if method == 'proc':
+ (dev, mp, fstype, opts, _freq, _passno) = mpline.split()
+ else:
+ m = re.search('^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', mpline)
+ dev = m.group(1)
+ mp = m.group(2)
+ fstype = m.group(3)
+ opts = m.group(4)
except:
continue
# If the name of the mount point contains spaces these
@@ -1317,9 +1343,9 @@ def mounts():
'mountpoint': mp,
'opts': opts,
}
- LOG.debug("Fetched %s mounts from %s", mounted, "/proc/mounts")
+ LOG.debug("Fetched %s mounts from %s", mounted, method)
except (IOError, OSError):
- logexc(LOG, "Failed fetching mount points from /proc/mounts")
+ logexc(LOG, "Failed fetching mount points")
return mounted
@@ -1404,12 +1430,26 @@ def time_rfc2822():
def uptime():
uptime_str = '??'
+ method = 'unknown'
try:
- contents = load_file("/proc/uptime").strip()
- if contents:
- uptime_str = contents.split()[0]
+ if os.path.exists("/proc/uptime"):
+ method = '/proc/uptime'
+ contents = load_file("/proc/uptime").strip()
+ if contents:
+ uptime_str = contents.split()[0]
+        else:
+            method = 'ctypes'
+            # sysctl kern.boottime returns a struct timeval, not a bare
+            # int, so read tv_sec from a correctly sized structure
+            # (this assumes a 64-bit time_t, as on FreeBSD amd64)
+            class _timeval(ctypes.Structure):
+                _fields_ = [('tv_sec', ctypes.c_int64),
+                            ('tv_usec', ctypes.c_int64)]
+            libc = ctypes.CDLL('/lib/libc.so.7')
+            buf = _timeval()
+            size = ctypes.c_size_t(ctypes.sizeof(buf))
+            libc.sysctlbyname("kern.boottime", ctypes.byref(buf),
+                              ctypes.byref(size), None, 0)
+            now = time.time()
+            uptime_str = str(now - buf.tv_sec)
+
except:
- logexc(LOG, "Unable to read uptime from /proc/uptime")
+ logexc(LOG, "Unable to read uptime using method: %s" % method)
return uptime_str
@@ -1748,6 +1788,19 @@ def parse_mtab(path):
return None
+def parse_mount(path):
+    # parse the output of mount(8), for systems that have neither
+    # /proc nor /etc/mtab (e.g. FreeBSD)
+    (mountoutput, _err) = subp("mount")
+    mount_locs = mountoutput.splitlines()
+    for line in mount_locs:
+        m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
+        if not m:
+            # lines such as 'map -hosts on /net (autofs)' will not match
+            continue
+        devpth = m.group(1)
+        mount_point = m.group(2)
+        fs_type = m.group(3)
+        if mount_point == path:
+            return devpth, fs_type, mount_point
+    return None
+
+
def get_mount_info(path, log=LOG):
# Use /proc/$$/mountinfo to find the device where path is mounted.
# This is done because with a btrfs filesystem using os.stat(path)
@@ -1781,8 +1834,10 @@ def get_mount_info(path, log=LOG):
if os.path.exists(mountinfo_path):
lines = load_file(mountinfo_path).splitlines()
return parse_mount_info(path, lines, log)
- else:
+ elif os.path.exists("/etc/mtab"):
return parse_mtab(path)
+ else:
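+        # no /proc/$$/mountinfo and no /etc/mtab (e.g. on FreeBSD);
+        # fall back to parsing the output of the 'mount' command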
+ return parse_mount(path)
def which(program):
diff --git a/config/cloud.cfg b/config/cloud.cfg
index a07cd3b0..b746e3db 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -64,6 +64,7 @@ cloud_config_modules:
# The modules that run in the 'final' stage
cloud_final_modules:
- rightscale_userdata
+ - scripts-vendor
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
diff --git a/doc/examples/cloud-config-landscape.txt b/doc/examples/cloud-config-landscape.txt
index e4d23cc9..74e07b62 100644
--- a/doc/examples/cloud-config-landscape.txt
+++ b/doc/examples/cloud-config-landscape.txt
@@ -6,6 +6,9 @@
#
# Note: 'tags' should be specified as a comma delimited string
# rather than a list.
+#
+# You can get example key/values by running 'landscape-config',
+# answer its questions, then look at /etc/landscape/client.config
landscape:
client:
url: "https://landscape.canonical.com/message-system"
@@ -13,3 +16,7 @@ landscape:
data_path: "/var/lib/landscape/client"
http_proxy: "http://my.proxy.com/foobar"
tags: "server,cloud"
+    computer_title: footitle
+    https_proxy: fooproxy
+    registration_key: fookey
+    account_name: fooaccount
diff --git a/doc/examples/cloud-config-vendor-data.txt b/doc/examples/cloud-config-vendor-data.txt
new file mode 100644
index 00000000..7f90847b
--- /dev/null
+++ b/doc/examples/cloud-config-vendor-data.txt
@@ -0,0 +1,16 @@
+#cloud-config
+#
+# This explains how to control vendordata via a cloud-config
+#
+# On select Datasources, vendors have a channel, called vendordata,
+# for supplying any of the supported user-data types. Users of the
+# end system are given ultimate control.
+#
+vendor_data:
+ enabled: True
+ prefix: /usr/bin/ltrace
+
+# enabled: whether vendordata handling is enabled or not
+# prefix: the command to run before any vendor scripts.
+# Note: the prefix is a fairly weak method of containment. It should
+# be used to profile a script, not to prevent it from running.
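+#
+# The prefix may also be given as a list when the wrapper command needs
+# arguments of its own. A sketch, assuming the prefix is handed through
+# unchanged to util.runparts (the wrapper and values are illustrative):
+#
+#   vendor_data:
+#     enabled: True
+#     prefix: [/usr/bin/timeout, 30s]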
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 5543ed34..cc0d0ede 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -130,10 +130,6 @@ To see which versions are supported from your cloud provider use the following U
...
latest
-**Note:** internally in cloudinit the `boto`_ library used to fetch the instance
-userdata and instance metadata, feel free to check that library out, it provides
-many other useful EC2 functionality.
-
---------------------------
Config Drive
---------------------------
diff --git a/doc/vendordata.txt b/doc/vendordata.txt
new file mode 100644
index 00000000..9acbe41c
--- /dev/null
+++ b/doc/vendordata.txt
@@ -0,0 +1,53 @@
+=== Overview ===
+Vendordata is data provided by the entity that launches an instance
+(for example, the cloud provider). This data can be used to
+customize the image to fit into the particular environment it is
+being run in.
+
+Vendordata follows the same rules as user-data, with the following
+caveats:
+ 1. Users have ultimate control over vendordata. They can disable its
+ execution or disable handling of specific parts of multipart input.
+ 2. By default it is consumed on first boot only.
+ 3. Vendordata can be disabled by the user. If the use of vendordata is
+ required for the instance to run, then vendordata should not be
+ used.
+ 4. User-supplied cloud-config is merged over cloud-config from
+    vendordata.
+
+Users providing cloud-config data can use the '#cloud-config-jsonp' method
+to control more finely how their modifications merge with the vendor
+supplied cloud-config. For example, if both vendor and user have provided
+'runcmd', then the default merge handler will cause the user's runcmd to
+override the one provided by the vendor. To append to 'runcmd' instead,
+the user should provide multipart input with a cloud-config-jsonp part like:
+ #cloud-config-jsonp
+ [{ "op": "add", "path": "/runcmd", "value": ["my", "command", "here"]}]
+
+Further, we strongly advise vendors not to 'be evil'. By evil, we
+mean any action that could compromise a system. Since users trust
+you, please take care to make sure that any vendordata is safe,
+atomic, idempotent and does not put your users at risk.
+
+=== Input Formats ===
+cloud-init will download and cache to the filesystem any vendordata that
+it finds. Vendordata is handled exactly like user-data. That means that
+the vendor can supply multipart input and have those parts acted on
+in the same way as user-data.
+
+The only differences are:
+ * vendor-scripts are stored in a different location than user-scripts (to
+   avoid namespace collision)
+ * users can disable individual part handlers via cloud-config settings.
+ For example, to disable handling of 'part-handlers' in vendor-data,
+ the user could provide user-data like this:
+ #cloud-config
+ vendordata: {excluded: 'text/part-handler'}
+
+=== Examples ===
+There are examples in the examples subdirectory.
+Additionally, the 'tools' directory contains 'write-mime-multipart',
+which can be used to easily generate mime-multi-part files from a list
+of input files. That data can then be given to an instance.
+
+See 'write-mime-multipart --help' for usage.
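+
+A hypothetical invocation (file names and content types here are
+illustrative; see the tool's --help output for the exact syntax):
+
+  ./tools/write-mime-multipart --output=vendor-data.txt \
+      config.yaml:text/cloud-config setup.sh:text/x-shellscript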
diff --git a/packages/bddeb b/packages/bddeb
index f52eb55f..9552aa40 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -29,7 +29,6 @@ import argparse
# file pypi package name to a debian/ubuntu package name.
PKG_MP = {
'argparse': 'python-argparse',
- 'boto': 'python-boto',
'cheetah': 'python-cheetah',
'configobj': 'python-configobj',
'jsonpatch': 'python-jsonpatch | python-json-patch',
diff --git a/packages/brpm b/packages/brpm
index 8c90a0ab..f8ba1db1 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -36,7 +36,6 @@ from cloudinit import util
PKG_MP = {
'redhat': {
'argparse': 'python-argparse',
- 'boto': 'python-boto',
'cheetah': 'python-cheetah',
'configobj': 'python-configobj',
'jsonpatch': 'python-jsonpatch',
@@ -48,7 +47,6 @@ PKG_MP = {
},
'suse': {
'argparse': 'python-argparse',
- 'boto': 'python-boto',
'cheetah': 'python-cheetah',
'configobj': 'python-configobj',
'jsonpatch': 'python-jsonpatch',
diff --git a/packages/debian/control.in b/packages/debian/control.in
index fd9f3ffd..7e42b94b 100644
--- a/packages/debian/control.in
+++ b/packages/debian/control.in
@@ -25,6 +25,7 @@ Depends: procps,
#end for
python-software-properties | software-properties-common,
\${misc:Depends},
+Recommends: sudo
XB-Python-Version: \${python:Versions}
Description: Init scripts for cloud instances
Cloud instances need special scripts to run during initialisation
diff --git a/packages/debian/copyright b/packages/debian/copyright
index dc993525..f55bb7a3 100644
--- a/packages/debian/copyright
+++ b/packages/debian/copyright
@@ -27,25 +27,3 @@ License: GPL-3
The complete text of the GPL version 3 can be seen in
/usr/share/common-licenses/GPL-3.
-
-Files: cloudinit/boto_utils.py
-Copyright: 2006,2007, Mitch Garnaat http://garnaat.org/
-License: MIT
- Permission is hereby granted, free of charge, to any person obtaining a
- copy of this software and associated documentation files (the
- "Software"), to deal in the Software without restriction, including
- without limitation the rights to use, copy, modify, merge, publish, dis-
- tribute, sublicense, and/or sell copies of the Software, and to permit
- persons to whom the Software is furnished to do so, subject to the fol-
- lowing conditions:
-
- The above copyright notice and this permission notice shall be included
- in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
- ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
- SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- IN THE SOFTWARE.
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index 30bcd050..9614e2f1 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -34,6 +34,7 @@ Requires: e2fsprogs
Requires: net-tools
Requires: procps
Requires: shadow-utils
+Requires: sudo
# Install pypi 'dynamic' requirements
#for $r in $requires
diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in
index 296505c6..c30a6fae 100644
--- a/packages/suse/cloud-init.spec.in
+++ b/packages/suse/cloud-init.spec.in
@@ -43,6 +43,7 @@ Requires: iproute2
Requires: e2fsprogs
Requires: net-tools
Requires: procps
+Requires: sudo
# Install pypi 'dynamic' requirements
#for $r in $requires
diff --git a/Requires b/requirements.txt
index f19c9691..8f695c68 100644
--- a/Requires
+++ b/requirements.txt
@@ -29,8 +29,5 @@ argparse
# Requests handles ssl correctly!
requests
-# Boto for ec2
-boto
-
# For patching pieces of cloud-config together
jsonpatch
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 00000000..4be0211d
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,6 @@
+httpretty>=0.7.1
+mocker
+nose
+pep8
+pyflakes
+pylint
diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_data.py
index 5ffe8f0a..68729c57 100644
--- a/tests/unittests/test_userdata.py
+++ b/tests/unittests/test_data.py
@@ -13,6 +13,7 @@ from email.mime.multipart import MIMEMultipart
from cloudinit import handlers
from cloudinit import helpers as c_helpers
from cloudinit import log
+from cloudinit.settings import (PER_INSTANCE)
from cloudinit import sources
from cloudinit import stages
from cloudinit import util
@@ -24,10 +25,11 @@ from tests.unittests import helpers
class FakeDataSource(sources.DataSource):
- def __init__(self, userdata):
+ def __init__(self, userdata=None, vendordata=None):
sources.DataSource.__init__(self, {}, None, None)
self.metadata = {'instance-id': INSTANCE_ID}
self.userdata_raw = userdata
+ self.vendordata_raw = vendordata
# FIXME: these tests shouldn't be checking log output??
@@ -45,6 +47,11 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
if self._log_handler and self._log:
self._log.removeHandler(self._log_handler)
+ def _patchIn(self, root):
+ self.restore()
+ self.patchOS(root)
+ self.patchUtils(root)
+
def capture_log(self, lvl=logging.DEBUG):
log_file = StringIO.StringIO()
self._log_handler = logging.StreamHandler(log_file)
@@ -68,13 +75,89 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEquals(2, len(cc))
self.assertEquals('qux', cc['baz'])
self.assertEquals('qux2', cc['bar'])
+ def test_simple_jsonp_vendor_and_user(self):
+ # test that user-data wins over vendor
+ user_blob = '''
+#cloud-config-jsonp
+[
+ { "op": "add", "path": "/baz", "value": "qux" },
+ { "op": "add", "path": "/bar", "value": "qux2" }
+]
+'''
+ vendor_blob = '''
+#cloud-config-jsonp
+[
+ { "op": "add", "path": "/baz", "value": "quxA" },
+ { "op": "add", "path": "/bar", "value": "quxB" },
+ { "op": "add", "path": "/foo", "value": "quxC" }
+]
+'''
+ new_root = self.makeDir()
+ self._patchIn(new_root)
+ initer = stages.Init()
+ initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.read_cfg()
+ initer.initialize()
+ initer.fetch()
+ _iid = initer.instancify()
+ initer.update()
+ initer.cloudify().run('consume_data',
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE)
+ mods = stages.Modules(initer)
+ (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ cfg = mods.cfg
+ self.assertIn('vendor_data', cfg)
+ self.assertEquals('qux', cfg['baz'])
+ self.assertEquals('qux2', cfg['bar'])
+ self.assertEquals('quxC', cfg['foo'])
+
+ def test_simple_jsonp_no_vendor_consumed(self):
+ # make sure that vendor data is not consumed
+ user_blob = '''
+#cloud-config-jsonp
+[
+ { "op": "add", "path": "/baz", "value": "qux" },
+ { "op": "add", "path": "/bar", "value": "qux2" },
+ { "op": "add", "path": "/vendor_data", "value": {"enabled": "false"}}
+]
+'''
+ vendor_blob = '''
+#cloud-config-jsonp
+[
+ { "op": "add", "path": "/baz", "value": "quxA" },
+ { "op": "add", "path": "/bar", "value": "quxB" },
+ { "op": "add", "path": "/foo", "value": "quxC" }
+]
+'''
+ new_root = self.makeDir()
+ self._patchIn(new_root)
+ initer = stages.Init()
+ initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.read_cfg()
+ initer.initialize()
+ initer.fetch()
+ _iid = initer.instancify()
+ initer.update()
+ initer.cloudify().run('consume_data',
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE)
+ mods = stages.Modules(initer)
+ (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ cfg = mods.cfg
+ self.assertEquals('qux', cfg['baz'])
+ self.assertEquals('qux2', cfg['bar'])
+ self.assertNotIn('foo', cfg)
+
def test_mixed_cloud_config(self):
blob_cc = '''
#cloud-config
@@ -105,12 +188,87 @@ c: d
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEquals(1, len(cc))
self.assertEquals('c', cc['a'])
+ def test_vendor_user_yaml_cloud_config(self):
+ vendor_blob = '''
+#cloud-config
+a: b
+name: vendor
+run:
+ - x
+ - y
+'''
+
+ user_blob = '''
+#cloud-config
+a: c
+vendor_data:
+ enabled: True
+ prefix: /bin/true
+name: user
+run:
+ - z
+'''
+ new_root = self.makeDir()
+ self._patchIn(new_root)
+ initer = stages.Init()
+ initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.read_cfg()
+ initer.initialize()
+ initer.fetch()
+ _iid = initer.instancify()
+ initer.update()
+ initer.cloudify().run('consume_data',
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE)
+ mods = stages.Modules(initer)
+ (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ cfg = mods.cfg
+ self.assertIn('vendor_data', cfg)
+ self.assertEquals('c', cfg['a'])
+ self.assertEquals('user', cfg['name'])
+ self.assertNotIn('x', cfg['run'])
+ self.assertNotIn('y', cfg['run'])
+ self.assertIn('z', cfg['run'])
+
+ def test_vendordata_script(self):
+ vendor_blob = '''
+#!/bin/bash
+echo "test"
+'''
+
+ user_blob = '''
+#cloud-config
+vendor_data:
+ enabled: True
+ prefix: /bin/true
+'''
+ new_root = self.makeDir()
+ self._patchIn(new_root)
+ initer = stages.Init()
+ initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.read_cfg()
+ initer.initialize()
+ initer.fetch()
+ _iid = initer.instancify()
+ initer.update()
+ initer.cloudify().run('consume_data',
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE)
+ mods = stages.Modules(initer)
+ (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ _cfg = mods.cfg
+ vendor_script = initer.paths.get_ipath_cur('vendor_scripts')
+ vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script)
+ self.assertTrue(os.path.exists(vendor_script_fns))
+
def test_merging_cloud_config(self):
blob = '''
#cloud-config
@@ -185,7 +343,7 @@ p: 1
log_file = self.capture_log(logging.WARNING)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
self.assertIn(
"Unhandled non-multipart (text/x-not-multipart) userdata:",
log_file.getvalue())
@@ -221,7 +379,7 @@ c: 4
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
contents = util.load_file(ci.paths.get_ipath("cloud_config"))
contents = util.load_yaml(contents)
self.assertTrue(isinstance(contents, dict))
@@ -244,7 +402,7 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
self.assertIn(
"Unhandled unknown content-type (text/plain)",
log_file.getvalue())
@@ -264,7 +422,7 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
self.assertEqual("", log_file.getvalue())
def test_mime_text_x_shellscript(self):
@@ -284,7 +442,7 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
self.assertEqual("", log_file.getvalue())
def test_mime_text_plain_shell(self):
@@ -304,5 +462,5 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
- ci.consume_userdata()
+ ci.consume_data()
self.assertEqual("", log_file.getvalue())
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index d5935294..3c1e8add 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -285,10 +285,11 @@ class TestConfigDriveDataSource(MockerTestCase):
self.assertEqual(["/dev/vdb", "/dev/zdd"],
ds.find_candidate_devs())
- # verify that partitions are not considered
+        # verify that partitions are considered, but only if they have the
+        # correct filesystem label
devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
"TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]}
- self.assertEqual([], ds.find_candidate_devs())
+ self.assertEqual(["/dev/vdb3"],
+ ds.find_candidate_devs())
finally:
util.find_devs_with = orig_find_devs_with
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py
new file mode 100644
index 00000000..dd588aca
--- /dev/null
+++ b/tests/unittests/test_ec2_util.py
@@ -0,0 +1,130 @@
+from tests.unittests import helpers
+
+from cloudinit import ec2_utils as eu
+
+import httpretty as hp
+
+
+class TestEc2Util(helpers.TestCase):
+ VERSION = 'latest'
+
+ @hp.activate
+ def test_userdata_fetch(self):
+ hp.register_uri(hp.GET,
+ 'http://169.254.169.254/%s/user-data' % (self.VERSION),
+ body='stuff',
+ status=200)
+ userdata = eu.get_instance_userdata(self.VERSION)
+ self.assertEquals('stuff', userdata)
+
+ @hp.activate
+ def test_userdata_fetch_fail_not_found(self):
+ hp.register_uri(hp.GET,
+ 'http://169.254.169.254/%s/user-data' % (self.VERSION),
+ status=404)
+ userdata = eu.get_instance_userdata(self.VERSION, retries=0)
+ self.assertEquals('', userdata)
+
+ @hp.activate
+ def test_userdata_fetch_fail_server_dead(self):
+ hp.register_uri(hp.GET,
+ 'http://169.254.169.254/%s/user-data' % (self.VERSION),
+ status=500)
+ userdata = eu.get_instance_userdata(self.VERSION, retries=0)
+ self.assertEquals('', userdata)
+
+ @hp.activate
+ def test_metadata_fetch_no_keys(self):
+ base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
+ hp.register_uri(hp.GET, base_url, status=200,
+ body="\n".join(['hostname',
+ 'instance-id',
+ 'ami-launch-index']))
+ hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
+ status=200, body='ec2.fake.host.name.com')
+ hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
+ status=200, body='123')
+ hp.register_uri(hp.GET, eu.combine_url(base_url, 'ami-launch-index'),
+ status=200, body='1')
+ md = eu.get_instance_metadata(self.VERSION, retries=0)
+ self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
+ self.assertEquals(md['instance-id'], '123')
+ self.assertEquals(md['ami-launch-index'], '1')
+
+ @hp.activate
+ def test_metadata_fetch_key(self):
+ base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
+ hp.register_uri(hp.GET, base_url, status=200,
+ body="\n".join(['hostname',
+ 'instance-id',
+ 'public-keys/']))
+ hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
+ status=200, body='ec2.fake.host.name.com')
+ hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
+ status=200, body='123')
+ hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'),
+ status=200, body='0=my-public-key')
+ hp.register_uri(hp.GET,
+ eu.combine_url(base_url, 'public-keys/0/openssh-key'),
+ status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
+ md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
+ self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
+ self.assertEquals(md['instance-id'], '123')
+ self.assertEquals(1, len(md['public-keys']))
+
+ @hp.activate
+ def test_metadata_fetch_with_2_keys(self):
+ base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
+ hp.register_uri(hp.GET, base_url, status=200,
+ body="\n".join(['hostname',
+ 'instance-id',
+ 'public-keys/']))
+ hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
+ status=200, body='ec2.fake.host.name.com')
+ hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
+ status=200, body='123')
+ hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'),
+ status=200,
+ body="\n".join(['0=my-public-key', '1=my-other-key']))
+ hp.register_uri(hp.GET,
+ eu.combine_url(base_url, 'public-keys/0/openssh-key'),
+ status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
+ hp.register_uri(hp.GET,
+ eu.combine_url(base_url, 'public-keys/1/openssh-key'),
+ status=200, body='ssh-rsa AAAA.....wZEf my-other-key')
+ md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
+ self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
+ self.assertEquals(md['instance-id'], '123')
+ self.assertEquals(2, len(md['public-keys']))
+
+ @hp.activate
+ def test_metadata_fetch_bdm(self):
+ base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
+ hp.register_uri(hp.GET, base_url, status=200,
+ body="\n".join(['hostname',
+ 'instance-id',
+ 'block-device-mapping/']))
+ hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
+ status=200, body='ec2.fake.host.name.com')
+ hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
+ status=200, body='123')
+ hp.register_uri(hp.GET,
+ eu.combine_url(base_url, 'block-device-mapping/'),
+ status=200,
+ body="\n".join(['ami', 'ephemeral0']))
+ hp.register_uri(hp.GET,
+ eu.combine_url(base_url, 'block-device-mapping/ami'),
+ status=200,
+ body="sdb")
+ hp.register_uri(hp.GET,
+ eu.combine_url(base_url,
+ 'block-device-mapping/ephemeral0'),
+ status=200,
+ body="sdc")
+ md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
+ self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
+ self.assertEquals(md['instance-id'], '123')
+ bdm = md['block-device-mapping']
+ self.assertEquals(2, len(bdm))
+ self.assertEquals(bdm['ami'], 'sdb')
+ self.assertEquals(bdm['ephemeral0'], 'sdc')
diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py
index d9c3a455..5ffe95a2 100644
--- a/tests/unittests/test_runs/test_merge_run.py
+++ b/tests/unittests/test_runs/test_merge_run.py
@@ -35,8 +35,8 @@ class TestMergeRun(helpers.FilesystemMockingTestCase):
initer.datasource.userdata_raw = ud
_iid = initer.instancify()
initer.update()
- initer.cloudify().run('consume_userdata',
- initer.consume_userdata,
+ initer.cloudify().run('consume_data',
+ initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mirrors = initer.distro.get_option('package_mirrors')
diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py
index 60ef812a..9a7178d1 100644
--- a/tests/unittests/test_runs/test_simple_run.py
+++ b/tests/unittests/test_runs/test_simple_run.py
@@ -66,8 +66,8 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.update()
self.assertTrue(os.path.islink("var/lib/cloud/instance"))
- initer.cloudify().run('consume_userdata',
- initer.consume_userdata,
+ initer.cloudify().run('consume_data',
+ initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
diff --git a/tools/read-dependencies b/tools/read-dependencies
index 3335f6a4..fee3efcf 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -1,32 +1,23 @@
-#!/bin/sh
+#!/usr/bin/env python
-set -e
+import os
+import sys
-find_root() {
- local topd
- if [ -z "${CLOUD_INIT_TOP_D}" ]; then
- topd=$(cd "$(dirname "${0}")" && cd .. && pwd)
- else
- topd=$(cd "${CLOUD_INIT_TOP_D}" && pwd)
- fi
- [ $? -eq 0 -a -f "${topd}/setup.py" ] || return
- ROOT_DIR="$topd"
-}
-fail() { echo "$0:" "$@" 1>&2; exit 1; }
+if 'CLOUD_INIT_TOP_D' in os.environ:
+ topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
+else:
+ topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-if ! find_root; then
- fail "Unable to locate 'setup.py' file that should " \
- "exist in the cloud-init root directory."
-fi
+for fname in ("setup.py", "requirements.txt"):
+ if not os.path.isfile(os.path.join(topd, fname)):
+ sys.stderr.write("Unable to locate '%s' file that should "
+ "exist in cloud-init root directory." % fname)
+ sys.exit(1)
-REQUIRES="$ROOT_DIR/Requires"
+with open(os.path.join(topd, "requirements.txt"), "r") as fp:
+ for line in fp:
+ if not line.strip() or line.startswith("#"):
+ continue
+ sys.stdout.write(line)
-if [ ! -e "$REQUIRES" ]; then
- fail "Unable to find 'Requires' file located at '$REQUIRES'"
-fi
-
-# Filter out comments and empty lines
-DEPS=$(sed -n -e 's,#.*,,' -e '/./p' "$REQUIRES") &&
- [ -n "$DEPS" ] ||
- fail "failed to read deps from '${REQUIRES}'"
-echo "$DEPS" | sort -d -f
+sys.exit(0)
diff --git a/tools/read-version b/tools/read-version
index 599f52cd..d02651e9 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -1,32 +1,26 @@
-#!/bin/sh
+#!/usr/bin/env python
-set -e
+import os
+import re
+import sys
-find_root() {
- local topd
- if [ -z "${CLOUD_INIT_TOP_D}" ]; then
- topd=$(cd "$(dirname "${0}")" && cd .. && pwd)
- else
- topd=$(cd "${CLOUD_INIT_TOP_D}" && pwd)
- fi
- [ $? -eq 0 -a -f "${topd}/setup.py" ] || return
- ROOT_DIR="$topd"
-}
-fail() { echo "$0:" "$@" 1>&2; exit 1; }
+if 'CLOUD_INIT_TOP_D' in os.environ:
+ topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
+else:
+ topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-if ! find_root; then
- fail "Unable to locate 'setup.py' file that should " \
- "exist in the cloud-init root directory."
-fi
+for fname in ("setup.py", "ChangeLog"):
+ if not os.path.isfile(os.path.join(topd, fname)):
+ sys.stderr.write("Unable to locate '%s' file that should "
+ "exist in cloud-init root directory." % fname)
+ sys.exit(1)
-CHNG_LOG="$ROOT_DIR/ChangeLog"
+vermatch = re.compile(r"^[0-9]+[.][0-9]+[.][0-9]+:$")
-if [ ! -e "$CHNG_LOG" ]; then
- fail "Unable to find 'ChangeLog' file located at '$CHNG_LOG'"
-fi
+with open(os.path.join(topd, "ChangeLog"), "r") as fp:
+    for line in fp:
+        if vermatch.match(line):
+            sys.stdout.write(line.strip()[:-1] + "\n")
+            break
+    else:
+        # mirror the old shell script's failure when no version is found
+        sys.stderr.write("failed to get version from ChangeLog\n")
+        sys.exit(1)
-VERSION=$(sed -n '/^[0-9]\+[.][0-9]\+[.][0-9]\+:/ {s/://; p; :a;n; ba; }' \
- "$CHNG_LOG") &&
- [ -n "$VERSION" ] ||
- fail "failed to get version from '$CHNG_LOG'"
-echo "$VERSION"
+sys.exit(0)