author    Joshua Harlow <harlowja@yahoo-inc.com>  2014-02-07 15:14:26 -0800
committer Joshua Harlow <harlowja@yahoo-inc.com>  2014-02-07 15:14:26 -0800
commit    2983a26ecf9716dc957ec4bacf15544072774190 (patch)
tree      a7e074c21789ebc553713b3fd591fd9bbe4a9684
parent    810df2c55c108e7e4064263e508d9786d8b1dc8e (diff)
parent    3cfe9b3d8958b1a4e450d5ff31d805c424945027 (diff)
download  vyos-cloud-init-2983a26ecf9716dc957ec4bacf15544072774190.tar.gz
          vyos-cloud-init-2983a26ecf9716dc957ec4bacf15544072774190.zip
Remerged with trunk
-rw-r--r--  ChangeLog                                            |   8
-rw-r--r--  cloudinit/config/cc_debug.py                         |   4
-rw-r--r--  cloudinit/config/cc_growpart.py                      |  46
-rw-r--r--  cloudinit/config/cc_power_state_change.py            |  29
-rw-r--r--  cloudinit/config/cc_resizefs.py                      |  40
-rw-r--r--  cloudinit/distros/__init__.py                        |   1
-rw-r--r--  cloudinit/distros/freebsd.py                         | 259
-rw-r--r--  cloudinit/distros/net_util.py                        | 163
-rw-r--r--  cloudinit/distros/rhel.py                            |   3
-rw-r--r--  cloudinit/distros/rhel_util.py                       |  88
-rw-r--r--  cloudinit/distros/sles.py                            |   3
-rw-r--r--  cloudinit/ec2_utils.py                               |  26
-rw-r--r--  cloudinit/netinfo.py                                 |  44
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py           |   9
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py               |  92
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py               | 101
-rw-r--r--  cloudinit/sources/__init__.py                        |   2
-rw-r--r--  cloudinit/url_helper.py                              |  51
-rw-r--r--  cloudinit/user_data.py                               |   3
-rw-r--r--  cloudinit/util.py                                    | 151
-rw-r--r--  doc/examples/cloud-config-landscape.txt              |   7
-rw-r--r--  doc/rtd/conf.py                                      |   3
-rw-r--r--  doc/sources/smartos/README.rst                       |  92
-rwxr-xr-x  setup.py                                             |   6
-rwxr-xr-x  sysvinit/freebsd/cloudconfig                         |  34
-rwxr-xr-x  sysvinit/freebsd/cloudfinal                          |  34
-rwxr-xr-x  sysvinit/freebsd/cloudinit                           |  34
-rwxr-xr-x  sysvinit/freebsd/cloudinitlocal                      |  34
-rw-r--r--  tests/unittests/helpers.py                           |   3
-rw-r--r--  tests/unittests/test__init__.py                      |   6
-rw-r--r--  tests/unittests/test_datasource/test_configdrive.py  |   5
-rw-r--r--  tests/unittests/test_datasource/test_maas.py         |   5
-rw-r--r--  tests/unittests/test_datasource/test_nocloud.py      |  35
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py      | 145
-rw-r--r--  tests/unittests/test_ec2_util.py                     |   8
-rw-r--r--  tests/unittests/test_pathprefix2dict.py              |  40
-rwxr-xr-x  tools/read-dependencies                              |  45
-rwxr-xr-x  tools/read-version                                   |  46
-rwxr-xr-x  tools/run-pep8                                       |  11
-rwxr-xr-x  tools/run-pylint                                     |   3
40 files changed, 1402 insertions, 317 deletions
diff --git a/ChangeLog b/ChangeLog
index cb9586f0..6c8fe90a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -13,10 +13,16 @@
redirect cloud-init stderr and stdout to /var/log/cloud-init-output.log.
- drop support for resizing partitions with parted entirely (LP: #1212492).
This was broken as it was anyway.
- - add support for vendordata.
+ - add support for vendordata in SmartOS and NoCloud datasources.
- drop dependency on boto for crawling ec2 metadata service.
- add 'Requires' on sudo (for OpenNebula datasource) in rpm specs, and
'Recommends' in the debian/control.in [Vlastimil Holer]
+ - if mount_info reports /dev/root is a device path for /, then convert
that to a device with the help of the kernel cmdline.
+ - configdrive: consider partitions as possible datasources if they have
the correct filesystem label. [Paul Querna]
+ - initial freebsd support [Harm Weites]
+ - fix in is_ipv4 to accept IP addresses with a '0' in them.
0.7.4:
- fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a
partitioned block device with target filesystem on ephemeral0.1.
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index cfd31fa1..7219b0f8 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -14,10 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from StringIO import StringIO
-from cloudinit import util
from cloudinit import type_utils
+from cloudinit import util
import copy
+from StringIO import StringIO
def _make_header(text):
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 6bddf847..f52c41f0 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -114,6 +114,41 @@ class ResizeGrowPart(object):
return (before, get_size(partdev))
+class ResizeGpart(object):
+ def available(self):
+ if not util.which('gpart'):
+ return False
+ return True
+
+ def resize(self, diskdev, partnum, partdev):
+ """
+ GPT disks store metadata at the beginning (primary) and at the
+ end (secondary) of the disk. When launching an image with a
+ larger disk compared to the original image, the secondary copy
+        is lost. Thus, the metadata will be marked CORRUPT and needs to
+        be recovered.
+ """
+ try:
+ util.subp(["gpart", "recover", diskdev])
+ except util.ProcessExecutionError as e:
+ if e.exit_code != 0:
+ util.logexc(LOG, "Failed: gpart recover %s", diskdev)
+ raise ResizeFailedException(e)
+
+ before = get_size(partdev)
+ try:
+ util.subp(["gpart", "resize", "-i", partnum, diskdev])
+ except util.ProcessExecutionError as e:
+ util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
+ raise ResizeFailedException(e)
+
+ # Since growing the FS requires a reboot, make sure we reboot
+ # first when this module has finished.
+ open('/var/run/reboot-required', 'a').close()
+
+ return (before, get_size(partdev))
+
+
def get_size(filename):
fd = os.open(filename, os.O_RDONLY)
try:
@@ -132,6 +167,12 @@ def device_part_info(devpath):
bname = os.path.basename(rpath)
syspath = "/sys/class/block/%s" % bname
+ # FreeBSD doesn't know of sysfs so just get everything we need from
+ # the device, like /dev/vtbd0p2.
+ if util.system_info()["platform"].startswith('FreeBSD'):
+ m = re.search('^(/dev/.+)p([0-9])$', devpath)
+ return (m.group(1), m.group(2))
+
if not os.path.exists(syspath):
raise ValueError("%s had no syspath (%s)" % (devpath, syspath))
@@ -182,7 +223,8 @@ def resize_devices(resizer, devices):
"stat of '%s' failed: %s" % (blockdev, e),))
continue
- if not stat.S_ISBLK(statret.st_mode):
+ if (not stat.S_ISBLK(statret.st_mode) and
+ not stat.S_ISCHR(statret.st_mode)):
info.append((devent, RESIZE.SKIPPED,
"device '%s' not a block device" % blockdev,))
continue
@@ -255,4 +297,4 @@ def handle(_name, cfg, _cloud, log, _args):
else:
log.debug("'%s' %s: %s" % (entry, action, msg))
-RESIZERS = (('growpart', ResizeGrowPart),)
+RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart))
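
For illustration, a minimal sketch of the FreeBSD branch of device_part_info() above; sysfs is unavailable there, so the disk and partition number are taken straight from a path such as the (hypothetical) /dev/vtbd0p2:

    import re

    # /dev/vtbd0p2 -> disk /dev/vtbd0, partition number 2
    m = re.search('^(/dev/.+)p([0-9])$', '/dev/vtbd0p2')
    assert (m.group(1), m.group(2)) == ('/dev/vtbd0', '2')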
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index e3150808..561c5abd 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -22,6 +22,7 @@ from cloudinit import util
import errno
import os
import re
+import signal
import subprocess
import time
@@ -30,6 +31,24 @@ frequency = PER_INSTANCE
EXIT_FAIL = 254
+def givecmdline(pid):
+ # Returns the cmdline for the given process id. In Linux we can use procfs
+ # for this but on BSD there is /usr/bin/procstat.
+ try:
+ # Example output from procstat -c 1
+ # PID COMM ARGS
+ # 1 init /bin/init --
+ if util.system_info()["platform"].startswith('FreeBSD'):
+ (output, _err) = util.subp(['procstat', '-c', str(pid)])
+ line = output.splitlines()[1]
+ m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line)
+ return m.group(2)
+ else:
+ return util.load_file("/proc/%s/cmdline" % pid)
+ except IOError:
+ return None
+
+
def handle(_name, cfg, _cloud, log, _args):
try:
@@ -42,8 +61,8 @@ def handle(_name, cfg, _cloud, log, _args):
return
mypid = os.getpid()
- cmdline = util.load_file("/proc/%s/cmdline" % mypid)
+ cmdline = givecmdline(mypid)
if not cmdline:
log.warn("power_state: failed to get cmdline of current process")
return
@@ -119,8 +138,6 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
msg = None
end_time = time.time() + timeout
- cmdline_f = "/proc/%s/cmdline" % pid
-
def fatal(msg):
if log:
log.warn(msg)
@@ -134,16 +151,14 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
break
try:
- cmdline = ""
- with open(cmdline_f) as fp:
- cmdline = fp.read()
+ cmdline = givecmdline(pid)
if cmdline != pidcmdline:
msg = "cmdline changed for %s [now: %s]" % (pid, cmdline)
break
except IOError as ioerr:
if ioerr.errno in known_errnos:
- msg = "pidfile '%s' gone [%d]" % (cmdline_f, ioerr.errno)
+ msg = "pidfile gone [%d]" % ioerr.errno
else:
fatal("IOError during wait: %s" % ioerr)
break
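
A small sketch of the procstat parsing that givecmdline() performs on FreeBSD, using the sample output quoted in its comment:

    import re

    output = ("  PID COMM             ARGS\n"
              "    1 init             /bin/init --\n")
    line = output.splitlines()[1]
    m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
    assert m.group(2) == '/bin/init --'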
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 56040fdd..be406034 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -39,6 +39,10 @@ def _resize_ext(mount_point, devpth): # pylint: disable=W0613
def _resize_xfs(mount_point, devpth): # pylint: disable=W0613
return ('xfs_growfs', devpth)
+
+def _resize_ufs(mount_point, devpth): # pylint: disable=W0613
+ return ('growfs', devpth)
+
# Do not use a dictionary as these commands should be able to be used
# for multiple filesystem types if possible, e.g. one command for
# ext2, ext3 and ext4.
@@ -46,11 +50,31 @@ RESIZE_FS_PREFIXES_CMDS = [
('btrfs', _resize_btrfs),
('ext', _resize_ext),
('xfs', _resize_xfs),
+ ('ufs', _resize_ufs),
]
NOBLOCK = "noblock"
+def rootdev_from_cmdline(cmdline):
+ found = None
+ for tok in cmdline.split():
+ if tok.startswith("root="):
+ found = tok[5:]
+ break
+ if found is None:
+ return None
+
+ if found.startswith("/dev/"):
+ return found
+ if found.startswith("LABEL="):
+ return "/dev/disk/by-label/" + found[len("LABEL="):]
+ if found.startswith("UUID="):
+ return "/dev/disk/by-uuid/" + found[len("UUID="):]
+
+ return "/dev/" + found
+
+
def handle(name, cfg, _cloud, log, args):
if len(args) != 0:
resize_root = args[0]
@@ -78,10 +102,20 @@ def handle(name, cfg, _cloud, log, args):
info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
log.debug("resize_info: %s" % info)
+ container = util.is_container()
+
+ if (devpth == "/dev/root" and not os.path.exists(devpth) and
+ not container):
+ devpth = rootdev_from_cmdline(util.get_cmdline())
+ if devpth is None:
+ log.warn("Unable to find device '/dev/root'")
+ return
+ log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)
+
try:
statret = os.stat(devpth)
except OSError as exc:
- if util.is_container() and exc.errno == errno.ENOENT:
+ if container and exc.errno == errno.ENOENT:
log.debug("Device '%s' did not exist in container. "
"cannot resize: %s" % (devpth, info))
elif exc.errno == errno.ENOENT:
@@ -91,8 +125,8 @@ def handle(name, cfg, _cloud, log, args):
raise exc
return
- if not stat.S_ISBLK(statret.st_mode):
- if util.is_container():
+ if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
+ if container:
log.debug("device '%s' not a block device in container."
" cannot resize: %s" % (devpth, info))
else:
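
Illustrative inputs and outputs for rootdev_from_cmdline() above; the cmdline strings are hypothetical, and the import assumes the module path used in this tree:

    from cloudinit.config.cc_resizefs import rootdev_from_cmdline

    # "root=/dev/vda1 ro"          -> "/dev/vda1"
    # "root=LABEL=cloudimg-rootfs" -> "/dev/disk/by-label/cloudimg-rootfs"
    # "root=UUID=0db6a186 ro"      -> "/dev/disk/by-uuid/0db6a186"
    # "ro quiet"                   -> None (no root= token)
    assert (rootdev_from_cmdline("ro root=LABEL=cloudimg-rootfs")
            == "/dev/disk/by-label/cloudimg-rootfs")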
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 74e95797..46b67fa3 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -39,6 +39,7 @@ from cloudinit.distros.parsers import hosts
OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
'redhat': ['fedora', 'rhel'],
+ 'freebsd': ['freebsd'],
'suse': ['sles']
}
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
new file mode 100644
index 00000000..afb502c9
--- /dev/null
+++ b/cloudinit/distros/freebsd.py
@@ -0,0 +1,259 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2014 Harm Weites
+#
+# Author: Harm Weites <harm@weites.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from StringIO import StringIO
+
+import re
+
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import ssh_util
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(distros.Distro):
+ rc_conf_fn = "/etc/rc.conf"
+ login_conf_fn = '/etc/login.conf'
+ login_conf_fn_bak = '/etc/login.conf.orig'
+
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+ # This will be used to restrict certain
+        # calls from repeatedly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+ self.osfamily = 'freebsd'
+
+ # Updates a key in /etc/rc.conf.
+ def updatercconf(self, key, value):
+ LOG.debug("updatercconf: %s => %s", key, value)
+ conf = self.loadrcconf()
+ config_changed = False
+ for item in conf:
+ if item == key and conf[item] != value:
+ conf[item] = value
+ LOG.debug("[rc.conf]: Value %s for key %s needs to be changed",
+ value, key)
+ config_changed = True
+
+ if config_changed:
+ LOG.debug("Writing new %s file", self.rc_conf_fn)
+ buf = StringIO()
+ for keyval in conf.items():
+ buf.write("%s=%s\n" % keyval)
+ util.write_file(self.rc_conf_fn, buf.getvalue())
+
+ # Load the contents of /etc/rc.conf and store all keys in a dict.
+ def loadrcconf(self):
+ conf = {}
+ lines = util.load_file(self.rc_conf_fn).splitlines()
+ for line in lines:
+ tok = line.split('=')
+ conf[tok[0]] = tok[1].rstrip()
+ return conf
+
+ def readrcconf(self, key):
+ conf = self.loadrcconf()
+ try:
+ val = conf[key]
+ except KeyError:
+ val = None
+ return val
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname()
+ return ('rc.conf', sys_hostname)
+
+ def _read_hostname(self, filename, default=None):
+ hostname = None
+ try:
+ hostname = self.readrcconf('hostname')
+ except IOError:
+ pass
+ if not hostname:
+ return default
+ return hostname
+
+ def _select_hostname(self, hostname, fqdn):
+ if not hostname:
+ return fqdn
+ return hostname
+
+ def _write_hostname(self, hostname, filename):
+ self.updatercconf('hostname', hostname)
+
+ def create_group(self, name, members):
+ group_add_cmd = ['pw', '-n', name]
+ if util.is_group(name):
+ LOG.warn("Skipping creation of existing group '%s'", name)
+ else:
+ try:
+ util.subp(group_add_cmd)
+ LOG.info("Created new group %s", name)
+ except Exception as e:
+ util.logexc(LOG, "Failed to create group %s", name)
+ raise e
+
+ if len(members) > 0:
+ for member in members:
+ if not util.is_user(member):
+ LOG.warn("Unable to add group member '%s' to group '%s'"
+ "; user does not exist.", member, name)
+ continue
+ try:
+ util.subp(['pw', 'usermod', '-n', name, '-G', member])
+ LOG.info("Added user '%s' to group '%s'", member, name)
+ except Exception:
+ util.logexc(LOG, "Failed to add user '%s' to group '%s'",
+ member, name)
+
+ def add_user(self, name, **kwargs):
+ if util.is_user(name):
+ LOG.info("User %s already exists, skipping.", name)
+ return False
+
+ adduser_cmd = ['pw', 'useradd', '-n', name]
+ log_adduser_cmd = ['pw', 'useradd', '-n', name]
+
+ adduser_opts = {
+ "homedir": '-d',
+ "gecos": '-c',
+ "primary_group": '-g',
+ "groups": '-G',
+ "passwd": '-h',
+ "shell": '-s',
+ "inactive": '-E',
+ }
+ adduser_flags = {
+ "no_user_group": '--no-user-group',
+ "system": '--system',
+ "no_log_init": '--no-log-init',
+ }
+
+ redact_opts = ['passwd']
+
+ for key, val in kwargs.iteritems():
+ if key in adduser_opts and val and isinstance(val, basestring):
+ adduser_cmd.extend([adduser_opts[key], val])
+
+ # Redact certain fields from the logs
+ if key in redact_opts:
+ log_adduser_cmd.extend([adduser_opts[key], 'REDACTED'])
+ else:
+ log_adduser_cmd.extend([adduser_opts[key], val])
+
+ elif key in adduser_flags and val:
+ adduser_cmd.append(adduser_flags[key])
+ log_adduser_cmd.append(adduser_flags[key])
+
+ if 'no_create_home' in kwargs or 'system' in kwargs:
+ adduser_cmd.append('-d/nonexistent')
+ log_adduser_cmd.append('-d/nonexistent')
+ else:
+ adduser_cmd.append('-d/usr/home/%s' % name)
+ adduser_cmd.append('-m')
+ log_adduser_cmd.append('-d/usr/home/%s' % name)
+ log_adduser_cmd.append('-m')
+
+ # Run the command
+ LOG.info("Adding user %s", name)
+ try:
+ util.subp(adduser_cmd, logstring=log_adduser_cmd)
+ except Exception as e:
+ util.logexc(LOG, "Failed to create user %s", name)
+ raise e
+
+ # TODO:
+ def set_passwd(self, user, passwd, hashed=False):
+ return False
+
+ def lock_passwd(self, name):
+ try:
+ util.subp(['pw', 'usermod', name, '-h', '-'])
+ except Exception as e:
+ util.logexc(LOG, "Failed to lock user %s", name)
+ raise e
+
+ # TODO:
+ def write_sudo_rules(self, name, rules, sudo_file=None):
+ LOG.debug("[write_sudo_rules] Name: %s", name)
+
+ def create_user(self, name, **kwargs):
+ self.add_user(name, **kwargs)
+
+ # Set password if plain-text password provided and non-empty
+ if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
+ self.set_passwd(name, kwargs['plain_text_passwd'])
+
+ # Default locking down the account. 'lock_passwd' defaults to True.
+ # lock account unless lock_password is False.
+ if kwargs.get('lock_passwd', True):
+ self.lock_passwd(name)
+
+ # Configure sudo access
+ if 'sudo' in kwargs:
+ self.write_sudo_rules(name, kwargs['sudo'])
+
+ # Import SSH keys
+ if 'ssh_authorized_keys' in kwargs:
+ keys = set(kwargs['ssh_authorized_keys']) or []
+ ssh_util.setup_user_keys(keys, name, options=None)
+
+ def _write_network(self, settings):
+ return
+
+ def apply_locale(self, locale, out_fn=None):
+        # Adjust the locale value to the new value
+ newconf = StringIO()
+ for line in util.load_file(self.login_conf_fn).splitlines():
+ newconf.write(re.sub(r'^default:',
+ r'default:lang=%s:' % locale, line))
+ newconf.write("\n")
+
+ # Make a backup of login.conf.
+ util.copy(self.login_conf_fn, self.login_conf_fn_bak)
+
+ # And write the new login.conf.
+ util.write_file(self.login_conf_fn, newconf.getvalue())
+
+ try:
+ LOG.debug("Running cap_mkdb for %s", locale)
+ util.subp(['cap_mkdb', self.login_conf_fn])
+ except util.ProcessExecutionError:
+ # cap_mkdb failed, so restore the backup.
+ util.logexc(LOG, "Failed to apply locale %s", locale)
+ try:
+ util.copy(self.login_conf_fn_bak, self.login_conf_fn)
+ except IOError:
+ util.logexc(LOG, "Failed to restore %s backup",
+ self.login_conf_fn)
+
+ def install_packages(self, pkglist):
+ return
+
+ def package_command(self, cmd, args=None, pkgs=None):
+ return
+
+ def set_timezone(self, tz):
+ return
+
+ def update_package_sources(self):
+ return
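
A sketch of the rc.conf parsing that loadrcconf() does (the key/value pair is hypothetical); note that splitting on '=' keeps the quoting as-is and would truncate values that themselves contain '=':

    # /etc/rc.conf line:  hostname="freebsd-cloud"
    line = 'hostname="freebsd-cloud"'
    tok = line.split('=')
    conf = {tok[0]: tok[1].rstrip()}
    assert conf == {'hostname': '"freebsd-cloud"'}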
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
new file mode 100644
index 00000000..b9bcfd8b
--- /dev/null
+++ b/cloudinit/distros/net_util.py
@@ -0,0 +1,163 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+# This is a util function to translate debian based distro interface blobs as
+# given in /etc/network/interfaces to a *somewhat* agnostic format for
+# distributions that use other formats.
+#
+# TODO(harlowja) remove when we have python-netcf active...
+#
+# The format is the following:
+# {
+# <device-name>: {
+# # All optional (if not existent in original format)
+# "netmask": <ip>,
+# "broadcast": <ip>,
+# "gateway": <ip>,
+# "address": <ip>,
+# "bootproto": "static"|"dhcp",
+# "dns-search": <hostname>,
+# "hwaddress": <mac-address>,
+# "auto": True (or non-existent),
+# "dns-nameservers": [<ip/hostname>, ...],
+# }
+# }
+#
+# Things to note: comments are removed, and if an ubuntu/debian interface
+# is marked as auto then only the first segment is retained, i.e.
+# 'auto eth0 eth0:1' just marks eth0 as auto (not eth0:1).
+#
+# Example input:
+#
+# auto lo
+# iface lo inet loopback
+#
+# auto eth0
+# iface eth0 inet static
+# address 10.0.0.1
+# netmask 255.255.252.0
+# broadcast 10.0.0.255
+# gateway 10.0.0.2
+# dns-nameservers 98.0.0.1 98.0.0.2
+#
+# Example output:
+# {
+# "lo": {
+# "auto": true
+# },
+# "eth0": {
+# "auto": true,
+# "dns-nameservers": [
+# "98.0.0.1",
+# "98.0.0.2"
+# ],
+# "broadcast": "10.0.0.255",
+# "netmask": "255.255.252.0",
+# "bootproto": "static",
+# "address": "10.0.0.1",
+# "gateway": "10.0.0.2"
+# }
+# }
+
+def translate_network(settings):
+ # Get the standard cmd, args from the ubuntu format
+ entries = []
+ for line in settings.splitlines():
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+ split_up = line.split(None, 1)
+ if len(split_up) <= 1:
+ continue
+ entries.append(split_up)
+ # Figure out where each iface section is
+ ifaces = []
+ consume = {}
+ for (cmd, args) in entries:
+ if cmd == 'iface':
+ if consume:
+ ifaces.append(consume)
+ consume = {}
+ consume[cmd] = args
+ else:
+ consume[cmd] = args
+ # Check if anything left over to consume
+ absorb = False
+ for (cmd, args) in consume.iteritems():
+ if cmd == 'iface':
+ absorb = True
+ if absorb:
+ ifaces.append(consume)
+ # Now translate
+ real_ifaces = {}
+ for info in ifaces:
+ if 'iface' not in info:
+ continue
+ iface_details = info['iface'].split(None)
+ dev_name = None
+ if len(iface_details) >= 1:
+ dev = iface_details[0].strip().lower()
+ if dev:
+ dev_name = dev
+ if not dev_name:
+ continue
+ iface_info = {}
+ if len(iface_details) >= 3:
+ proto_type = iface_details[2].strip().lower()
+ # Seems like this can be 'loopback' which we don't
+ # really care about
+ if proto_type in ['dhcp', 'static']:
+ iface_info['bootproto'] = proto_type
+ # These can just be copied over
+ for k in ['netmask', 'address', 'gateway', 'broadcast']:
+ if k in info:
+ val = info[k].strip().lower()
+ if val:
+ iface_info[k] = val
+ # Name server info provided??
+ if 'dns-nameservers' in info:
+ iface_info['dns-nameservers'] = info['dns-nameservers'].split()
+ # Name server search info provided??
+ if 'dns-search' in info:
+ iface_info['dns-search'] = info['dns-search'].split()
+ # Is any mac address spoofing going on??
+ if 'hwaddress' in info:
+ hw_info = info['hwaddress'].lower().strip()
+ hw_split = hw_info.split(None, 1)
+ if len(hw_split) == 2 and hw_split[0].startswith('ether'):
+ hw_addr = hw_split[1]
+ if hw_addr:
+ iface_info['hwaddress'] = hw_addr
+ real_ifaces[dev_name] = iface_info
+ # Check for those that should be started on boot via 'auto'
+ for (cmd, args) in entries:
+ if cmd == 'auto':
+ # Seems like auto can be like 'auto eth0 eth0:1' so just get the
+ # first part out as the device name
+ args = args.split(None)
+ if not args:
+ continue
+ dev_name = args[0].strip().lower()
+ if dev_name in real_ifaces:
+ real_ifaces[dev_name]['auto'] = True
+ return real_ifaces
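
A usage sketch for the relocated helper, feeding it a fragment of the example input from the module comment above:

    from cloudinit.distros import net_util

    settings = "\n".join([
        "auto eth0",
        "iface eth0 inet static",
        "    address 10.0.0.1",
        "    netmask 255.255.252.0",
    ])
    ifaces = net_util.translate_network(settings)
    assert ifaces['eth0']['bootproto'] == 'static'
    assert ifaces['eth0']['auto'] is True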
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 30195384..e8abf111 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -25,6 +25,7 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.distros import net_util
from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
@@ -63,7 +64,7 @@ class Distro(distros.Distro):
def _write_network(self, settings):
# TODO(harlowja) fix this... since this is the ubuntu format
- entries = rhel_util.translate_network(settings)
+ entries = net_util.translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
# Make the intermediate format as the rhel format...
diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py
index 1aba58b8..063d536e 100644
--- a/cloudinit/distros/rhel_util.py
+++ b/cloudinit/distros/rhel_util.py
@@ -30,94 +30,6 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
-# This is a util function to translate Debian based distro interface blobs as
-# given in /etc/network/interfaces to an equivalent format for distributions
-# that use ifcfg-* style (Red Hat and SUSE).
-# TODO(harlowja) remove when we have python-netcf active...
-def translate_network(settings):
- # Get the standard cmd, args from the ubuntu format
- entries = []
- for line in settings.splitlines():
- line = line.strip()
- if not line or line.startswith("#"):
- continue
- split_up = line.split(None, 1)
- if len(split_up) <= 1:
- continue
- entries.append(split_up)
- # Figure out where each iface section is
- ifaces = []
- consume = {}
- for (cmd, args) in entries:
- if cmd == 'iface':
- if consume:
- ifaces.append(consume)
- consume = {}
- consume[cmd] = args
- else:
- consume[cmd] = args
- # Check if anything left over to consume
- absorb = False
- for (cmd, args) in consume.iteritems():
- if cmd == 'iface':
- absorb = True
- if absorb:
- ifaces.append(consume)
- # Now translate
- real_ifaces = {}
- for info in ifaces:
- if 'iface' not in info:
- continue
- iface_details = info['iface'].split(None)
- dev_name = None
- if len(iface_details) >= 1:
- dev = iface_details[0].strip().lower()
- if dev:
- dev_name = dev
- if not dev_name:
- continue
- iface_info = {}
- if len(iface_details) >= 3:
- proto_type = iface_details[2].strip().lower()
- # Seems like this can be 'loopback' which we don't
- # really care about
- if proto_type in ['dhcp', 'static']:
- iface_info['bootproto'] = proto_type
- # These can just be copied over
- for k in ['netmask', 'address', 'gateway', 'broadcast']:
- if k in info:
- val = info[k].strip().lower()
- if val:
- iface_info[k] = val
- # Name server info provided??
- if 'dns-nameservers' in info:
- iface_info['dns-nameservers'] = info['dns-nameservers'].split()
- # Name server search info provided??
- if 'dns-search' in info:
- iface_info['dns-search'] = info['dns-search'].split()
- # Is any mac address spoofing going on??
- if 'hwaddress' in info:
- hw_info = info['hwaddress'].lower().strip()
- hw_split = hw_info.split(None, 1)
- if len(hw_split) == 2 and hw_split[0].startswith('ether'):
- hw_addr = hw_split[1]
- if hw_addr:
- iface_info['hwaddress'] = hw_addr
- real_ifaces[dev_name] = iface_info
- # Check for those that should be started on boot via 'auto'
- for (cmd, args) in entries:
- if cmd == 'auto':
- # Seems like auto can be like 'auto eth0 eth0:1' so just get the
- # first part out as the device name
- args = args.split(None)
- if not args:
- continue
- dev_name = args[0].strip().lower()
- if dev_name in real_ifaces:
- real_ifaces[dev_name]['auto'] = True
- return real_ifaces
-
-
# Helper function to update a RHEL/SUSE /etc/sysconfig/* file
def update_sysconfig_file(fn, adjustments, allow_empty=False):
if not adjustments:
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
index f2ac4efc..9788a1ba 100644
--- a/cloudinit/distros/sles.py
+++ b/cloudinit/distros/sles.py
@@ -26,6 +26,7 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.distros import net_util
from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
@@ -54,7 +55,7 @@ class Distro(distros.Distro):
def _write_network(self, settings):
# Convert debian settings to ifcfg format
- entries = rhel_util.translate_network(settings)
+ entries = net_util.translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
# Make the intermediate format as the suse format...
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 80736a8f..91cba20f 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import httplib
from urlparse import (urlparse, urlunparse)
import functools
@@ -26,6 +27,7 @@ from cloudinit import url_helper
from cloudinit import util
LOG = logging.getLogger(__name__)
+SKIP_USERDATA_CODES = frozenset([httplib.NOT_FOUND])
def maybe_json_object(text):
@@ -128,20 +130,38 @@ class MetadataMaterializer(object):
return joined
+def _skip_retry_on_codes(status_codes, _request_args, cause):
+ """Returns if a request should retry based on a given set of codes that
+ case retrying to be stopped/skipped.
+ """
+ if cause.code in status_codes:
+ return False
+ return True
+
+
def get_instance_userdata(api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5):
ud_url = url_helper.combine_url(metadata_address, api_version)
ud_url = url_helper.combine_url(ud_url, 'user-data')
+ user_data = ''
try:
+        # It is ok for userdata to not exist (that's why we are stopping if
+ # NOT_FOUND occurs) and just in that case returning an empty string.
+ exception_cb = functools.partial(_skip_retry_on_codes,
+ SKIP_USERDATA_CODES)
response = util.read_file_or_url(ud_url,
ssl_details=ssl_details,
timeout=timeout,
- retries=retries)
- return str(response)
+ retries=retries,
+ exception_cb=exception_cb)
+ user_data = str(response)
+ except url_helper.UrlError as e:
+ if e.code not in SKIP_USERDATA_CODES:
+ util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
except Exception:
util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
- return ''
+ return user_data
def get_instance_metadata(api_version='latest',
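
How the skip-retry callback plugs into the fetch, mirroring get_instance_userdata() above; the FakeCause object here is illustrative only, standing in for the UrlError that readurl() would pass:

    import functools
    from cloudinit import ec2_utils

    exception_cb = functools.partial(ec2_utils._skip_retry_on_codes,
                                     ec2_utils.SKIP_USERDATA_CODES)

    class FakeCause(object):
        code = 404  # httplib.NOT_FOUND

    # 404 is in SKIP_USERDATA_CODES, so the callback returns False and
    # readurl() gives up immediately instead of sleeping and retrying.
    assert exception_cb({}, FakeCause()) is False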
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index feba5a62..ac3c011f 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -21,6 +21,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cloudinit.util as util
+import re
from prettytable import PrettyTable
@@ -40,27 +41,40 @@ def netdev_info(empty=""):
toks = line.lower().strip().split()
if toks[0] == "up":
devs[curdev]['up'] = True
+ # If the output of ifconfig doesn't contain the required info in the
+ # obvious place, use a regex filter to be sure.
+ elif len(toks) > 1:
+ if re.search(r"flags=\d+<up,", toks[1]):
+ devs[curdev]['up'] = True
fieldpost = ""
if toks[0] == "inet6":
fieldpost = "6"
for i in range(len(toks)):
- if toks[i] == "hwaddr":
+ if toks[i] == "hwaddr" or toks[i] == "ether":
try:
devs[curdev]["hwaddr"] = toks[i + 1]
except IndexError:
pass
- for field in ("addr", "bcast", "mask"):
+
+ # Couple the different items we're interested in with the correct
+ # field since FreeBSD/CentOS/Fedora differ in the output.
+ ifconfigfields = {
+ "addr:": "addr", "inet": "addr",
+ "bcast:": "bcast", "broadcast": "bcast",
+ "mask:": "mask", "netmask": "mask"
+ }
+ for origfield, field in ifconfigfields.items():
target = "%s%s" % (field, fieldpost)
if devs[curdev].get(target, ""):
continue
- if toks[i] == "%s:" % field:
+ if toks[i] == "%s" % origfield:
try:
devs[curdev][target] = toks[i + 1]
except IndexError:
pass
- elif toks[i].startswith("%s:" % field):
+ elif toks[i].startswith("%s" % origfield):
devs[curdev][target] = toks[i][len(field) + 1:]
if empty != "":
@@ -73,15 +87,32 @@ def netdev_info(empty=""):
def route_info():
- (route_out, _err) = util.subp(["route", "-n"])
+ (route_out, _err) = util.subp(["netstat", "-rn"])
routes = []
entries = route_out.splitlines()[1:]
for line in entries:
if not line:
continue
toks = line.split()
- if len(toks) < 8 or toks[0] == "Kernel" or toks[0] == "Destination":
+
+ # FreeBSD shows 6 items in the routing table:
+ # Destination Gateway Flags Refs Use Netif Expire
+ # default 10.65.0.1 UGS 0 34920 vtnet0
+ #
+ # Linux netstat shows 2 more:
+ # Destination Gateway Genmask Flags MSS Window irtt Iface
+ # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
+ if (len(toks) < 6 or toks[0] == "Kernel" or
+ toks[0] == "Destination" or toks[0] == "Internet" or
+ toks[0] == "Internet6" or toks[0] == "Routing"):
continue
+
+ if len(toks) < 8:
+ toks.append("-")
+ toks.append("-")
+ toks[7] = toks[5]
+ toks[5] = "-"
+
entry = {
'destination': toks[0],
'gateway': toks[1],
@@ -92,6 +123,7 @@ def route_info():
'use': toks[6],
'iface': toks[7],
}
+
routes.append(entry)
return routes
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index c45a1119..1d30fe08 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -246,10 +246,13 @@ def find_candidate_devs(probe_optical=True):
# combine list of items by putting by-label items first
# followed by fstype items, but with dupes removed
- combined = (by_label + [d for d in by_fstype if d not in by_label])
+ candidates = (by_label + [d for d in by_fstype if d not in by_label])
- # we are looking for block device (sda, not sda1), ignore partitions
- return [d for d in combined if not util.is_partition(d)]
+    # We are looking for a block device or partition with the necessary
+    # label, or an unpartitioned block device (e.g. sda, not sda1)
+ devices = [d for d in candidates
+ if d in by_label or not util.is_partition(d)]
+ return devices
# Used to match classes to dependencies
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 4ef92a56..cbaac29f 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -50,40 +50,47 @@ class DataSourceNoCloud(sources.DataSource):
}
found = []
- md = {}
- ud = ""
+ mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""}
try:
# Parse the kernel command line, getting data passed in
+ md = {}
if parse_cmdline_data(self.cmdline_id, md):
found.append("cmdline")
+ mydata.update(md)
except:
util.logexc(LOG, "Unable to parse command line data")
return False
# Check to see if the seed dir has data.
- seedret = {}
- if util.read_optional_seed(seedret, base=self.seed_dir + "/"):
- md = util.mergemanydict([md, seedret['meta-data']])
- ud = seedret['user-data']
+ pp2d_kwargs = {'required': ['user-data', 'meta-data'],
+ 'optional': ['vendor-data']}
+
+ try:
+ seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs)
found.append(self.seed_dir)
- LOG.debug("Using seeded cache data from %s", self.seed_dir)
+ LOG.debug("Using seeded data from %s", self.seed_dir)
+ except ValueError as e:
+ pass
+
+ if self.seed_dir in found:
+ mydata = _merge_new_seed(mydata, seeded)
# If the datasource config had a 'seedfrom' entry, then that takes
# precedence over a 'seedfrom' that was found in a filesystem
# but not over external media
- if 'seedfrom' in self.ds_cfg and self.ds_cfg['seedfrom']:
- found.append("ds_config")
- md["seedfrom"] = self.ds_cfg['seedfrom']
+ if self.ds_cfg.get('seedfrom'):
+ found.append("ds_config_seedfrom")
+ mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']
- # if ds_cfg has 'user-data' and 'meta-data'
+ # fields appropriately named can also just come from the datasource
+ # config (ie, 'user-data', 'meta-data', 'vendor-data' there)
if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
- if self.ds_cfg['user-data']:
- ud = self.ds_cfg['user-data']
- if self.ds_cfg['meta-data'] is not False:
- md = util.mergemanydict([md, self.ds_cfg['meta-data']])
- if 'ds_config' not in found:
- found.append("ds_config")
+ mydata = _merge_new_seed(mydata, self.ds_cfg)
+ found.append("ds_config")
+
+ def _pp2d_callback(mp, data):
+        return util.pathprefix2dict(mp, **data)
label = self.ds_cfg.get('fs_label', "cidata")
if label is not None:
@@ -102,15 +109,21 @@ class DataSourceNoCloud(sources.DataSource):
try:
LOG.debug("Attempting to use data from %s", dev)
- (newmd, newud) = util.mount_cb(dev, util.read_seeded)
- md = util.mergemanydict([newmd, md])
- ud = newud
+ try:
+ seeded = util.mount_cb(dev, _pp2d_callback)
+ except ValueError as e:
+ if dev in label_list:
+                    LOG.warn("device %s with label=%s not a "
+                             "valid seed.", dev, label)
+ continue
+
+ mydata = _merge_new_seed(mydata, seeded)
# For seed from a device, the default mode is 'net'.
# that is more likely to be what is desired. If they want
# dsmode of local, then they must specify that.
- if 'dsmode' not in md:
- md['dsmode'] = "net"
+ if 'dsmode' not in mydata['meta-data']:
+            mydata['meta-data']['dsmode'] = "net"
LOG.debug("Using data from %s", dev)
found.append(dev)
@@ -133,8 +146,8 @@ class DataSourceNoCloud(sources.DataSource):
# attempt to seed the userdata / metadata from its value
# its primarily value is in allowing the user to type less
# on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
- if "seedfrom" in md:
- seedfrom = md["seedfrom"]
+ if "seedfrom" in mydata['meta-data']:
+ seedfrom = mydata['meta-data']["seedfrom"]
seedfound = False
for proto in self.supported_seed_starts:
if seedfrom.startswith(proto):
@@ -144,7 +157,7 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Seed from %s not supported by %s", seedfrom, self)
return False
- if 'network-interfaces' in md:
+ if 'network-interfaces' in mydata['meta-data']:
seeded_interfaces = self.dsmode
# This could throw errors, but the user told us to do it
@@ -153,25 +166,30 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Using seeded cache data from %s", seedfrom)
# Values in the command line override those from the seed
- md = util.mergemanydict([md, md_seed])
+ mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
+ md_seed])
+ mydata['user-data'] = ud
found.append(seedfrom)
# Now that we have exhausted any other places merge in the defaults
- md = util.mergemanydict([md, defaults])
+ mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
+ defaults])
# Update the network-interfaces if metadata had 'network-interfaces'
# entry and this is the local datasource, or 'seedfrom' was used
# and the source of the seed was self.dsmode
# ('local' for NoCloud, 'net' for NoCloudNet')
- if ('network-interfaces' in md and
+ if ('network-interfaces' in mydata['meta-data'] and
(self.dsmode in ("local", seeded_interfaces))):
LOG.debug("Updating network interfaces from %s", self)
- self.distro.apply_network(md['network-interfaces'])
+ self.distro.apply_network(
+ mydata['meta-data']['network-interfaces'])
- if md['dsmode'] == self.dsmode:
+ if mydata['meta-data']['dsmode'] == self.dsmode:
self.seed = ",".join(found)
- self.metadata = md
- self.userdata_raw = ud
+ self.metadata = mydata['meta-data']
+ self.userdata_raw = mydata['user-data']
+ self.vendordata = mydata['vendor-data']
return True
LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
@@ -222,6 +240,16 @@ def parse_cmdline_data(ds_id, fill, cmdline=None):
return True
+def _merge_new_seed(cur, seeded):
+ ret = cur.copy()
+ ret['meta-data'] = util.mergemanydict([cur['meta-data'],
+ util.load_yaml(seeded['meta-data'])])
+ ret['user-data'] = seeded['user-data']
+ if 'vendor-data' in seeded:
+ ret['vendor-data'] = seeded['vendor-data']
+ return ret
+
+
class DataSourceNoCloudNet(DataSourceNoCloud):
def __init__(self, sys_cfg, distro, paths):
DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
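
A hedged sketch of the new seed merging: _merge_new_seed() folds a pathprefix2dict()-style result (raw file contents, with meta-data as YAML text) into the running state. The sample values are hypothetical:

    from cloudinit.sources.DataSourceNoCloud import _merge_new_seed

    mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""}
    seeded = {'meta-data': "instance-id: iid-local01\n",
              'user-data': "#cloud-config\n{}\n"}
    merged = _merge_new_seed(mydata, seeded)
    assert merged['meta-data']['instance-id'] == 'iid-local01'
    assert merged['user-data'].startswith("#cloud-config")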
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 6593ce6e..140c7814 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -25,7 +25,9 @@
# requests on the console. For example, to get the hostname, you
# would send "GET hostname" on /dev/ttyS1.
#
-
+# Certain behavior is defined by the DataDictionary
+# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
+# Comments with "@datadictionary" are snippets of the definition
import base64
from cloudinit import log as logging
@@ -43,10 +45,11 @@ SMARTOS_ATTRIB_MAP = {
'local-hostname': ('hostname', True),
'public-keys': ('root_authorized_keys', True),
'user-script': ('user-script', False),
- 'user-data': ('user-data', False),
+ 'legacy-user-data': ('user-data', False),
+ 'user-data': ('cloud-init:user-data', False),
'iptables_disable': ('iptables_disable', True),
'motd_sys_info': ('motd_sys_info', True),
- 'availability_zone': ('datacenter_name', True),
+ 'availability_zone': ('sdc:datacenter_name', True),
'vendordata': ('sdc:operator-script', False),
}
@@ -71,7 +74,11 @@ BUILTIN_DS_CONFIG = {
'seed_timeout': 60,
'no_base64_decode': ['root_authorized_keys',
'motd_sys_info',
- 'iptables_disable'],
+ 'iptables_disable',
+ 'user-data',
+ 'user-script',
+ 'sdc:datacenter_name',
+ ],
'base64_keys': [],
'base64_all': False,
'disk_aliases': {'ephemeral0': '/dev/vdb'},
@@ -88,6 +95,11 @@ BUILTIN_CLOUD_CONFIG = {
'device': 'ephemeral0'}],
}
+# @datadictionary: this is legacy path for placing files from metadata
+# per the SmartOS location. It is not preferable, but is done for
+# legacy reasons
+LEGACY_USER_D = "/var/db"
+
class DataSourceSmartOS(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
@@ -107,6 +119,9 @@ class DataSourceSmartOS(sources.DataSource):
self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
self.b64_keys = self.ds_cfg.get('base64_keys')
self.b64_all = self.ds_cfg.get('base64_all')
+ self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
+ self.user_script_d = os.path.join(self.paths.get_cpath("scripts"),
+ 'per-boot')
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -144,14 +159,32 @@ class DataSourceSmartOS(sources.DataSource):
smartos_noun, strip = attribute
md[ci_noun] = self.query(smartos_noun, strip=strip)
+ # @datadictionary: This key may contain a program that is written
+ # to a file in the filesystem of the guest on each boot and then
+ # executed. It may be of any format that would be considered
+ # executable in the guest instance.
+ u_script = md.get('user-script')
+ u_script_f = "%s/99_user_script" % self.user_script_d
+ u_script_l = "%s/user-script" % LEGACY_USER_D
+ write_boot_content(u_script, u_script_f, link=u_script_l, shebang=True,
+ mode=0700)
+
+ # @datadictionary: This key has no defined format, but its value
+ # is written to the file /var/db/mdata-user-data on each boot prior
+ # to the phase that runs user-script. This file is not to be executed.
+ # This allows a configuration file of some kind to be injected into
+ # the machine to be consumed by the user-script when it runs.
+ u_data = md.get('legacy-user-data')
+ u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
+ write_boot_content(u_data, u_data_f)
+
+ # Handle the cloud-init regular meta
if not md['local-hostname']:
md['local-hostname'] = system_uuid
ud = None
if md['user-data']:
ud = md['user-data']
- elif md['user-script']:
- ud = md['user-script']
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
@@ -279,6 +312,62 @@ def dmi_data():
return (sys_uuid.lower().strip(), sys_type.strip())
+def write_boot_content(content, content_f, link=None, shebang=False,
+ mode=0400):
+ """
+ Write the content to content_f. Under the following rules:
+ 1. If no content, remove the file
+ 2. Write the content
+ 3. If executable and no file magic, add it
+ 4. If there is a link, create it
+
+ @param content: what to write
+ @param content_f: the file name
+ @param link: if defined, location to create a symlink to
+ @param shebang: if no file magic, set shebang
+ @param mode: file mode
+
+    Because of the way that cloud-init executes scripts (no shell),
+    a script will fail to execute if it does not have a shebang line
+    (file magic) set. If shebang=True, the script is checked for file
+    magic and, if missing, the SmartOS default of "#!/bin/bash" is added.
+ """
+
+ if not content and os.path.exists(content_f):
+ os.unlink(content_f)
+ if link and os.path.islink(link):
+ os.unlink(link)
+ if not content:
+ return
+
+ util.write_file(content_f, content, mode=mode)
+
+ if shebang and not content.startswith("#!"):
+ try:
+ cmd = ["file", "--brief", "--mime-type", content_f]
+ (f_type, _err) = util.subp(cmd)
+ LOG.debug("script %s mime type is %s", content_f, f_type)
+ if f_type.strip() == "text/plain":
+ new_content = "\n".join(["#!/bin/bash", content])
+ util.write_file(content_f, new_content, mode=mode)
+ LOG.debug("added shebang to file %s", content_f)
+
+        except Exception:
+            util.logexc(LOG, "Failed to identify script type for %s",
+                        content_f)
+
+ if link:
+ try:
+ if os.path.islink(link):
+ os.unlink(link)
+ if content and os.path.exists(content_f):
+ util.ensure_dir(os.path.dirname(link))
+ os.symlink(content_f, link)
+ except IOError as e:
+ util.logexc(LOG, "failed establishing content link", e)
+
+
# Used to match classes to dependencies
datasources = [
(DataSourceSmartOS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
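
A sketch of write_boot_content()'s shebang handling, using hypothetical /tmp paths; per its docstring, a plain-text script with no file magic gains the SmartOS default interpreter:

    from cloudinit.sources.DataSourceSmartOS import write_boot_content

    # user-script from metadata; file(1) reports text/plain, so the
    # written file is rewritten with "#!/bin/bash" prepended and
    # /tmp/user-script becomes a symlink to it.
    write_boot_content("echo hello", "/tmp/99_user_script",
                       link="/tmp/user-script", shebang=True, mode=0700)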
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 4b3bf62f..fef4d460 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -129,7 +129,7 @@ class DataSource(object):
# when the kernel named them 'vda' or 'xvda'
# we want to return the correct value for what will actually
# exist in this instance
- mappings = {"sd": ("vd", "xvd")}
+ mappings = {"sd": ("vd", "xvd", "vtb")}
for (nfrom, tlist) in mappings.iteritems():
if not short_name.startswith(nfrom):
continue
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 5c33d1e4..76a8e29b 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -20,6 +20,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import httplib
import time
import urllib
@@ -33,6 +34,8 @@ from cloudinit import version
LOG = logging.getLogger(__name__)
+NOT_FOUND = httplib.NOT_FOUND
+
# Check if requests has ssl support (added in requests >= 0.8.8)
SSL_ENABLED = False
CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0)
@@ -76,6 +79,31 @@ def combine_url(base, *add_ons):
return url
+# Made to have same accessors as UrlResponse so that the
+# read_file_or_url can return this or that object and the
+# 'user' of those objects will not need to know the difference.
+class StringResponse(object):
+ def __init__(self, contents, code=200):
+ self.code = code
+ self.headers = {}
+ self.contents = contents
+ self.url = None
+
+ def ok(self, *args, **kwargs): # pylint: disable=W0613
+ if self.code != 200:
+ return False
+ return True
+
+ def __str__(self):
+ return self.contents
+
+
+class FileResponse(StringResponse):
+ def __init__(self, path, contents, code=200):
+ StringResponse.__init__(self, contents, code=code)
+ self.url = path
+
+
class UrlResponse(object):
def __init__(self, response):
self._response = response
@@ -146,17 +174,19 @@ def existsurl(url, ssl_details=None, timeout=None):
def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
headers=None, headers_cb=None, ssl_details=None,
- check_status=True, allow_redirects=True):
+ check_status=True, allow_redirects=True, exception_cb=None):
return _readurl(url, data=data, timeout=timeout, retries=retries,
sec_between=sec_between, headers=headers,
headers_cb=headers_cb, ssl_details=ssl_details,
check_status=check_status,
- allow_redirects=allow_redirects)
+ allow_redirects=allow_redirects,
+ exception_cb=exception_cb)
def _readurl(url, data=None, timeout=None, retries=0, sec_between=1,
headers=None, headers_cb=None, ssl_details=None,
- check_status=True, allow_redirects=True, method='GET'):
+ check_status=True, allow_redirects=True, exception_cb=None,
+ method='GET'):
url = _cleanurl(url)
req_args = {
'url': url,
@@ -203,14 +233,13 @@ def _readurl(url, data=None, timeout=None, retries=0, sec_between=1,
# Handle retrying ourselves since the built-in support
# doesn't handle sleeping between tries...
for i in range(0, manual_tries):
+ req_args['headers'] = headers_cb(url)
+ filtered_req_args = {}
+ for (k, v) in req_args.items():
+ if k == 'data':
+ continue
+ filtered_req_args[k] = v
try:
- req_args['headers'] = headers_cb(url)
- filtered_req_args = {}
- for (k, v) in req_args.items():
- if k == 'data':
- continue
- filtered_req_args[k] = v
-
LOG.debug("[%s/%s] open '%s' with %s configuration", i,
manual_tries, url, filtered_req_args)
@@ -236,6 +265,8 @@ def _readurl(url, data=None, timeout=None, retries=0, sec_between=1,
# ssl exceptions are not going to get fixed by waiting a
# few seconds
break
+ if exception_cb and not exception_cb(req_args.copy(), excps[-1]):
+ break
if i + 1 < manual_tries and sec_between > 0:
LOG.debug("Please wait %s seconds while we wait to try again",
sec_between)
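
The point of moving StringResponse/FileResponse into url_helper: every response object now shares the same accessors, so callers of read_file_or_url() need not care whether content came from a file or an HTTP request. A quick sketch:

    from cloudinit import url_helper

    resp = url_helper.StringResponse("hello")
    assert resp.ok() and resp.code == 200 and str(resp) == "hello"

    fresp = url_helper.FileResponse("/tmp/f", "contents")
    assert fresp.url == "/tmp/f" and str(fresp) == "contents"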
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 3032ef70..de6487d8 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -311,7 +311,8 @@ class UserDataProcessor(object):
def _attach_part(self, outer_msg, part):
"""
Attach a message to an outer message. outermsg must be a MIMEMultipart.
- Modifies a header in the outer message to keep track of number of attachments.
+ Modifies a header in the outer message to keep track of number of
+ attachments.
"""
part_count = self._multi_part_count(outer_msg)
self._process_before_attach(part, part_count + 1)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 9bafd5b3..87b0c853 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -26,6 +26,7 @@ from StringIO import StringIO
import contextlib
import copy as obj_copy
+import ctypes
import errno
import glob
import grp
@@ -37,6 +38,7 @@ import os.path
import platform
import pwd
import random
+import re
import shutil
import socket
import stat
@@ -73,31 +75,6 @@ FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
CONTAINER_TESTS = ['running-in-container', 'lxc-is-container']
-# Made to have same accessors as UrlResponse so that the
-# read_file_or_url can return this or that object and the
-# 'user' of those objects will not need to know the difference.
-class StringResponse(object):
- def __init__(self, contents, code=200):
- self.code = code
- self.headers = {}
- self.contents = contents
- self.url = None
-
- def ok(self, *args, **kwargs): # pylint: disable=W0613
- if self.code != 200:
- return False
- return True
-
- def __str__(self):
- return self.contents
-
-
-class FileResponse(StringResponse):
- def __init__(self, path, contents, code=200):
- StringResponse.__init__(self, contents, code=code)
- self.url = path
-
-
class ProcessExecutionError(IOError):
MESSAGE_TMPL = ('%(description)s\n'
@@ -402,11 +379,11 @@ def is_ipv4(instr):
return False
try:
- toks = [x for x in toks if (int(x) < 256 and int(x) > 0)]
+ toks = [x for x in toks if int(x) < 256 and int(x) >= 0]
except:
return False
- return (len(toks) == 4)
+ return len(toks) == 4
def get_cfg_option_bool(yobj, key, default=False):
@@ -659,8 +636,8 @@ def read_optional_seed(fill, base="", ext="", timeout=5):
fill['user-data'] = ud
fill['meta-data'] = md
return True
- except IOError as e:
- if e.errno == errno.ENOENT:
+ except url_helper.UrlError as e:
+ if e.code == url_helper.NOT_FOUND:
return False
raise
@@ -699,7 +676,7 @@ def fetch_ssl_details(paths=None):
def read_file_or_url(url, timeout=5, retries=10,
headers=None, data=None, sec_between=1, ssl_details=None,
- headers_cb=None):
+ headers_cb=None, exception_cb=None):
url = url.lstrip()
if url.startswith("/"):
url = "file://%s" % url
@@ -707,7 +684,14 @@ def read_file_or_url(url, timeout=5, retries=10,
if data:
LOG.warn("Unable to post data to file resource %s", url)
file_path = url[len("file://"):]
- return FileResponse(file_path, contents=load_file(file_path))
+ try:
+ contents = load_file(file_path)
+ except IOError as e:
+ code = e.errno
+ if e.errno == errno.ENOENT:
+ code = url_helper.NOT_FOUND
+ raise url_helper.UrlError(cause=e, code=code, headers=None)
+ return url_helper.FileResponse(file_path, contents=contents)
else:
return url_helper.readurl(url,
timeout=timeout,
@@ -716,7 +700,8 @@ def read_file_or_url(url, timeout=5, retries=10,
headers_cb=headers_cb,
data=data,
sec_between=sec_between,
- ssl_details=ssl_details)
+ ssl_details=ssl_details,
+ exception_cb=exception_cb)
def load_yaml(blob, default=None, allowed=(dict,)):
@@ -885,8 +870,8 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
IP_address canonical_hostname [aliases...]
Fields of the entry are separated by any number of blanks and/or tab
- characters. Text from a "#" character until the end of the line is a
- comment, and is ignored. Host names may contain only alphanumeric
+ characters. Text from a "#" character until the end of the line is a
+ comment, and is ignored. Host names may contain only alphanumeric
characters, minus signs ("-"), and periods ("."). They must begin with
an alphabetic character and end with an alphanumeric character.
Optional aliases provide for name changes, alternate spellings, shorter
@@ -970,7 +955,7 @@ def is_resolvable(name):
pass
_DNS_REDIRECT_IP = badips
if badresults:
- LOG.debug("detected dns redirection: %s" % badresults)
+ LOG.debug("detected dns redirection: %s", badresults)
try:
result = socket.getaddrinfo(name, None)
@@ -997,7 +982,7 @@ def gethostbyaddr(ip):
def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
- return (is_resolvable(urlparse.urlparse(url).hostname))
+ return is_resolvable(urlparse.urlparse(url).hostname)
def search_for_mirror(candidates):
@@ -1322,11 +1307,26 @@ def mounts():
mounted = {}
try:
# Go through mounts to see what is already mounted
- mount_locs = load_file("/proc/mounts").splitlines()
+ if os.path.exists("/proc/mounts"):
+ mount_locs = load_file("/proc/mounts").splitlines()
+ method = 'proc'
+ else:
+ (mountoutput, _err) = subp("mount")
+ mount_locs = mountoutput.splitlines()
+ method = 'mount'
+ mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
for mpline in mount_locs:
- # Format at: man fstab
+ # Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
+ # FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
try:
- (dev, mp, fstype, opts, _freq, _passno) = mpline.split()
+ if method == 'proc':
+ (dev, mp, fstype, opts, _freq, _passno) = mpline.split()
+ else:
+ m = re.search(mountre, mpline)
+ dev = m.group(1)
+ mp = m.group(2)
+ fstype = m.group(3)
+ opts = m.group(4)
except:
continue
# If the name of the mount point contains spaces these
@@ -1337,9 +1337,9 @@ def mounts():
'mountpoint': mp,
'opts': opts,
}
- LOG.debug("Fetched %s mounts from %s", mounted, "/proc/mounts")
+ LOG.debug("Fetched %s mounts from %s", mounted, method)
except (IOError, OSError):
- logexc(LOG, "Failed fetching mount points from /proc/mounts")
+ logexc(LOG, "Failed fetching mount points")
return mounted
@@ -1396,7 +1396,7 @@ def get_builtin_cfg():
def sym_link(source, link):
- LOG.debug("Creating symbolic link from %r => %r" % (link, source))
+ LOG.debug("Creating symbolic link from %r => %r", link, source)
os.symlink(source, link)
@@ -1424,12 +1424,27 @@ def time_rfc2822():
def uptime():
uptime_str = '??'
+ method = 'unknown'
try:
- contents = load_file("/proc/uptime").strip()
- if contents:
- uptime_str = contents.split()[0]
+ if os.path.exists("/proc/uptime"):
+ method = '/proc/uptime'
+ contents = load_file("/proc/uptime").strip()
+ if contents:
+ uptime_str = contents.split()[0]
+ else:
+ method = 'ctypes'
+ libc = ctypes.CDLL('/lib/libc.so.7')
+ size = ctypes.c_size_t()
+ buf = ctypes.c_int()
+ size.value = ctypes.sizeof(buf)
+ libc.sysctlbyname("kern.boottime", ctypes.byref(buf),
+ ctypes.byref(size), None, 0)
+ now = time.time()
+ bootup = buf.value
+ uptime_str = now - bootup
+
except:
- logexc(LOG, "Unable to read uptime from /proc/uptime")
+ logexc(LOG, "Unable to read uptime using method: %s" % method)
return uptime_str
@@ -1768,6 +1783,19 @@ def parse_mtab(path):
return None
+def parse_mount(path):
+ (mountoutput, _err) = subp("mount")
+ mount_locs = mountoutput.splitlines()
+ for line in mount_locs:
+ m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
+ devpth = m.group(1)
+ mount_point = m.group(2)
+ fs_type = m.group(3)
+ if mount_point == path:
+ return devpth, fs_type, mount_point
+ return None
+
+
def get_mount_info(path, log=LOG):
# Use /proc/$$/mountinfo to find the device where path is mounted.
# This is done because with a btrfs filesystem using os.stat(path)
@@ -1801,8 +1829,10 @@ def get_mount_info(path, log=LOG):
if os.path.exists(mountinfo_path):
lines = load_file(mountinfo_path).splitlines()
return parse_mount_info(path, lines, log)
- else:
+ elif os.path.exists("/etc/mtab"):
return parse_mtab(path)
+ else:
+ return parse_mount(path)
def which(program):
@@ -1815,7 +1845,7 @@ def which(program):
if is_exe(program):
return program
else:
- for path in os.environ["PATH"].split(os.pathsep):
+ for path in os.environ.get("PATH", "").split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
@@ -1869,3 +1899,28 @@ def expand_dotted_devname(dotted):
return toks
else:
return (dotted, None)
+
+
+def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
+ # return a dictionary populated with keys in 'required' and 'optional'
+    # by reading files in base + delim + entry
+ if required is None:
+ required = []
+ if optional is None:
+ optional = []
+
+ missing = []
+ ret = {}
+ for f in required + optional:
+ try:
+ ret[f] = load_file(base + delim + f, quiet=False)
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ if f in required:
+ missing.append(f)
+
+ if len(missing):
+        raise ValueError("Missing required files: %s" % ','.join(missing))
+
+ return ret
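+
+# Illustrative usage only (the path and key names here are hypothetical):
+#   pathprefix2dict("/run/seed", required=['user-data'],
+#                   optional=['vendor-data'])
+# would return {'user-data': <contents>, 'vendor-data': <contents>},
+# and raise ValueError naming any missing required entries.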
diff --git a/doc/examples/cloud-config-landscape.txt b/doc/examples/cloud-config-landscape.txt
index e4d23cc9..74e07b62 100644
--- a/doc/examples/cloud-config-landscape.txt
+++ b/doc/examples/cloud-config-landscape.txt
@@ -6,6 +6,9 @@
#
# Note: 'tags' should be specified as a comma delimited string
# rather than a list.
+#
+# You can get example key/values by running 'landscape-config',
+# answering the questions, then looking at /etc/landscape/client.config
landscape:
client:
url: "https://landscape.canonical.com/message-system"
@@ -13,3 +16,7 @@ landscape:
data_path: "/var/lib/landscape/client"
http_proxy: "http://my.proxy.com/foobar"
tags: "server,cloud"
+    computer_title: footitle
+    https_proxy: fooproxy
+    registration_key: fookey
+    account_name: fooaccount
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index c9ae79f4..52a8f92b 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -1,4 +1,5 @@
-import sys, os
+import os
+import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst
index 8b63e520..e63f311f 100644
--- a/doc/sources/smartos/README.rst
+++ b/doc/sources/smartos/README.rst
@@ -16,11 +16,35 @@ responds with the status and if "SUCCESS" returns until a single ".\n".
New versions of the SmartOS tooling will include support for base64 encoded data.
-Userdata
---------
-
-In SmartOS parlance, user-data is a actually meta-data. This userdata can be
-provided as key-value pairs.
+Meta-data channels
+------------------
+
+Cloud-init supports three modes of delivering user/meta-data via the flexible
+channels of SmartOS.
+
+* user-data is written to /var/db/user-data
+ - per the spec, user-data is for consumption by the end-user, not provisioning
+ tools
+  - cloud-init entirely ignores this channel other than writing it to disk
+ - removal of the meta-data key means that /var/db/user-data gets removed
+  - a backup of the previous user-data is maintained as /var/db/user-data.<timestamp>
+ - <timestamp> is the epoch time when cloud-init ran
+
+* user-script is written to /var/lib/cloud/scripts/per-boot/99_user_data
+ - this is executed each boot
+ - a link is created to /var/db/user-script
+  - previous versions of the user-script are written to
+ /var/lib/cloud/scripts/per-boot.backup/99_user_script.<timestamp>.
+ - <timestamp> is the epoch time when cloud-init ran.
+ - when the 'user-script' meta-data key goes missing, the user-script is
+ removed from the file system, although a backup is maintained.
+  - if the script is not shebanged (i.e. does not start with #!<executable>)
+    or is not executable, cloud-init will add a shebang of "#!/bin/bash"
+    (see the example after this list)
+
+* cloud-init:user-data is treated as it is on other clouds.
+ - this channel is used for delivering _all_ cloud-init instructions
+ - scripts delivered over this channel must be well formed (i.e. must have
+ a shebang)
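+
+For example (an illustrative sketch, not part of the SmartOS spec), a
+user-script delivered without a shebang:
+
+  /bin/true
+
+would be written out by cloud-init as:
+
+  #!/bin/bash
+  /bin/true
+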
Cloud-init supports reading the traditional meta-data fields supported by the
SmartOS tools. These are:
@@ -32,19 +56,49 @@ SmartOS tools. These are:
Note: At this time iptables_disable and enable_motd_sys_info are read but
are not actioned.
-user-script
------------
-
-SmartOS traditionally supports sending over a user-script for execution at the
-rc.local level. Cloud-init supports running user-scripts as if they were
-cloud-init user-data. In this sense, anything with a shell interpreter
-directive will run.
-
-user-data and user-script
--------------------------
-
-In the event that a user defines the meta-data key of "user-data" it will
-always supersede any user-script data. This is for consistency.
+disabling user-script
+---------------------
+
+Cloud-init uses the per-boot script functionality to handle the execution
+of the user-script. If you want to prevent this, use a cloud-config of:
+
+#cloud-config
+cloud_final_modules:
+ - scripts-per-once
+ - scripts-per-instance
+ - scripts-user
+ - ssh-authkey-fingerprints
+ - keys-to-console
+ - phone-home
+ - final-message
+ - power-state-change
+
+Alternatively, you can use the JSON patch method:
+#cloud-config-jsonp
+[
+ { "op": "replace",
+ "path": "/cloud_final_modules",
+ "value": ["scripts-per-once",
+ "scripts-per-instance",
+ "scripts-user",
+ "ssh-authkey-fingerprints",
+ "keys-to-console",
+ "phone-home",
+ "final-message",
+ "power-state-change"]
+ }
+]
+
+The default cloud-config includes "scripts-per-boot". When you disable the
+per-boot script handling, cloud-init will still ingest and write the
+user-data, but will not execute it.
+
+Note: Unless you have an explicit use-case, it is recommended that you not
+ disable the per-boot script execution, especially if you are using
+ any of the life-cycle management features of SmartOS.
+
+The cloud-config needs to be delivered over the cloud-init:user-data channel
+in order for cloud-init to ingest it.
base64
------
@@ -54,6 +108,8 @@ are provided by SmartOS:
* root_authorized_keys
* enable_motd_sys_info
* iptables_disable
+ * user-data
+ * user-script
This list can be changed through system config of variable 'no_base64_decode'.
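+
+For example, a minimal sketch of such an override (the placement under the
+SmartOS datasource config is an assumption; adjust for your deployment):
+
+#cloud-config
+datasource:
+  SmartOS:
+    no_base64_decode: ['root_authorized_keys', 'user-script']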
diff --git a/setup.py b/setup.py
index 8d18b97e..9118e5f6 100755
--- a/setup.py
+++ b/setup.py
@@ -63,7 +63,7 @@ def tiny_p(cmd, capture=True):
(out, err) = sp.communicate()
ret = sp.returncode # pylint: disable=E1101
if ret not in [0]:
- raise RuntimeError("Failed running %s [rc=%s] (%s, %s)"
+ raise RuntimeError("Failed running %s [rc=%s] (%s, %s)"
% (cmd, ret, out, err))
return (out, err)
@@ -102,7 +102,7 @@ class InitsysInstallData(install):
" specifying a init system!") % (", ".join(INITSYS_TYPES)))
elif self.init_system:
self.distribution.data_files.append(
- (INITSYS_ROOTS[self.init_system],
+ (INITSYS_ROOTS[self.init_system],
INITSYS_FILES[self.init_system]))
# Force that command to reinitalize (with new file list)
self.distribution.reinitialize_command('install_data', True)
@@ -134,7 +134,7 @@ setuptools.setup(name='cloud-init',
[f for f in glob('doc/examples/seed/*') if is_f(f)]),
],
install_requires=read_requires(),
- cmdclass = {
+ cmdclass={
# Use a subclass for install that handles
# adding on the right init system configuration files
'install': InitsysInstallData,
diff --git a/sysvinit/freebsd/cloudconfig b/sysvinit/freebsd/cloudconfig
new file mode 100755
index 00000000..15d7ab95
--- /dev/null
+++ b/sysvinit/freebsd/cloudconfig
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+# PROVIDE: cloudconfig
+# REQUIRE: cloudinit cloudinitlocal
+# BEFORE: cloudfinal
+
+. /etc/rc.subr
+
+name="cloudconfig"
+command="/usr/bin/cloud-init"
+stop_cmd=":"
+rcvar="cloudinit_enable"
+start_precmd="cloudinit_override"
+start_cmd="cloudconfig_start"
+
+: ${cloudinit_config:="/etc/cloud/cloud.cfg"}
+
+cloudinit_override()
+{
+	# If a sysconfig/default variable override file exists, use it...
+ if [ -f /etc/default/cloud-init ]; then
+ . /etc/default/cloud-init
+ fi
+}
+
+cloudconfig_start()
+{
+ echo "${command} starting"
+ ${command} ${cloudinit_config} modules --mode config
+}
+
+load_rc_config $name
+run_rc_command "$1"
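+
+# Example (assumed, per standard rc.subr conventions): enable these
+# scripts at boot by adding to /etc/rc.conf:
+#   cloudinit_enable="YES"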
diff --git a/sysvinit/freebsd/cloudfinal b/sysvinit/freebsd/cloudfinal
new file mode 100755
index 00000000..49945ecd
--- /dev/null
+++ b/sysvinit/freebsd/cloudfinal
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+# PROVIDE: cloudfinal
+# REQUIRE: LOGIN cloudinit cloudconfig cloudinitlocal
+# REQUIRE: cron mail sshd swaplate
+
+. /etc/rc.subr
+
+name="cloudfinal"
+command="/usr/bin/cloud-init"
+stop_cmd=":"
+rcvar="cloudinit_enable"
+start_precmd="cloudinit_override"
+start_cmd="cloudfinal_start"
+
+: ${cloudinit_config:="/etc/cloud/cloud.cfg"}
+
+cloudinit_override()
+{
+	# If a sysconfig/default variable override file exists, use it...
+ if [ -f /etc/default/cloud-init ]; then
+ . /etc/default/cloud-init
+ fi
+}
+
+cloudfinal_start()
+{
+ echo -n "${command} starting"
+ ${command} ${cloudinit_config} modules --mode final
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit
new file mode 100755
index 00000000..8d5ff10e
--- /dev/null
+++ b/sysvinit/freebsd/cloudinit
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+# PROVIDE: cloudinit
+# REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal
+# BEFORE: cloudconfig cloudfinal
+
+. /etc/rc.subr
+
+name="cloudinit"
+command="/usr/bin/cloud-init"
+stop_cmd=":"
+rcvar="cloudinit_enable"
+start_precmd="cloudinit_override"
+start_cmd="cloudinit_start"
+
+: ${cloudinit_config:="/etc/cloud/cloud.cfg"}
+
+cloudinit_override()
+{
+	# If a sysconfig/default variable override file exists, use it...
+ if [ -f /etc/default/cloud-init ]; then
+ . /etc/default/cloud-init
+ fi
+}
+
+cloudinit_start()
+{
+ echo -n "${command} starting"
+ ${command} ${cloudinit_config} init
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/sysvinit/freebsd/cloudinitlocal b/sysvinit/freebsd/cloudinitlocal
new file mode 100755
index 00000000..b55705c0
--- /dev/null
+++ b/sysvinit/freebsd/cloudinitlocal
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+# PROVIDE: cloudinitlocal
+# REQUIRE: mountcritlocal
+# BEFORE: NETWORKING FILESYSTEMS cloudinit cloudconfig cloudfinal
+
+. /etc/rc.subr
+
+name="cloudinitlocal"
+command="/usr/bin/cloud-init"
+stop_cmd=":"
+rcvar="cloudinit_enable"
+start_precmd="cloudinit_override"
+start_cmd="cloudlocal_start"
+
+: ${cloudinit_config:="/etc/cloud/cloud.cfg"}
+
+cloudinit_override()
+{
+	# If a sysconfig/default variable override file exists, use it...
+ if [ -f /etc/default/cloud-init ]; then
+ . /etc/default/cloud-init
+ fi
+}
+
+cloudlocal_start()
+{
+ echo -n "${command} starting"
+ ${command} ${cloudinit_config} init --local
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index c0da0983..5b4f4208 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -187,7 +187,8 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
def populate_dir(path, files):
- os.makedirs(path)
+ if not os.path.exists(path):
+ os.makedirs(path)
for (name, content) in files.iteritems():
with open(os.path.join(path, name), "w") as fp:
fp.write(content)
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index b4b20e51..8c41c1ca 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -196,7 +196,7 @@ class TestCmdlineUrl(MockerTestCase):
mock_readurl = self.mocker.replace(url_helper.readurl,
passthrough=False)
mock_readurl(url, ARGS, KWARGS)
- self.mocker.result(util.StringResponse(payload))
+ self.mocker.result(url_helper.StringResponse(payload))
self.mocker.replay()
self.assertEqual((key, url, None),
@@ -212,7 +212,7 @@ class TestCmdlineUrl(MockerTestCase):
mock_readurl = self.mocker.replace(url_helper.readurl,
passthrough=False)
mock_readurl(url, ARGS, KWARGS)
- self.mocker.result(util.StringResponse(payload))
+ self.mocker.result(url_helper.StringResponse(payload))
self.mocker.replay()
self.assertEqual((key, url, payload),
@@ -225,7 +225,7 @@ class TestCmdlineUrl(MockerTestCase):
cmdline = "ro %s=%s bar=1" % (key, url)
self.mocker.replace(url_helper.readurl, passthrough=False)
- self.mocker.result(util.StringResponse(""))
+ self.mocker.result(url_helper.StringResponse(""))
self.mocker.replay()
self.assertEqual((None, None, None),
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index bd6cdd5d..937b88c1 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -288,10 +288,11 @@ class TestConfigDriveDataSource(MockerTestCase):
self.assertEqual(["/dev/vdb", "/dev/zdd"],
ds.find_candidate_devs())
- # verify that partitions are not considered
+        # verify that partitions with the correct label are considered.
devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
"TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]}
- self.assertEqual([], ds.find_candidate_devs())
+ self.assertEqual(["/dev/vdb3"],
+ ds.find_candidate_devs())
finally:
util.find_devs_with = orig_find_devs_with
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index 2007a6df..bd5d23fd 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -119,9 +119,10 @@ class TestMAASDataSource(mocker.MockerTestCase):
mock_request(url, headers=None, timeout=mocker.ANY,
data=mocker.ANY, sec_between=mocker.ANY,
ssl_details=mocker.ANY, retries=mocker.ANY,
- headers_cb=my_headers_cb)
+ headers_cb=my_headers_cb,
+ exception_cb=mocker.ANY)
resp = valid.get(key)
- self.mocker.result(util.StringResponse(resp))
+ self.mocker.result(url_helper.StringResponse(resp))
self.mocker.replay()
(userdata, metadata) = DataSourceMAAS.read_maas_seed_url(my_seed,
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 7328b240..af575a10 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -97,6 +97,41 @@ class TestNoCloudDataSource(MockerTestCase):
self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
self.assertTrue(ret)
+ def test_nocloud_seed_with_vendordata(self):
+ md = {'instance-id': 'IID', 'dsmode': 'local'}
+ ud = "USER_DATA_HERE"
+ vd = "THIS IS MY VENDOR_DATA"
+
+ populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
+ {'user-data': ud, 'meta-data': yaml.safe_dump(md),
+ 'vendor-data': vd})
+
+ sys_cfg = {
+ 'datasource': {'NoCloud': {'fs_label': None}}
+ }
+
+ ds = DataSourceNoCloud.DataSourceNoCloud
+
+ dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertEqual(dsrc.userdata_raw, ud)
+ self.assertEqual(dsrc.metadata, md)
+ self.assertEqual(dsrc.vendordata, vd)
+ self.assertTrue(ret)
+
+ def test_nocloud_no_vendordata(self):
+ populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
+ {'user-data': "ud", 'meta-data': "instance-id: IID\n"})
+
+ sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+
+ ds = DataSourceNoCloud.DataSourceNoCloud
+
+ dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertEqual(dsrc.userdata_raw, "ud")
+ self.assertFalse(dsrc.vendordata)
+ self.assertTrue(ret)
class TestParseCommandLineData(MockerTestCase):
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 956767d8..ae427bb5 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -27,6 +27,10 @@ from cloudinit import helpers
from cloudinit.sources import DataSourceSmartOS
from mocker import MockerTestCase
+import os
+import os.path
+import re
+import stat
import uuid
MOCK_RETURNS = {
@@ -35,7 +39,11 @@ MOCK_RETURNS = {
'disable_iptables_flag': None,
'enable_motd_sys_info': None,
'test-var1': 'some data',
- 'user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
+ 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
+ 'sdc:datacenter_name': 'somewhere2',
+ 'sdc:operator-script': '\n'.join(['bin/true', '']),
+ 'user-data': '\n'.join(['something', '']),
+ 'user-script': '\n'.join(['/bin/true', '']),
}
DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc')
@@ -101,6 +109,7 @@ class TestSmartOSDataSource(MockerTestCase):
def setUp(self):
# makeDir comes from MockerTestCase
self.tmp = self.makeDir()
+ self.legacy_user_d = self.makeDir()
# patch cloud_dir, so our 'seed_dir' is guaranteed empty
self.paths = helpers.Paths({'cloud_dir': self.tmp})
@@ -138,6 +147,7 @@ class TestSmartOSDataSource(MockerTestCase):
sys_cfg['datasource'] = sys_cfg.get('datasource', {})
sys_cfg['datasource']['SmartOS'] = ds_cfg
+ self.apply_patches([(mod, 'LEGACY_USER_D', self.legacy_user_d)])
self.apply_patches([(mod, 'get_serial', _get_serial)])
self.apply_patches([(mod, 'dmi_data', _dmi_data)])
dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None,
@@ -194,7 +204,7 @@ class TestSmartOSDataSource(MockerTestCase):
# metadata provided base64_all of true
my_returns = MOCK_RETURNS.copy()
my_returns['base64_all'] = "true"
- for k in ('hostname', 'user-data'):
+ for k in ('hostname', 'cloud-init:user-data'):
my_returns[k] = base64.b64encode(my_returns[k])
dsrc = self._get_ds(mockdata=my_returns)
@@ -202,7 +212,7 @@ class TestSmartOSDataSource(MockerTestCase):
self.assertTrue(ret)
self.assertEquals(MOCK_RETURNS['hostname'],
dsrc.metadata['local-hostname'])
- self.assertEquals(MOCK_RETURNS['user-data'],
+ self.assertEquals(MOCK_RETURNS['cloud-init:user-data'],
dsrc.userdata_raw)
self.assertEquals(MOCK_RETURNS['root_authorized_keys'],
dsrc.metadata['public-keys'])
@@ -213,9 +223,9 @@ class TestSmartOSDataSource(MockerTestCase):
def test_b64_userdata(self):
my_returns = MOCK_RETURNS.copy()
- my_returns['b64-user-data'] = "true"
+ my_returns['b64-cloud-init:user-data'] = "true"
my_returns['b64-hostname'] = "true"
- for k in ('hostname', 'user-data'):
+ for k in ('hostname', 'cloud-init:user-data'):
my_returns[k] = base64.b64encode(my_returns[k])
dsrc = self._get_ds(mockdata=my_returns)
@@ -223,7 +233,8 @@ class TestSmartOSDataSource(MockerTestCase):
self.assertTrue(ret)
self.assertEquals(MOCK_RETURNS['hostname'],
dsrc.metadata['local-hostname'])
- self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw)
+ self.assertEquals(MOCK_RETURNS['cloud-init:user-data'],
+ dsrc.userdata_raw)
self.assertEquals(MOCK_RETURNS['root_authorized_keys'],
dsrc.metadata['public-keys'])
@@ -238,13 +249,131 @@ class TestSmartOSDataSource(MockerTestCase):
self.assertTrue(ret)
self.assertEquals(MOCK_RETURNS['hostname'],
dsrc.metadata['local-hostname'])
- self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw)
+ self.assertEquals(MOCK_RETURNS['cloud-init:user-data'],
+ dsrc.userdata_raw)
def test_userdata(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw)
+ self.assertEquals(MOCK_RETURNS['user-data'],
+ dsrc.metadata['legacy-user-data'])
+ self.assertEquals(MOCK_RETURNS['cloud-init:user-data'],
+ dsrc.userdata_raw)
+
+ def test_sdc_scripts(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals(MOCK_RETURNS['user-script'],
+ dsrc.metadata['user-script'])
+
+ legacy_script_f = "%s/user-script" % self.legacy_user_d
+ self.assertTrue(os.path.exists(legacy_script_f))
+ self.assertTrue(os.path.islink(legacy_script_f))
+ user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
+ self.assertEquals(user_script_perm, '700')
+
+ def test_scripts_shebanged(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals(MOCK_RETURNS['user-script'],
+ dsrc.metadata['user-script'])
+
+ legacy_script_f = "%s/user-script" % self.legacy_user_d
+ self.assertTrue(os.path.exists(legacy_script_f))
+ self.assertTrue(os.path.islink(legacy_script_f))
+ shebang = None
+ with open(legacy_script_f, 'r') as f:
+ shebang = f.readlines()[0].strip()
+ self.assertEquals(shebang, "#!/bin/bash")
+ user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
+ self.assertEquals(user_script_perm, '700')
+
+ def test_scripts_shebang_not_added(self):
+ """
+            Test that a script which already has a shebang is left
+            alone: cloud-init must not replace an existing interpreter
+            line (here #!/usr/bin/perl) with "#!/bin/bash".
+ """
+
+ my_returns = MOCK_RETURNS.copy()
+ my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl',
+ 'print("hi")', ''])
+
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals(my_returns['user-script'],
+ dsrc.metadata['user-script'])
+
+ legacy_script_f = "%s/user-script" % self.legacy_user_d
+ self.assertTrue(os.path.exists(legacy_script_f))
+ self.assertTrue(os.path.islink(legacy_script_f))
+ shebang = None
+ with open(legacy_script_f, 'r') as f:
+ shebang = f.readlines()[0].strip()
+ self.assertEquals(shebang, "#!/usr/bin/perl")
+
+ def test_scripts_removed(self):
+ """
+        Since SmartOS requires that the user-script is fetched on each
+        boot, we want to make sure that the information is backed up
+        for user review later.
+
+        This tests the behavior when a script is removed. It makes sure
+        that (a) the previous script is backed up; and (b) no script
+        remains.
+ """
+
+ script_d = os.path.join(self.tmp, "scripts", "per-boot")
+ os.makedirs(script_d)
+
+ test_script_f = "%s/99_user_script" % script_d
+ with open(test_script_f, 'w') as f:
+ f.write("TEST DATA")
+
+ my_returns = MOCK_RETURNS.copy()
+ del my_returns['user-script']
+
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertFalse(dsrc.metadata['user-script'])
+ self.assertFalse(os.path.exists(test_script_f))
+
+ def test_userdata_removed(self):
+ """
+        User-data in the SmartOS world is supposed to be written to a file
+        on each and every boot. This test makes sure that, in the event the
+        legacy user-data is removed, the existing user-data is backed up
+        and no /var/db/user-data file is left behind.
+ """
+
+ user_data_f = "%s/mdata-user-data" % self.legacy_user_d
+ with open(user_data_f, 'w') as f:
+ f.write("PREVIOUS")
+
+ my_returns = MOCK_RETURNS.copy()
+ del my_returns['user-data']
+
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertFalse(dsrc.metadata.get('legacy-user-data'))
+
+ found_new = False
+ for root, _dirs, files in os.walk(self.legacy_user_d):
+ for name in files:
+ name_f = os.path.join(root, name)
+ permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:]
+ if re.match(r'.*\/mdata-user-data$', name_f):
+ found_new = True
+ self.assertEquals(permissions, '400')
+
+ self.assertFalse(found_new)
def test_disable_iptables_flag(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py
index 18d36d86..dd87665d 100644
--- a/tests/unittests/test_ec2_util.py
+++ b/tests/unittests/test_ec2_util.py
@@ -35,6 +35,14 @@ class TestEc2Util(helpers.TestCase):
self.assertEquals('', userdata)
@hp.activate
+ def test_userdata_fetch_fail_server_not_found(self):
+ hp.register_uri(hp.GET,
+ 'http://169.254.169.254/%s/user-data' % (self.VERSION),
+ status=404)
+ userdata = eu.get_instance_userdata(self.VERSION)
+ self.assertEquals('', userdata)
+
+ @hp.activate
def test_metadata_fetch_no_keys(self):
base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py
new file mode 100644
index 00000000..c68c263c
--- /dev/null
+++ b/tests/unittests/test_pathprefix2dict.py
@@ -0,0 +1,40 @@
+from cloudinit import util
+
+from mocker import MockerTestCase
+from tests.unittests.helpers import populate_dir
+
+
+class TestPathPrefix2Dict(MockerTestCase):
+
+ def setUp(self):
+ self.tmp = self.makeDir()
+
+ def test_required_only(self):
+ dirdata = {'f1': 'f1content', 'f2': 'f2content'}
+ populate_dir(self.tmp, dirdata)
+
+ ret = util.pathprefix2dict(self.tmp, required=['f1', 'f2'])
+ self.assertEqual(dirdata, ret)
+
+ def test_required_missing(self):
+ dirdata = {'f1': 'f1content'}
+ populate_dir(self.tmp, dirdata)
+ kwargs = {'required': ['f1', 'f2']}
+ self.assertRaises(ValueError, util.pathprefix2dict, self.tmp, **kwargs)
+
+ def test_no_required_and_optional(self):
+ dirdata = {'f1': 'f1c', 'f2': 'f2c'}
+ populate_dir(self.tmp, dirdata)
+
+ ret = util.pathprefix2dict(self.tmp, required=None,
+ optional=['f1', 'f2'])
+ self.assertEqual(dirdata, ret)
+
+ def test_required_and_optional(self):
+ dirdata = {'f1': 'f1c', 'f2': 'f2c'}
+ populate_dir(self.tmp, dirdata)
+
+ ret = util.pathprefix2dict(self.tmp, required=['f1'], optional=['f2'])
+ self.assertEqual(dirdata, ret)
+
+# vi: ts=4 expandtab
diff --git a/tools/read-dependencies b/tools/read-dependencies
index f89391bc..fee3efcf 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -1,32 +1,23 @@
-#!/bin/sh
+#!/usr/bin/env python
-set -e
+import os
+import sys
-find_root() {
- local topd
- if [ -z "${CLOUD_INIT_TOP_D}" ]; then
- topd=$(cd "$(dirname "${0}")" && cd .. && pwd)
- else
- topd=$(cd "${CLOUD_INIT_TOP_D}" && pwd)
- fi
- [ $? -eq 0 -a -f "${topd}/setup.py" ] || return
- ROOT_DIR="$topd"
-}
-fail() { echo "$0:" "$@" 1>&2; exit 1; }
+if 'CLOUD_INIT_TOP_D' in os.environ:
+ topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
+else:
+ topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-if ! find_root; then
- fail "Unable to locate 'setup.py' file that should " \
- "exist in the cloud-init root directory."
-fi
+for fname in ("setup.py", "requirements.txt"):
+ if not os.path.isfile(os.path.join(topd, fname)):
+ sys.stderr.write("Unable to locate '%s' file that should "
+                         "exist in cloud-init root directory.\n" % fname)
+ sys.exit(1)
-REQUIRES="$ROOT_DIR/requirements.txt"
+with open(os.path.join(topd, "requirements.txt"), "r") as fp:
+ for line in fp:
+ if not line.strip() or line.startswith("#"):
+ continue
+ sys.stdout.write(line)
-if [ ! -e "$REQUIRES" ]; then
- fail "Unable to find 'requirements.txt' file located at '$REQUIRES'"
-fi
-
-# Filter out comments and empty lines
-DEPS=$(sed -n -e 's,#.*,,' -e '/./p' "$REQUIRES") &&
- [ -n "$DEPS" ] ||
- fail "failed to read deps from '${REQUIRES}'"
-echo "$DEPS" | sort -d -f
+sys.exit(0)
diff --git a/tools/read-version b/tools/read-version
index 599f52cd..d02651e9 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -1,32 +1,26 @@
-#!/bin/sh
+#!/usr/bin/env python
-set -e
+import os
+import re
+import sys
-find_root() {
- local topd
- if [ -z "${CLOUD_INIT_TOP_D}" ]; then
- topd=$(cd "$(dirname "${0}")" && cd .. && pwd)
- else
- topd=$(cd "${CLOUD_INIT_TOP_D}" && pwd)
- fi
- [ $? -eq 0 -a -f "${topd}/setup.py" ] || return
- ROOT_DIR="$topd"
-}
-fail() { echo "$0:" "$@" 1>&2; exit 1; }
+if 'CLOUD_INIT_TOP_D' in os.environ:
+ topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
+else:
+ topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-if ! find_root; then
- fail "Unable to locate 'setup.py' file that should " \
- "exist in the cloud-init root directory."
-fi
+for fname in ("setup.py", "ChangeLog"):
+ if not os.path.isfile(os.path.join(topd, fname)):
+ sys.stderr.write("Unable to locate '%s' file that should "
+                         "exist in cloud-init root directory.\n" % fname)
+ sys.exit(1)
-CHNG_LOG="$ROOT_DIR/ChangeLog"
+vermatch = re.compile(r"^[0-9]+[.][0-9]+[.][0-9]+:$")
-if [ ! -e "$CHNG_LOG" ]; then
- fail "Unable to find 'ChangeLog' file located at '$CHNG_LOG'"
-fi
+with open(os.path.join(topd, "ChangeLog"), "r") as fp:
+ for line in fp:
+ if vermatch.match(line):
+ sys.stdout.write(line.strip()[:-1] + "\n")
+ break
-VERSION=$(sed -n '/^[0-9]\+[.][0-9]\+[.][0-9]\+:/ {s/://; p; :a;n; ba; }' \
- "$CHNG_LOG") &&
- [ -n "$VERSION" ] ||
- fail "failed to get version from '$CHNG_LOG'"
-echo "$VERSION"
+sys.exit(0)
diff --git a/tools/run-pep8 b/tools/run-pep8
index 20e594bc..cfce5edd 100755
--- a/tools/run-pep8
+++ b/tools/run-pep8
@@ -1,15 +1,7 @@
#!/bin/bash
-ci_files='cloudinit/*.py cloudinit/config/*.py'
-test_files=$(find tests -name "*.py")
-def_files="$ci_files $test_files"
-
if [ $# -eq 0 ]; then
- files=( )
- for f in $def_files; do
- [ -f "$f" ] || { echo "failed, $f not a file" 1>&2; exit 1; }
- files[${#files[@]}]=${f}
- done
+ files=( bin/cloud-init $(find * -name "*.py" -type f) )
else
files=( "$@" );
fi
@@ -44,4 +36,3 @@ cmd=(
echo -e "\nRunning 'cloudinit' pep8:"
echo "${cmd[@]}"
"${cmd[@]}"
-
diff --git a/tools/run-pylint b/tools/run-pylint
index b74efda9..0fe0c64a 100755
--- a/tools/run-pylint
+++ b/tools/run-pylint
@@ -1,7 +1,7 @@
#!/bin/bash
if [ $# -eq 0 ]; then
- files=( $(find * -name "*.py" -type f) )
+ files=( bin/cloud-init $(find * -name "*.py" -type f) )
else
files=( "$@" );
fi
@@ -16,6 +16,7 @@ cmd=(
--rcfile=$RC_FILE
--disable=R
--disable=I
+ --dummy-variables-rgx="_"
"${files[@]}"
)