-rw-r--r--  ChangeLog | 13
-rw-r--r--  Makefile | 2
-rwxr-xr-x  bin/cloud-init | 2
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 4
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py | 2
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 3
-rw-r--r--  cloudinit/config/cc_emit_upstart.py | 27
-rw-r--r--  cloudinit/config/cc_grub_dpkg.py | 21
-rw-r--r--  cloudinit/config/cc_locale.py | 6
-rw-r--r--  cloudinit/config/cc_snappy.py | 280
-rw-r--r--  cloudinit/distros/__init__.py | 15
-rw-r--r--  cloudinit/ec2_utils.py | 15
-rw-r--r--  cloudinit/handlers/__init__.py | 29
-rw-r--r--  cloudinit/settings.py | 2
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 132
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py | 2
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py | 8
-rw-r--r--  cloudinit/sources/DataSourceGCE.py | 93
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py | 25
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 5
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 1
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 89
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 2
-rw-r--r--  cloudinit/stages.py | 24
-rw-r--r--  cloudinit/url_helper.py | 16
-rw-r--r--  cloudinit/user_data.py | 17
-rw-r--r--  cloudinit/util.py | 99
-rw-r--r--  config/cloud.cfg | 1
-rw-r--r--  doc/examples/cloud-config.txt | 2
-rw-r--r--  doc/rtd/topics/datasources.rst | 2
-rw-r--r--  doc/sources/cloudstack/README.rst | 29
-rwxr-xr-x  packages/brpm | 2
-rw-r--r--  packages/debian/control.in | 3
-rw-r--r--  systemd/cloud-config.service | 5
-rw-r--r--  systemd/cloud-config.target | 3
-rw-r--r--  systemd/cloud-final.service | 5
-rw-r--r--  systemd/cloud-init.service | 4
-rwxr-xr-x  sysvinit/redhat/cloud-init-local | 5
-rw-r--r--  tests/unittests/helpers.py | 42
-rw-r--r--  tests/unittests/test__init__.py | 8
-rw-r--r--  tests/unittests/test_data.py | 46
-rw-r--r--  tests/unittests/test_datasource/test_azure.py | 217
-rw-r--r--  tests/unittests/test_datasource/test_configdrive.py | 15
-rw-r--r--  tests/unittests/test_datasource/test_digitalocean.py | 3
-rw-r--r--  tests/unittests/test_datasource/test_gce.py | 54
-rw-r--r--  tests/unittests/test_datasource/test_maas.py | 8
-rw-r--r--  tests/unittests/test_datasource/test_nocloud.py | 14
-rw-r--r--  tests/unittests/test_datasource/test_openstack.py | 8
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py | 228
-rw-r--r--  tests/unittests/test_ec2_util.py | 4
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure.py | 13
-rw-r--r--  tests/unittests/test_handler/test_handler_disk_setup.py | 30
-rw-r--r--  tests/unittests/test_handler/test_handler_snappy.py | 306
-rw-r--r--  tests/unittests/test_pathprefix2dict.py | 10
-rw-r--r--  tests/unittests/test_templating.py | 5
-rw-r--r--  tests/unittests/test_util.py | 96
-rwxr-xr-x  tools/hacking.py | 2
-rwxr-xr-x  tools/validate-yaml.py | 3
58 files changed, 1679 insertions(+), 428 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index e74b69b2..ff5f2aac 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -26,6 +26,19 @@
- Azure: utilize gpt support for ephemeral formatting [Daniel Watkins]
- CloudStack: support fetching password from virtual router [Daniel Watkins]
(LP: #1422388)
+ - readurl, read_file_or_url return bytes; user must convert as necessary
+ - SmartOS: use v2 metadata service (LP: #1436417) [Daniel Watkins]
+ - NoCloud: fix local datasource claiming found without explicit dsmode
+ - Snappy: add support for installing snappy packages and configuring.
+ - systemd: use network-online instead of network.target (LP: #1440180)
+ [Steve Langasek]
+ - Add functionality to fixate the uid of a newly added user.
+ - Don't overwrite the hostname if the user has changed it after we set it.
+ - GCE datasource does not handle instance ssh keys (LP: #1403617)
+ - sysvinit: make cloud-init-local run before network (LP: #1275098)
+ [Surojit Pathak]
+ - Azure: do not re-set hostname if user has changed it (LP: #1375252)
+ - Fix exception when running with no arguments on Python 3. [Daniel Watkins]
0.7.6:
- open 0.7.6
- Enable vendordata on CloudSigma datasource (LP: #1303986)
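
The first new ChangeLog entry above changes a core API: readurl and
read_file_or_url now return bytes via .contents. A minimal sketch of the new
calling convention, using the decode helper this commit applies elsewhere
(illustrative only, not part of the diff):

    from cloudinit import util

    resp = util.read_file_or_url("file:///etc/hostname")
    raw = resp.contents               # bytes after this change
    text = util.decode_binary(raw)    # decode explicitly where text is expected
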
diff --git a/Makefile b/Makefile
index 009257ca..bb0c5253 100644
--- a/Makefile
+++ b/Makefile
@@ -20,7 +20,7 @@ pep8:
@$(CWD)/tools/run-pep8 $(PY_FILES)
pyflakes:
- pyflakes $(PY_FILES)
+ @$(CWD)/tools/tox-venv py34 pyflakes $(PY_FILES)
pip-requirements:
@echo "Installing cloud-init dependencies..."
diff --git a/bin/cloud-init b/bin/cloud-init
index e95fea28..1d3e7ee3 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -505,6 +505,8 @@ def status_wrapper(name, args, data_d=None, link_d=None):
v1[mode]['errors'] = [str(e) for e in errors]
except Exception as e:
+ util.logexc(LOG, "failed stage %s", mode)
+ print_exc("failed run of stage %s" % mode)
v1[mode]['errors'] = [str(e)]
v1[mode]['finished'] = time.time()
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index de72903f..2c51d116 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -51,6 +51,10 @@ EXPORT_GPG_KEYID = """
def handle(name, cfg, cloud, log, _args):
+ if util.is_false(cfg.get('apt_configure_enabled', True)):
+ log.debug("Skipping module named %s, disabled by config.", name)
+ return
+
release = get_release()
mirrors = find_apt_mirror_info(cloud, cfg)
if not mirrors or "primary" not in mirrors:
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index e5629175..40c32c84 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -43,7 +43,7 @@ def handle(_name, cfg, _cloud, log, _args):
write_apt_snippet("0", log, DEFAULT_FILE)
elif apt_pipe_value_s in ("none", "unchanged", "os"):
return
- elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]:
+ elif apt_pipe_value_s in [str(b) for b in range(0, 6)]:
write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
else:
log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index f899210b..e2ce6db4 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -304,8 +304,7 @@ def is_disk_used(device):
# If the child count is higher than 1, then there are child nodes
# such as partition or device mapper nodes
- use_count = [x for x in enumerate_disk(device)]
- if len(use_count.splitlines()) > 1:
+ if len(list(enumerate_disk(device))) > 1:
return True
# If we see a file system, then it's used
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index 6d376184..86ae97ab 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -21,11 +21,31 @@
import os
from cloudinit.settings import PER_ALWAYS
+from cloudinit import log as logging
from cloudinit import util
frequency = PER_ALWAYS
distros = ['ubuntu', 'debian']
+LOG = logging.getLogger(__name__)
+
+
+def is_upstart_system():
+ if not os.path.isfile("/sbin/initctl"):
+ LOG.debug("no /sbin/initctl located")
+ return False
+
+ myenv = os.environ.copy()
+ if 'UPSTART_SESSION' in myenv:
+ del myenv['UPSTART_SESSION']
+ check_cmd = ['initctl', 'version']
+ try:
+ (out, err) = util.subp(check_cmd, env=myenv)
+ return 'upstart' in out
+ except util.ProcessExecutionError as e:
+ LOG.debug("'%s' returned '%s', not using upstart",
+ ' '.join(check_cmd), e.exit_code)
+ return False
def handle(name, _cfg, cloud, log, args):
@@ -34,10 +54,11 @@ def handle(name, _cfg, cloud, log, args):
# Default to the 'cloud-config'
# event for backwards compat.
event_names = ['cloud-config']
- if not os.path.isfile("/sbin/initctl"):
- log.debug(("Skipping module named %s,"
- " no /sbin/initctl located"), name)
+
+ if not is_upstart_system():
+ log.debug("not upstart system, '%s' disabled")
return
+
cfgpath = cloud.paths.get_ipath_cur("cloud_config")
for n in event_names:
cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index e3219e81..456597af 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -25,15 +25,20 @@ from cloudinit import util
distros = ['ubuntu', 'debian']
-def handle(_name, cfg, _cloud, log, _args):
- idevs = None
- idevs_empty = None
+def handle(name, cfg, _cloud, log, _args):
- if "grub-dpkg" in cfg:
- idevs = util.get_cfg_option_str(cfg["grub-dpkg"],
- "grub-pc/install_devices", None)
- idevs_empty = util.get_cfg_option_str(cfg["grub-dpkg"],
- "grub-pc/install_devices_empty", None)
+ mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
+ if not mycfg:
+ mycfg = {}
+
+ enabled = mycfg.get('enabled', True)
+ if util.is_false(enabled):
+ log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
+ return
+
+ idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
+ idevs_empty = util.get_cfg_option_str(mycfg,
+ "grub-pc/install_devices_empty", None)
if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
(os.path.exists("/dev/xvda1")
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index 6feaae9d..bbe5fcae 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -27,9 +27,9 @@ def handle(name, cfg, cloud, log, args):
else:
locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
- if not locale:
- log.debug(("Skipping module named %s, "
- "no 'locale' configuration found"), name)
+ if util.is_false(locale):
+ log.debug("Skipping module named %s, disabled by config: %s",
+ name, locale)
return
log.debug("Setting locale to %s", locale)
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
new file mode 100644
index 00000000..7aaec94a
--- /dev/null
+++ b/cloudinit/config/cc_snappy.py
@@ -0,0 +1,280 @@
+# vi: ts=4 expandtab
+#
+"""
+The snappy module allows configuration of snappy.
+Example config:
+ #cloud-config
+ snappy:
+ system_snappy: auto
+ ssh_enabled: False
+ packages: [etcd, pkg2.smoser]
+ config:
+ pkgname:
+ key2: value2
+ pkg2:
+ key1: value1
+ packages_dir: '/writable/user-data/cloud-init/snaps'
+
+ - ssh_enabled:
+ This defaults to 'False'. Set to a non-false value to enable ssh service
+ - snap installation and config
+ The above would install 'etcd', and then install 'pkg2.smoser' with a
+ '<config-file>' argument where 'config-file' has 'config-blob' inside it.
+ If 'pkgname' is installed already, then 'snappy config pkgname <file>'
+ will be called where 'file' has 'pkgname-config-blob' as its content.
+
+ Entries in 'config' can be namespaced or non-namespaced for a package.
+ In either case, the config provided to the snappy command is non-namespaced.
+ The package name is provided as it appears.
+
+ If 'packages_dir' has files in it that end in '.snap', then they are
+ installed. Given 3 files:
+ <packages_dir>/foo.snap
+ <packages_dir>/foo.config
+ <packages_dir>/bar.snap
+ cloud-init will invoke:
+ snappy install <packages_dir>/foo.snap <packages_dir>/foo.config
+ snappy install <packages_dir>/bar.snap
+
+ Note that if a 'config' entry is provided for 'ubuntu-core', then
+ cloud-init will invoke: snappy config ubuntu-core <config>,
+ allowing you to configure ubuntu-core in this way.
+"""
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
+
+import glob
+import tempfile
+import os
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+SNAPPY_CMD = "snappy"
+NAMESPACE_DELIM = '.'
+
+BUILTIN_CFG = {
+ 'packages': [],
+ 'packages_dir': '/writable/user-data/cloud-init/snaps',
+ 'ssh_enabled': False,
+ 'system_snappy': "auto",
+ 'config': {},
+}
+
+
+def parse_filename(fname):
+ fname = os.path.basename(fname)
+ fname_noext = fname.rpartition(".")[0]
+ name = fname_noext.partition("_")[0]
+ shortname = name.partition(".")[0]
+ return(name, shortname, fname_noext)
+
+
+def get_fs_package_ops(fspath):
+ if not fspath:
+ return []
+ ops = []
+ for snapfile in sorted(glob.glob(os.path.sep.join([fspath, '*.snap']))):
+ (name, shortname, fname_noext) = parse_filename(snapfile)
+ cfg = None
+ for cand in (fname_noext, name, shortname):
+ fpcand = os.path.sep.join([fspath, cand]) + ".config"
+ if os.path.isfile(fpcand):
+ cfg = fpcand
+ break
+ ops.append(makeop('install', name, config=None,
+ path=snapfile, cfgfile=cfg))
+ return ops
+
+
+def makeop(op, name, config=None, path=None, cfgfile=None):
+ return({'op': op, 'name': name, 'config': config, 'path': path,
+ 'cfgfile': cfgfile})
+
+
+def get_package_config(configs, name):
+ # load the package's config from the configs dict.
+ # prefer full-name entry (config-example.canonical)
+ # over short name entry (config-example)
+ if name in configs:
+ return configs[name]
+ return configs.get(name.partition(NAMESPACE_DELIM)[0])
+
+
+def get_package_ops(packages, configs, installed=None, fspath=None):
+ # get the install and config operations that should be done
+ if installed is None:
+ installed = read_installed_packages()
+ short_installed = [p.partition(NAMESPACE_DELIM)[0] for p in installed]
+
+ if not packages:
+ packages = []
+ if not configs:
+ configs = {}
+
+ ops = []
+ ops += get_fs_package_ops(fspath)
+
+ for name in packages:
+ ops.append(makeop('install', name, get_package_config(configs, name)))
+
+ to_install = [f['name'] for f in ops]
+ short_to_install = [f['name'].partition(NAMESPACE_DELIM)[0] for f in ops]
+
+ for name in configs:
+ if name in to_install:
+ continue
+ shortname = name.partition(NAMESPACE_DELIM)[0]
+ if shortname in short_to_install:
+ continue
+ if name in installed or shortname in short_installed:
+ ops.append(makeop('config', name,
+ config=get_package_config(configs, name)))
+
+ # prefer config entries to filepath entries
+ for op in ops:
+ if op['op'] != 'install' or not op['cfgfile']:
+ continue
+ name = op['name']
+ fromcfg = get_package_config(configs, op['name'])
+ if fromcfg:
+ LOG.debug("preferring configs[%(name)s] over '%(cfgfile)s'", op)
+ op['cfgfile'] = None
+ op['config'] = fromcfg
+
+ return ops
+
+
+def render_snap_op(op, name, path=None, cfgfile=None, config=None):
+ if op not in ('install', 'config'):
+ raise ValueError("cannot render op '%s'" % op)
+
+ shortname = name.partition(NAMESPACE_DELIM)[0]
+ try:
+ cfg_tmpf = None
+ if config is not None:
+ # input to 'snappy config packagename' must have nested data. odd.
+ # config:
+ # packagename:
+ # config
+ # Note, however, we do not touch config files on disk.
+ nested_cfg = {'config': {shortname: config}}
+ (fd, cfg_tmpf) = tempfile.mkstemp()
+ os.write(fd, util.yaml_dumps(nested_cfg).encode())
+ os.close(fd)
+ cfgfile = cfg_tmpf
+
+ cmd = [SNAPPY_CMD, op]
+ if op == 'install':
+ if path:
+ cmd.append("--allow-unauthenticated")
+ cmd.append(path)
+ else:
+ cmd.append(name)
+ if cfgfile:
+ cmd.append(cfgfile)
+ elif op == 'config':
+ cmd += [name, cfgfile]
+
+ util.subp(cmd)
+
+ finally:
+ if cfg_tmpf:
+ os.unlink(cfg_tmpf)
+
+
+def read_installed_packages():
+ ret = []
+ for (name, date, version, dev) in read_pkg_data():
+ if dev:
+ ret.append(NAMESPACE_DELIM.join([name, dev]))
+ else:
+ ret.append(name)
+ return ret
+
+
+def read_pkg_data():
+ out, err = util.subp([SNAPPY_CMD, "list"])
+ pkg_data = []
+ for line in out.splitlines()[1:]:
+ toks = line.split(sep=None, maxsplit=3)
+ if len(toks) == 3:
+ (name, date, version) = toks
+ dev = None
+ else:
+ (name, date, version, dev) = toks
+ pkg_data.append((name, date, version, dev,))
+ return pkg_data
+
+
+def disable_enable_ssh(enabled):
+ LOG.debug("setting enablement of ssh to: %s", enabled)
+ # do something here that would enable or disable
+ not_to_be_run = "/etc/ssh/sshd_not_to_be_run"
+ if enabled:
+ util.del_file(not_to_be_run)
+ # this is an idempotent operation
+ util.subp(["systemctl", "start", "ssh"])
+ else:
+ # this is an idempotent operation
+ util.subp(["systemctl", "stop", "ssh"])
+ util.write_file(not_to_be_run, "cloud-init\n")
+
+
+def system_is_snappy():
+ # channel.ini is configparser loadable.
+ # snappy will move to using /etc/system-image/config.d/*.ini
+ # this is certainly not a perfect test, but good enough for now.
+ content = util.load_file("/etc/system-image/channel.ini", quiet=True)
+ if 'ubuntu-core' in content.lower():
+ return True
+ if os.path.isdir("/etc/system-image/config.d/"):
+ return True
+ return False
+
+
+def set_snappy_command():
+ global SNAPPY_CMD
+ if util.which("snappy-go"):
+ SNAPPY_CMD = "snappy-go"
+ else:
+ SNAPPY_CMD = "snappy"
+ LOG.debug("snappy command is '%s'", SNAPPY_CMD)
+
+
+def handle(name, cfg, cloud, log, args):
+ cfgin = cfg.get('snappy')
+ if not cfgin:
+ cfgin = {}
+ mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
+
+ sys_snappy = str(mycfg.get("system_snappy", "auto"))
+ if util.is_false(sys_snappy):
+ LOG.debug("%s: System is not snappy. disabling", name)
+ return
+
+ if sys_snappy.lower() == "auto" and not(system_is_snappy()):
+ LOG.debug("%s: 'auto' mode, and system not snappy", name)
+ return
+
+ set_snappy_command()
+
+ pkg_ops = get_package_ops(packages=mycfg['packages'],
+ configs=mycfg['config'],
+ fspath=mycfg['packages_dir'])
+
+ fails = []
+ for pkg_op in pkg_ops:
+ try:
+ render_snap_op(**pkg_op)
+ except Exception as e:
+ fails.append((pkg_op, e,))
+ LOG.warn("'%s' failed for '%s': %s",
+ pkg_op['op'], pkg_op['name'], e)
+
+ disable_enable_ssh(mycfg.get('ssh_enabled', False))
+
+ if fails:
+ raise Exception("failed to install/configure snaps")
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index ab874b45..05721922 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -208,6 +208,15 @@ class Distro(object):
and sys_hostname != hostname):
update_files.append(sys_fn)
+ # If something else has changed the hostname after we set it
+ # initially, we should not overwrite those changes (we should
+ # only be setting the hostname once per instance)
+ if (sys_hostname and prev_hostname and
+ sys_hostname != prev_hostname):
+ LOG.info("%s differs from %s, assuming user maintained hostname.",
+ prev_hostname_fn, sys_fn)
+ return
+
# Remove duplicates (in case the previous config filename
# is the same as the system config filename), don't bother
# doing it twice
@@ -222,11 +231,6 @@ class Distro(object):
util.logexc(LOG, "Failed to write hostname %s to %s", hostname,
fn)
- if (sys_hostname and prev_hostname and
- sys_hostname != prev_hostname):
- LOG.debug("%s differs from %s, assuming user maintained hostname.",
- prev_hostname_fn, sys_fn)
-
# If the system hostname file name was provided set the
# non-fqdn as the transient hostname.
if sys_fn in update_files:
@@ -318,6 +322,7 @@ class Distro(object):
"gecos": '--comment',
"homedir": '--home',
"primary_group": '--gid',
+ "uid": '--uid',
"groups": '--groups',
"passwd": '--password',
"shell": '--shell',
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index e1ed4091..37b92a83 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -41,6 +41,10 @@ class MetadataLeafDecoder(object):
def __call__(self, field, blob):
if not blob:
return blob
+ try:
+ blob = util.decode_binary(blob)
+ except UnicodeDecodeError:
+ return blob
if self._maybe_json_object(blob):
try:
# Assume it's json, unless it fails parsing...
@@ -69,6 +73,8 @@ class MetadataMaterializer(object):
def _parse(self, blob):
leaves = {}
children = []
+ blob = util.decode_binary(blob)
+
if not blob:
return (leaves, children)
@@ -117,12 +123,12 @@ class MetadataMaterializer(object):
child_url = url_helper.combine_url(base_url, c)
if not child_url.endswith("/"):
child_url += "/"
- child_blob = str(self._caller(child_url))
+ child_blob = self._caller(child_url)
child_contents[c] = self._materialize(child_blob, child_url)
leaf_contents = {}
for (field, resource) in leaves.items():
leaf_url = url_helper.combine_url(base_url, resource)
- leaf_blob = self._caller(leaf_url).contents
+ leaf_blob = self._caller(leaf_url)
leaf_contents[field] = self._leaf_decoder(field, leaf_blob)
joined = {}
joined.update(child_contents)
@@ -180,10 +186,13 @@ def get_instance_metadata(api_version='latest',
ssl_details=ssl_details, timeout=timeout,
retries=retries)
+ def mcaller(url):
+ return caller(url).contents
+
try:
response = caller(md_url)
materializer = MetadataMaterializer(response.contents,
- md_url, caller,
+ md_url, mcaller,
leaf_decoder=leaf_decoder)
md = materializer.materialize()
if not isinstance(md, (dict)):
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 6b7abbcd..53d5604a 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -163,12 +163,19 @@ def walker_handle_handler(pdata, _ctype, _filename, payload):
def _extract_first_or_bytes(blob, size):
- # Extract the first line upto X bytes or X bytes from more than the
- # first line if the first line does not contain enough bytes
- first_line = blob.split("\n", 1)[0]
- if len(first_line) >= size:
- start = first_line[:size]
- else:
+ # Extract the first line or up to X symbols for text objects
+ # Extract the first X bytes for binary objects
+ try:
+ if isinstance(blob, six.string_types):
+ start = blob.split("\n", 1)[0]
+ else:
+ # We want to avoid decoding the whole blob (it might be huge)
+ # By taking 4*size bytes we guarantee to decode size utf8 chars
+ start = blob[:4 * size].decode(errors='ignore').split("\n", 1)[0]
+ if len(start) >= size:
+ start = start[:size]
+ except UnicodeDecodeError:
+ # Bytes array doesn't contain text so return chunk of raw bytes
start = blob[0:size]
return start
@@ -183,6 +190,11 @@ def _escape_string(text):
except TypeError:
# Give up...
pass
+ except AttributeError:
+ # We're in Python3 and received blob as text
+ # No escaping is needed because bytes are printed
+ # as 'b\xAA\xBB' automatically in Python3
+ pass
return text
@@ -251,7 +263,10 @@ def fixup_handler(mod, def_freq=PER_INSTANCE):
def type_from_starts_with(payload, default=None):
- payload_lc = payload.lower()
+ try:
+ payload_lc = util.decode_binary(payload).lower()
+ except UnicodeDecodeError:
+ return default
payload_lc = payload_lc.lstrip()
for text in INCLUSION_SRCH:
if payload_lc.startswith(text):
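
The "4*size" comment in _extract_first_or_bytes above rests on a worst-case
bound: UTF-8 uses at most four bytes per character, so decoding the first
4*size bytes always recovers at least size characters of valid text. A quick
demonstration (illustrative only):

    blob = ("é" * 100).encode("utf-8")   # two bytes per character here
    size = 32
    start = blob[:4 * size].decode(errors="ignore").split("\n", 1)[0]
    assert len(start[:size]) == size     # enough characters were recovered
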
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 5efcb0b0..b61e5613 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -47,7 +47,7 @@ CFG_BUILTIN = {
],
'def_log_file': '/var/log/cloud-init.log',
'log_cfgs': [],
- 'syslog_fix_perms': 'syslog:adm',
+ 'syslog_fix_perms': ['syslog:adm', 'root:adm'],
'system_info': {
'paths': {
'cloud_dir': '/var/lib/cloud',
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 6e030217..a19d9ca2 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -17,6 +17,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
+import contextlib
import crypt
import fnmatch
import os
@@ -66,6 +67,36 @@ DS_CFG_PATH = ['datasource', DS_NAME]
DEF_EPHEMERAL_LABEL = 'Temporary Storage'
+def get_hostname(hostname_command='hostname'):
+ return util.subp(hostname_command, capture=True)[0].strip()
+
+
+def set_hostname(hostname, hostname_command='hostname'):
+ util.subp([hostname_command, hostname])
+
+
+@contextlib.contextmanager
+def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
+ """
+ Set a temporary hostname, restoring the previous hostname on exit.
+
+ Will have the value of the previous hostname when used as a context
+ manager, or None if the hostname was not changed.
+ """
+ policy = cfg['hostname_bounce']['policy']
+ previous_hostname = get_hostname(hostname_command)
+ if (not util.is_true(cfg.get('set_hostname'))
+ or util.is_false(policy)
+ or (previous_hostname == temp_hostname and policy != 'force')):
+ yield None
+ return
+ set_hostname(temp_hostname, hostname_command)
+ try:
+ yield previous_hostname
+ finally:
+ set_hostname(previous_hostname, hostname_command)
+
+
class DataSourceAzureNet(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -154,33 +185,40 @@ class DataSourceAzureNet(sources.DataSource):
# the directory to be protected.
write_files(ddir, files, dirmode=0o700)
- # handle the hostname 'publishing'
- try:
- handle_set_hostname(mycfg.get('set_hostname'),
- self.metadata.get('local-hostname'),
- mycfg['hostname_bounce'])
- except Exception as e:
- LOG.warn("Failed publishing hostname: %s", e)
- util.logexc(LOG, "handling set_hostname failed")
-
- try:
- invoke_agent(mycfg['agent_command'])
- except util.ProcessExecutionError:
- # claim the datasource even if the command failed
- util.logexc(LOG, "agent command '%s' failed.",
- mycfg['agent_command'])
-
- shcfgxml = os.path.join(ddir, "SharedConfig.xml")
- wait_for = [shcfgxml]
-
- fp_files = []
- for pk in self.cfg.get('_pubkeys', []):
- bname = str(pk['fingerprint'] + ".crt")
- fp_files += [os.path.join(ddir, bname)]
+ temp_hostname = self.metadata.get('local-hostname')
+ hostname_command = mycfg['hostname_bounce']['hostname_command']
+ with temporary_hostname(temp_hostname, mycfg,
+ hostname_command=hostname_command) \
+ as previous_hostname:
+ if (previous_hostname is not None
+ and util.is_true(mycfg.get('set_hostname'))):
+ cfg = mycfg['hostname_bounce']
+ try:
+ perform_hostname_bounce(hostname=temp_hostname,
+ cfg=cfg,
+ prev_hostname=previous_hostname)
+ except Exception as e:
+ LOG.warn("Failed publishing hostname: %s", e)
+ util.logexc(LOG, "handling set_hostname failed")
- missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
- func=wait_for_files,
- args=(wait_for + fp_files,))
+ try:
+ invoke_agent(mycfg['agent_command'])
+ except util.ProcessExecutionError:
+ # claim the datasource even if the command failed
+ util.logexc(LOG, "agent command '%s' failed.",
+ mycfg['agent_command'])
+
+ shcfgxml = os.path.join(ddir, "SharedConfig.xml")
+ wait_for = [shcfgxml]
+
+ fp_files = []
+ for pk in self.cfg.get('_pubkeys', []):
+ bname = str(pk['fingerprint'] + ".crt")
+ fp_files += [os.path.join(ddir, bname)]
+
+ missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+ func=wait_for_files,
+ args=(wait_for + fp_files,))
if len(missing):
LOG.warn("Did not find files, but going on: %s", missing)
@@ -299,39 +337,15 @@ def support_new_ephemeral(cfg):
return mod_list
-def handle_set_hostname(enabled, hostname, cfg):
- if not util.is_true(enabled):
- return
-
- if not hostname:
- LOG.warn("set_hostname was true but no local-hostname")
- return
-
- apply_hostname_bounce(hostname=hostname, policy=cfg['policy'],
- interface=cfg['interface'],
- command=cfg['command'],
- hostname_command=cfg['hostname_command'])
-
-
-def apply_hostname_bounce(hostname, policy, interface, command,
- hostname_command="hostname"):
+def perform_hostname_bounce(hostname, cfg, prev_hostname):
# set the hostname to 'hostname' if it is not already set to that.
# then, if policy is not off, bounce the interface using command
- prev_hostname = util.subp(hostname_command, capture=True)[0].strip()
-
- util.subp([hostname_command, hostname])
-
- msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
- (prev_hostname, hostname, policy, interface))
-
- if util.is_false(policy):
- LOG.debug("pubhname: policy false, skipping [%s]", msg)
- return
-
- if prev_hostname == hostname and policy != "force":
- LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
- return
+ command = cfg['command']
+ interface = cfg['interface']
+ policy = cfg['policy']
+ msg = ("hostname=%s policy=%s interface=%s" %
+ (hostname, policy, interface))
env = os.environ.copy()
env['interface'] = interface
env['hostname'] = hostname
@@ -344,9 +358,9 @@ def apply_hostname_bounce(hostname, policy, interface, command,
shell = not isinstance(command, (list, tuple))
# capture=False, see comments in bug 1202758 and bug 1206164.
util.log_time(logfunc=LOG.debug, msg="publishing hostname",
- get_uptime=True, func=util.subp,
- kwargs={'args': command, 'shell': shell, 'capture': False,
- 'env': env})
+ get_uptime=True, func=util.subp,
+ kwargs={'args': command, 'shell': shell, 'capture': False,
+ 'env': env})
def crtfile_to_pubkey(fname):
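
A sketch of the new temporary_hostname flow (config values invented for
illustration): the context manager sets the hostname via the hostname
command, restores the original on exit, and yields None when no change was
needed:

    cfg = {'set_hostname': True, 'hostname_bounce': {'policy': 'yes'}}
    with temporary_hostname('temp-host', cfg) as previous_hostname:
        if previous_hostname is not None:
            pass  # hostname was changed; restore happens automatically
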
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 76597116..f8f94759 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -59,7 +59,7 @@ class DataSourceCloudSigma(sources.DataSource):
LOG.warn("failed to get hypervisor product name via dmi data")
return False
else:
- LOG.debug("detected hypervisor as {}".format(sys_product_name))
+ LOG.debug("detected hypervisor as %s", sys_product_name)
return 'cloudsigma' in sys_product_name.lower()
LOG.warn("failed to query dmi data for system product name")
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 76ddaa9d..5d47564d 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -54,9 +54,13 @@ class DataSourceDigitalOcean(sources.DataSource):
def get_data(self):
caller = functools.partial(util.read_file_or_url,
timeout=self.timeout, retries=self.retries)
- md = ec2_utils.MetadataMaterializer(str(caller(self.metadata_address)),
+
+ def mcaller(url):
+ return caller(url).contents
+
+ md = ec2_utils.MetadataMaterializer(mcaller(self.metadata_address),
base_url=self.metadata_address,
- caller=caller)
+ caller=mcaller)
self.metadata = md.materialize()
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 6936c74e..f4ed915d 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -30,6 +30,31 @@ BUILTIN_DS_CONFIG = {
REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
+class GoogleMetadataFetcher(object):
+ headers = {'X-Google-Metadata-Request': True}
+
+ def __init__(self, metadata_address):
+ self.metadata_address = metadata_address
+
+ def get_value(self, path, is_text):
+ value = None
+ try:
+ resp = url_helper.readurl(url=self.metadata_address + path,
+ headers=self.headers)
+ except url_helper.UrlError as exc:
+ msg = "url %s raised exception %s"
+ LOG.debug(msg, path, exc)
+ else:
+ if resp.code == 200:
+ if is_text:
+ value = util.decode_binary(resp.contents)
+ else:
+ value = resp.contents
+ else:
+ LOG.debug("url %s returned code %s", path, resp.code)
+ return value
+
+
class DataSourceGCE(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -50,18 +75,16 @@ class DataSourceGCE(sources.DataSource):
return public_key
def get_data(self):
- # GCE metadata server requires a custom header since v1
- headers = {'X-Google-Metadata-Request': True}
-
- # url_map: (our-key, path, required)
+ # url_map: (our-key, path, required, is_text)
url_map = [
- ('instance-id', 'instance/id', True),
- ('availability-zone', 'instance/zone', True),
- ('local-hostname', 'instance/hostname', True),
- ('public-keys', 'project/attributes/sshKeys', False),
- ('user-data', 'instance/attributes/user-data', False),
- ('user-data-encoding', 'instance/attributes/user-data-encoding',
- False),
+ ('instance-id', ('instance/id',), True, True),
+ ('availability-zone', ('instance/zone',), True, True),
+ ('local-hostname', ('instance/hostname',), True, True),
+ ('public-keys', ('project/attributes/sshKeys',
+ 'instance/attributes/sshKeys'), False, True),
+ ('user-data', ('instance/attributes/user-data',), False, False),
+ ('user-data-encoding', ('instance/attributes/user-data-encoding',),
+ False, True),
]
# if we cannot resolve the metadata server, then no point in trying
@@ -69,37 +92,25 @@ class DataSourceGCE(sources.DataSource):
LOG.debug("%s is not resolvable", self.metadata_address)
return False
+ metadata_fetcher = GoogleMetadataFetcher(self.metadata_address)
# iterate over url_map keys to get metadata items
- found = False
- for (mkey, path, required) in url_map:
- try:
- resp = url_helper.readurl(url=self.metadata_address + path,
- headers=headers)
- if resp.code == 200:
- found = True
- self.metadata[mkey] = resp.contents
+ running_on_gce = False
+ for (mkey, paths, required, is_text) in url_map:
+ value = None
+ for path in paths:
+ new_value = metadata_fetcher.get_value(path, is_text)
+ if new_value is not None:
+ value = new_value
+ if value:
+ running_on_gce = True
+ if required and value is None:
+ msg = "required key %s returned nothing. not GCE"
+ if not running_on_gce:
+ LOG.debug(msg, mkey)
else:
- if required:
- msg = "required url %s returned code %s. not GCE"
- if not found:
- LOG.debug(msg, path, resp.code)
- else:
- LOG.warn(msg, path, resp.code)
- return False
- else:
- self.metadata[mkey] = None
- except url_helper.UrlError as e:
- if required:
- msg = "required url %s raised exception %s. not GCE"
- if not found:
- LOG.debug(msg, path, e)
- else:
- LOG.warn(msg, path, e)
- return False
- msg = "Failed to get %s metadata item: %s."
- LOG.debug(msg, path, e)
-
- self.metadata[mkey] = None
+ LOG.warn(msg, mkey)
+ return False
+ self.metadata[mkey] = value
if self.metadata['public-keys']:
lines = self.metadata['public-keys'].splitlines()
@@ -113,7 +124,7 @@ class DataSourceGCE(sources.DataSource):
else:
LOG.warn('unknown user-data-encoding: %s, ignoring', encoding)
- return found
+ return running_on_gce
@property
def launch_index(self):
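
The reworked GCE loop can be sketched in isolation: each metadata key now
maps to several candidate paths and later hits win, so an instance-level
sshKeys attribute overrides the project-level one (the metadata address here
is assumed for illustration):

    base = 'http://metadata.google.internal/computeMetadata/v1beta1/'
    fetcher = GoogleMetadataFetcher(base)
    value = None
    for path in ('project/attributes/sshKeys', 'instance/attributes/sshKeys'):
        new_value = fetcher.get_value(path, is_text=True)
        if new_value is not None:
            value = new_value        # last non-None hit wins
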
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 082cc58f..c1a0eb61 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -22,7 +22,7 @@ from __future__ import print_function
from email.utils import parsedate
import errno
-import oauthlib
+import oauthlib.oauth1 as oauth1
import os
import time
@@ -36,6 +36,8 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
MD_VERSION = "2012-03-01"
+BINARY_FIELDS = ('user-data',)
+
class DataSourceMAAS(sources.DataSource):
"""
@@ -185,7 +187,8 @@ def read_maas_seed_dir(seed_d):
md = {}
for fname in files:
try:
- md[fname] = util.load_file(os.path.join(seed_d, fname))
+ md[fname] = util.load_file(os.path.join(seed_d, fname),
+ decode=fname not in BINARY_FIELDS)
except IOError as e:
if e.errno != errno.ENOENT:
raise
@@ -218,6 +221,7 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'),
'user-data': "%s/%s" % (base_url, 'user-data'),
}
+
md = {}
for name in file_order:
url = files.get(name)
@@ -238,7 +242,10 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
timeout=timeout,
ssl_details=ssl_details)
if resp.ok():
- md[name] = str(resp)
+ if name in BINARY_FIELDS:
+ md[name] = resp.contents
+ else:
+ md[name] = util.decode_binary(resp.contents)
else:
LOG.warn(("Fetching from %s resulted in"
" an invalid http code %s"), url, resp.code)
@@ -263,7 +270,7 @@ def check_seed_contents(content, seed):
if len(missing):
raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
- userdata = content.get('user-data', "")
+ userdata = content.get('user-data', b"")
md = {}
for (key, val) in content.items():
if key == 'user-data':
@@ -275,12 +282,18 @@ def check_seed_contents(content, seed):
def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
timestamp=None):
- client = oauthlib.oauth1.Client(
+ if timestamp:
+ timestamp = str(timestamp)
+ else:
+ timestamp = None
+
+ client = oauth1.Client(
consumer_key,
client_secret=consumer_secret,
resource_owner_key=token_key,
resource_owner_secret=token_secret,
- signature_method=oauthlib.SIGNATURE_PLAINTEXT)
+ signature_method=oauth1.SIGNATURE_PLAINTEXT,
+ timestamp=timestamp)
uri, signed_headers, body = client.sign(url)
return signed_headers
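
Callers of oauth_headers are unchanged; a sketch with placeholder credentials
(the empty consumer_secret mirrors the usual MAAS PLAINTEXT setup):

    headers = oauth_headers(
        url='http://maas.example.com/MAAS/metadata/2012-03-01/meta-data/',
        consumer_key='consumer', token_key='token', token_secret='secret',
        consumer_secret='', timestamp=None)
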
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index c26a645c..6a861af3 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -124,7 +124,7 @@ class DataSourceNoCloud(sources.DataSource):
# that is more likely to be what is desired. If they want
# dsmode of local, then they must specify that.
if 'dsmode' not in mydata['meta-data']:
- mydata['dsmode'] = "net"
+ mydata['meta-data']['dsmode'] = "net"
LOG.debug("Using data from %s", dev)
found.append(dev)
@@ -193,7 +193,8 @@ class DataSourceNoCloud(sources.DataSource):
self.vendordata = mydata['vendor-data']
return True
- LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
+ LOG.debug("%s: not claiming datasource, dsmode=%s", self,
+ mydata['meta-data']['dsmode'])
return False
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 61709c1b..ac2c3b45 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -24,7 +24,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import base64
import os
import pwd
import re
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 9d48beab..c9b497df 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -29,9 +29,12 @@
# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
# Comments with "@datadictionary" are snippets of the definition
-import base64
import binascii
+import contextlib
import os
+import random
+import re
+
import serial
from cloudinit import log as logging
@@ -301,6 +304,65 @@ def get_serial(seed_device, seed_timeout):
return ser
+class JoyentMetadataFetchException(Exception):
+ pass
+
+
+class JoyentMetadataClient(object):
+ """
+ A client implementing v2 of the Joyent Metadata Protocol Specification.
+
+ The full specification can be found at
+ http://eng.joyent.com/mdata/protocol.html
+ """
+ line_regex = re.compile(
+ r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
+ r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
+ r'( (?P<payload>.+))?)')
+
+ def __init__(self, serial):
+ self.serial = serial
+
+ def _checksum(self, body):
+ return '{0:08x}'.format(
+ binascii.crc32(body.encode('utf-8')) & 0xffffffff)
+
+ def _get_value_from_frame(self, expected_request_id, frame):
+ frame_data = self.line_regex.match(frame).groupdict()
+ if int(frame_data['length']) != len(frame_data['body']):
+ raise JoyentMetadataFetchException(
+ 'Incorrect frame length given ({0} != {1}).'.format(
+ frame_data['length'], len(frame_data['body'])))
+ expected_checksum = self._checksum(frame_data['body'])
+ if frame_data['checksum'] != expected_checksum:
+ raise JoyentMetadataFetchException(
+ 'Invalid checksum (expected: {0}; got {1}).'.format(
+ expected_checksum, frame_data['checksum']))
+ if frame_data['request_id'] != expected_request_id:
+ raise JoyentMetadataFetchException(
+ 'Request ID mismatch (expected: {0}; got {1}).'.format(
+ expected_request_id, frame_data['request_id']))
+ if not frame_data.get('payload', None):
+ LOG.debug('No value found.')
+ return None
+ value = util.b64d(frame_data['payload'])
+ LOG.debug('Value "%s" found.', value)
+ return value
+
+ def get_metadata(self, metadata_key):
+ LOG.debug('Fetching metadata key "%s"...', metadata_key)
+ request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
+ message_body = '{0} GET {1}'.format(request_id,
+ util.b64e(metadata_key))
+ msg = 'V2 {0} {1} {2}\n'.format(
+ len(message_body), self._checksum(message_body), message_body)
+ LOG.debug('Writing "%s" to serial port.', msg)
+ self.serial.write(msg.encode('ascii'))
+ response = self.serial.readline().decode('ascii')
+ LOG.debug('Read "%s" from serial port.', response)
+ return self._get_value_from_frame(request_id, response)
+
+
def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
b64=None):
"""Makes a request to via the serial console via "GET <NOUN>"
@@ -314,33 +376,20 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
encoded, so this method relies on being told if the data is base64 or
not.
"""
-
if not noun:
return False
- ser = get_serial(seed_device, seed_timeout)
- ser.write("GET %s\n" % noun.rstrip())
- status = str(ser.readline()).rstrip()
- response = []
- eom_found = False
+ with contextlib.closing(get_serial(seed_device, seed_timeout)) as ser:
+ client = JoyentMetadataClient(ser)
+ response = client.get_metadata(noun)
- if 'SUCCESS' not in status:
- ser.close()
+ if response is None:
return default
- while not eom_found:
- m = ser.readline()
- if m.rstrip() == ".":
- eom_found = True
- else:
- response.append(m)
-
- ser.close()
-
if b64 is None:
b64 = query_data('b64-%s' % noun, seed_device=seed_device,
- seed_timeout=seed_timeout, b64=False,
- default=False, strip=True)
+ seed_timeout=seed_timeout, b64=False,
+ default=False, strip=True)
b64 = util.is_true(b64)
resp = None
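
The wire format the new client speaks can be reproduced by hand. This sketch
builds the same 'V2' GET frame that get_metadata() writes to the serial port,
with a fixed request id for illustration:

    import base64
    import binascii

    request_id = '0000abcd'
    payload = base64.b64encode(b'user-data').decode('ascii')
    body = '{0} GET {1}'.format(request_id, payload)
    checksum = '{0:08x}'.format(
        binascii.crc32(body.encode('utf-8')) & 0xffffffff)
    frame = 'V2 {0} {1} {2}\n'.format(len(body), checksum, body)
    # -> 'V2 25 <crc32> 0000abcd GET dXNlci1kYXRh\n'
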
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 88c7a198..bd93d22f 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -327,7 +327,7 @@ class ConfigDriveReader(BaseReader):
return os.path.join(*components)
def _path_read(self, path):
- return util.load_file(path)
+ return util.load_file(path, decode=False)
def _fetch_available_versions(self):
if self._versions is None:
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 94fcf4cc..d28e765b 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -148,16 +148,25 @@ class Init(object):
def _initialize_filesystem(self):
util.ensure_dirs(self._initial_subdirs())
log_file = util.get_cfg_option_str(self.cfg, 'def_log_file')
- perms = util.get_cfg_option_str(self.cfg, 'syslog_fix_perms')
if log_file:
util.ensure_file(log_file)
- if perms:
- u, g = util.extract_usergroup(perms)
+ perms = self.cfg.get('syslog_fix_perms')
+ if not perms:
+ perms = {}
+ if not isinstance(perms, list):
+ perms = [perms]
+
+ error = None
+ for perm in perms:
+ u, g = util.extract_usergroup(perm)
try:
util.chownbyname(log_file, u, g)
- except OSError:
- util.logexc(LOG, "Unable to change the ownership of %s to "
- "user %s, group %s", log_file, u, g)
+ return
+ except OSError as e:
+ error = e
+
+ LOG.warn("Failed changing perms on '%s'. tried: %s. %s",
+ log_file, ','.join(perms), error)
def read_cfg(self, extra_fns=None):
# None check so that we don't keep on re-loading if empty
@@ -346,7 +355,8 @@ class Init(object):
processed_vd = str(self.datasource.get_vendordata())
if processed_vd is None:
processed_vd = ''
- util.write_file(self._get_ipath('vendordata'), str(processed_vd), 0o600)
+ util.write_file(self._get_ipath('vendordata'), str(processed_vd),
+ 0o600)
def _default_handlers(self, opts=None):
if opts is None:
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 62001dff..0e65f431 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -119,7 +119,7 @@ class UrlResponse(object):
@property
def contents(self):
- return self._response.text
+ return self._response.content
@property
def url(self):
@@ -321,7 +321,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
timeout = int((start_time + max_wait) - now)
reason = ""
- e = None
+ url_exc = None
try:
if headers_cb is not None:
headers = headers_cb(url)
@@ -332,18 +332,20 @@ def wait_for_url(urls, max_wait=None, timeout=None,
check_status=False)
if not response.contents:
reason = "empty response [%s]" % (response.code)
- e = UrlError(ValueError(reason),
- code=response.code, headers=response.headers)
+ url_exc = UrlError(ValueError(reason), code=response.code,
+ headers=response.headers)
elif not response.ok():
reason = "bad status code [%s]" % (response.code)
- e = UrlError(ValueError(reason),
- code=response.code, headers=response.headers)
+ url_exc = UrlError(ValueError(reason), code=response.code,
+ headers=response.headers)
else:
return url
except UrlError as e:
reason = "request error [%s]" % e
+ url_exc = e
except Exception as e:
reason = "unexpected error [%s]" % e
+ url_exc = e
time_taken = int(time.time() - start_time)
status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url,
@@ -355,7 +357,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
# This can be used to alter the headers that will be sent
# in the future, for example this is what the MAAS datasource
# does.
- exception_cb(msg=status_msg, exception=e)
+ exception_cb(msg=status_msg, exception=url_exc)
if timeup(max_wait, start_time):
break
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index b11894ce..f7c5787c 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -22,8 +22,6 @@
import os
-import email
-
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
@@ -51,6 +49,7 @@ INCLUDE_TYPES = ['text/x-include-url', 'text/x-include-once-url']
ARCHIVE_TYPES = ["text/cloud-config-archive"]
UNDEF_TYPE = "text/plain"
ARCHIVE_UNDEF_TYPE = "text/cloud-config"
+ARCHIVE_UNDEF_BINARY_TYPE = "application/octet-stream"
# This seems to hit most of the gzip possible content types.
DECOMP_TYPES = [
@@ -237,9 +236,9 @@ class UserDataProcessor(object):
resp = util.read_file_or_url(include_url,
ssl_details=self.ssl_details)
if include_once_on and resp.ok():
- util.write_file(include_once_fn, resp, mode=0o600)
+ util.write_file(include_once_fn, resp.contents, mode=0o600)
if resp.ok():
- content = str(resp)
+ content = resp.contents
else:
LOG.warn(("Fetching from %s resulted in"
" a invalid http code of %s"),
@@ -267,11 +266,15 @@ class UserDataProcessor(object):
content = ent.get('content', '')
mtype = ent.get('type')
if not mtype:
- mtype = handlers.type_from_starts_with(content,
- ARCHIVE_UNDEF_TYPE)
+ default = ARCHIVE_UNDEF_TYPE
+ if isinstance(content, six.binary_type):
+ default = ARCHIVE_UNDEF_BINARY_TYPE
+ mtype = handlers.type_from_starts_with(content, default)
maintype, subtype = mtype.split('/', 1)
if maintype == "text":
+ if isinstance(content, six.binary_type):
+ content = content.decode()
msg = MIMEText(content, _subtype=subtype)
else:
msg = MIMEBase(maintype, subtype)
@@ -338,7 +341,7 @@ def convert_string(raw_data, headers=None):
headers = {}
data = util.decode_binary(util.decomp_gzip(raw_data))
if "mime-version:" in data[0:4096].lower():
- msg = email.message_from_string(data)
+ msg = util.message_from_string(data)
for (key, val) in headers.items():
_replace_header(msg, key, val)
else:
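
The archive handling above now distinguishes text from bytes when no explicit
type is given. A condensed sketch of that decision (the helper name is
invented for this sketch; the constants are defined earlier in this file):

    import six

    def default_mime_for(content):
        # bytes payloads fall back to application/octet-stream;
        # text payloads keep the text/cloud-config default
        if isinstance(content, six.binary_type):
            return ARCHIVE_UNDEF_BINARY_TYPE
        return ARCHIVE_UNDEF_TYPE
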
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 4fbdf0a9..cae57770 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -23,6 +23,7 @@
import contextlib
import copy as obj_copy
import ctypes
+import email
import errno
import glob
import grp
@@ -120,14 +121,40 @@ def fully_decoded_payload(part):
if (six.PY3 and
part.get_content_maintype() == 'text' and
isinstance(cte_payload, bytes)):
- charset = part.get_charset() or 'utf-8'
- return cte_payload.decode(charset, errors='surrogateescape')
+ charset = part.get_charset()
+ if charset and charset.input_codec:
+ encoding = charset.input_codec
+ else:
+ encoding = 'utf-8'
+ return cte_payload.decode(encoding, errors='surrogateescape')
return cte_payload
# Path for DMI Data
DMI_SYS_PATH = "/sys/class/dmi/id"
+# dmidecode and /sys/class/dmi/id/* use different names for the same value,
+# this allows us to refer to them by one canonical name
+DMIDECODE_TO_DMI_SYS_MAPPING = {
+ 'baseboard-asset-tag': 'board_asset_tag',
+ 'baseboard-manufacturer': 'board_vendor',
+ 'baseboard-product-name': 'board_name',
+ 'baseboard-serial-number': 'board_serial',
+ 'baseboard-version': 'board_version',
+ 'bios-release-date': 'bios_date',
+ 'bios-vendor': 'bios_vendor',
+ 'bios-version': 'bios_version',
+ 'chassis-asset-tag': 'chassis_asset_tag',
+ 'chassis-manufacturer': 'chassis_vendor',
+ 'chassis-serial-number': 'chassis_serial',
+ 'chassis-version': 'chassis_version',
+ 'system-manufacturer': 'sys_vendor',
+ 'system-product-name': 'product_name',
+ 'system-serial-number': 'product_serial',
+ 'system-uuid': 'product_uuid',
+ 'system-version': 'product_version',
+}
+
class ProcessExecutionError(IOError):
@@ -739,6 +766,10 @@ def fetch_ssl_details(paths=None):
return ssl_details
+def load_tfile_or_url(*args, **kwargs):
+ return(decode_binary(read_file_or_url(*args, **kwargs).contents))
+
+
def read_file_or_url(url, timeout=5, retries=10,
headers=None, data=None, sec_between=1, ssl_details=None,
headers_cb=None, exception_cb=None):
@@ -750,7 +781,7 @@ def read_file_or_url(url, timeout=5, retries=10,
LOG.warn("Unable to post data to file resource %s", url)
file_path = url[len("file://"):]
try:
- contents = load_file(file_path)
+ contents = load_file(file_path, decode=False)
except IOError as e:
code = e.errno
if e.errno == errno.ENOENT:
@@ -806,7 +837,7 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
ud_url = "%s%s%s" % (base, "user-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
- md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
+ md_resp = load_tfile_or_url(md_url, timeout, retries, file_retries)
md = None
if md_resp.ok():
md = load_yaml(md_resp.contents, default={})
@@ -966,7 +997,7 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
def get_cmdline_url(names=('cloud-config-url', 'url'),
- starts="#cloud-config", cmdline=None):
+ starts=b"#cloud-config", cmdline=None):
if cmdline is None:
cmdline = get_cmdline()
@@ -982,6 +1013,8 @@ def get_cmdline_url(names=('cloud-config-url', 'url'),
return (None, None, None)
resp = read_file_or_url(url)
+ # allow callers to pass starts as text when comparing to bytes contents
+ starts = encode_text(starts)
if resp.ok() and resp.contents.startswith(starts):
return (key, url, resp.contents)
@@ -2030,7 +2063,7 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
ret = {}
for f in required + optional:
try:
- ret[f] = load_file(base + delim + f, quiet=False)
+ ret[f] = load_file(base + delim + f, quiet=False, decode=False)
except IOError as e:
if e.errno != errno.ENOENT:
raise
@@ -2097,24 +2130,26 @@ def _read_dmi_syspath(key):
"""
Reads dmi data from /sys/class/dmi/id
"""
-
- dmi_key = "{0}/{1}".format(DMI_SYS_PATH, key)
- LOG.debug("querying dmi data {0}".format(dmi_key))
+ if key not in DMIDECODE_TO_DMI_SYS_MAPPING:
+ return None
+ mapped_key = DMIDECODE_TO_DMI_SYS_MAPPING[key]
+ dmi_key_path = "{0}/{1}".format(DMI_SYS_PATH, mapped_key)
+ LOG.debug("querying dmi data %s", dmi_key_path)
try:
- if not os.path.exists(dmi_key):
- LOG.debug("did not find {0}".format(dmi_key))
+ if not os.path.exists(dmi_key_path):
+ LOG.debug("did not find %s", dmi_key_path)
return None
- key_data = load_file(dmi_key)
+ key_data = load_file(dmi_key_path)
if not key_data:
- LOG.debug("{0} did not return any data".format(key))
+ LOG.debug("%s did not return any data", dmi_key_path)
return None
- LOG.debug("dmi data {0} returned {0}".format(dmi_key, key_data))
+ LOG.debug("dmi data %s returned %s", dmi_key_path, key_data)
return key_data.strip()
except Exception as e:
- logexc(LOG, "failed read of {0}".format(dmi_key), e)
+ logexc(LOG, "failed read of %s", dmi_key_path, e)
return None
@@ -2126,26 +2161,40 @@ def _call_dmidecode(key, dmidecode_path):
try:
cmd = [dmidecode_path, "--string", key]
(result, _err) = subp(cmd)
- LOG.debug("dmidecode returned '{0}' for '{0}'".format(result, key))
+ LOG.debug("dmidecode returned '%s' for '%s'", result, key)
return result
- except OSError as _err:
- LOG.debug('failed dmidecode cmd: {0}\n{0}'.format(cmd, _err.message))
+ except (IOError, OSError) as _err:
+ LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err.message)
return None
def read_dmi_data(key):
"""
- Wrapper for reading DMI data. This tries to determine whether the DMI
- Data can be read directly, otherwise it will fallback to using dmidecode.
+ Wrapper for reading DMI data.
+
+ This will do the following (returning the first that produces a
+ result):
+ 1) Use a mapping to translate `key` from dmidecode naming to
+ sysfs naming and look in /sys/class/dmi/... for a value.
+ 2) Use `key` as a sysfs key directly and look in /sys/class/dmi/...
+ 3) Fall-back to passing `key` to `dmidecode --string`.
+
+ If all of the above fail to find a value, None will be returned.
"""
- if os.path.exists(DMI_SYS_PATH):
- return _read_dmi_syspath(key)
+ syspath_value = _read_dmi_syspath(key)
+ if syspath_value is not None:
+ return syspath_value
dmidecode_path = which('dmidecode')
if dmidecode_path:
return _call_dmidecode(key, dmidecode_path)
- LOG.warn("did not find either path {0} or dmidecode command".format(
- DMI_SYS_PATH))
-
+ LOG.warn("did not find either path %s or dmidecode command",
+ DMI_SYS_PATH)
return None
+
+
+def message_from_string(string):
+ if sys.version_info[:2] < (2, 7):
+ return email.message_from_file(six.StringIO(string))
+ return email.message_from_string(string)
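
The documented lookup order of read_dmi_data can be traced with a single call
(sketch; the key is one of the mapped dmidecode names above):

    from cloudinit import util

    serial = util.read_dmi_data('system-serial-number')
    # 1) reads /sys/class/dmi/id/product_serial via the mapping table
    # 2) falls back to `dmidecode --string system-serial-number`
    # 3) returns None if neither source yields a value
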
diff --git a/config/cloud.cfg b/config/cloud.cfg
index 200050d3..e96e1781 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -48,6 +48,7 @@ cloud_config_modules:
- ssh-import-id
- locale
- set-passwords
+ - snappy
- grub-dpkg
- apt-pipelining
- apt-configure
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index 1c59c2cf..1236796c 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -536,6 +536,8 @@ timezone: US/Eastern
#
# to remedy this situation, 'def_log_file' can be set to a filename
# and syslog_fix_perms to a string containing "<user>:<group>"
+# if syslog_fix_perms is a list, it will iterate through and use the
+# first pair that does not raise an error.
#
# the default values are '/var/log/cloud-init.log' and 'syslog:adm'
# the value of 'def_log_file' should match what is configured in logging
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index cc0d0ede..a2024bdc 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -166,7 +166,7 @@ For now see: http://maas.ubuntu.com/
CloudStack
---------------------------
-*TODO*
+.. include:: ../../sources/cloudstack/README.rst
---------------------------
OVF
diff --git a/doc/sources/cloudstack/README.rst b/doc/sources/cloudstack/README.rst
new file mode 100644
index 00000000..eba1cd7e
--- /dev/null
+++ b/doc/sources/cloudstack/README.rst
@@ -0,0 +1,29 @@
+`Apache CloudStack`_ exposes user-data, meta-data, user password and account
+sshkey through the Virtual-Router. For more details on meta-data and
+user-data, refer to the `CloudStack Administrator Guide`_.
+
+URLs to access user-data and meta-data from the Virtual Machine. Here 10.1.1.1
+is the Virtual Router IP:
+
+.. code:: bash
+
+ http://10.1.1.1/latest/user-data
+ http://10.1.1.1/latest/meta-data
+ http://10.1.1.1/latest/meta-data/{metadata type}
+
+Configuration
+~~~~~~~~~~~~~
+
+Apache CloudStack datasource can be configured as follows:
+
+.. code:: yaml
+
+ datasource:
+ CloudStack: {}
+ None: {}
+ datasource_list:
+ - CloudStack
+
+
+.. _Apache CloudStack: http://cloudstack.apache.org/
+.. _CloudStack Administrator Guide: http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/virtual_machines.html#user-data-and-meta-data \ No newline at end of file
diff --git a/packages/brpm b/packages/brpm
index 72bfca08..c6d79e75 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -40,7 +40,7 @@ PKG_MP = {
'jinja2': 'python-jinja2',
'configobj': 'python-configobj',
'jsonpatch': 'python-jsonpatch',
- 'oauth': 'python-oauth',
+ 'oauthlib': 'python-oauth',
'prettytable': 'python-prettytable',
'pyserial': 'pyserial',
'pyyaml': 'PyYAML',
diff --git a/packages/debian/control.in b/packages/debian/control.in
index bd6e3867..6de09f23 100644
--- a/packages/debian/control.in
+++ b/packages/debian/control.in
@@ -18,9 +18,8 @@ Architecture: all
Depends: procps,
${python},
${requires},
- software-properties-common,
${misc:Depends},
-Recommends: sudo
+Recommends: eatmydata, sudo, software-properties-common
XB-Python-Version: ${python:Versions}
Description: Init scripts for cloud instances
Cloud instances need special scripts to run during initialisation
diff --git a/systemd/cloud-config.service b/systemd/cloud-config.service
index 41a86147..f9f1996e 100644
--- a/systemd/cloud-config.service
+++ b/systemd/cloud-config.service
@@ -1,8 +1,7 @@
[Unit]
Description=Apply the settings specified in cloud-config
-After=network.target syslog.target cloud-config.target
-Requires=cloud-config.target
-Wants=network.target
+After=network-online.target cloud-config.target syslog.target
+Wants=network-online.target cloud-config.target
[Service]
Type=oneshot
diff --git a/systemd/cloud-config.target b/systemd/cloud-config.target
index 28f5bcf1..ae9b7d02 100644
--- a/systemd/cloud-config.target
+++ b/systemd/cloud-config.target
@@ -7,4 +7,5 @@
[Unit]
Description=Cloud-config availability
-Requires=cloud-init-local.service cloud-init.service
+Wants=cloud-init-local.service cloud-init.service
+After=cloud-init-local.service cloud-init.service
diff --git a/systemd/cloud-final.service b/systemd/cloud-final.service
index ef0f52b9..c023ad94 100644
--- a/systemd/cloud-final.service
+++ b/systemd/cloud-final.service
@@ -1,8 +1,7 @@
[Unit]
Description=Execute cloud user/final scripts
-After=network.target syslog.target cloud-config.service rc-local.service
-Requires=cloud-config.target
-Wants=network.target
+After=network-online.target cloud-config.service syslog.target rc-local.service
+Wants=network-online.target cloud-config.service
[Service]
Type=oneshot
diff --git a/systemd/cloud-init.service b/systemd/cloud-init.service
index 398b90ea..48920283 100644
--- a/systemd/cloud-init.service
+++ b/systemd/cloud-init.service
@@ -1,8 +1,8 @@
[Unit]
Description=Initial cloud-init job (metadata service crawler)
-After=local-fs.target network.target cloud-init-local.service
+After=local-fs.target network-online.target cloud-init-local.service
Before=sshd.service sshd-keygen.service systemd-user-sessions.service
-Requires=network.target
+Requires=network-online.target
Wants=local-fs.target cloud-init-local.service sshd.service sshd-keygen.service
[Service]
diff --git a/sysvinit/redhat/cloud-init-local b/sysvinit/redhat/cloud-init-local
index b53e0db2..b9caedbd 100755
--- a/sysvinit/redhat/cloud-init-local
+++ b/sysvinit/redhat/cloud-init-local
@@ -23,9 +23,12 @@
# See: http://www.novell.com/coolsolutions/feature/15380.html
# Also based on dhcpd in RHEL (for comparison)
+# Bring this up before network, S10
+#chkconfig: 2345 09 91
+
### BEGIN INIT INFO
# Provides: cloud-init-local
-# Required-Start: $local_fs $remote_fs
+# Required-Start: $local_fs
# Should-Start: $time
# Required-Stop:
# Should-Stop:
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 7516bd02..61a1f6ff 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -1,5 +1,6 @@
from __future__ import print_function
+import functools
import os
import sys
import shutil
@@ -25,9 +26,10 @@ PY2 = False
PY26 = False
PY27 = False
PY3 = False
+FIX_HTTPRETTY = False
_PY_VER = sys.version_info
-_PY_MAJOR, _PY_MINOR = _PY_VER[0:2]
+_PY_MAJOR, _PY_MINOR, _PY_MICRO = _PY_VER[0:3]
if (_PY_MAJOR, _PY_MINOR) <= (2, 6):
if (_PY_MAJOR, _PY_MINOR) == (2, 6):
PY26 = True
@@ -39,6 +41,8 @@ else:
PY2 = True
if (_PY_MAJOR, _PY_MINOR) >= (3, 0):
PY3 = True
+ if _PY_MINOR == 4 and _PY_MICRO < 3:
+ FIX_HTTPRETTY = True
if PY26:
# For now add these on, taken from python 2.7 + slightly adjusted. Drop
@@ -268,6 +272,37 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
mock.patch.object(sys, 'stderr', stderr))
+def import_httpretty():
+ """Import HTTPretty and monkey patch Python 3.4 issue.
+    See https://github.com/gabrielfalcao/HTTPretty/pull/193 as well as
+    https://github.com/gabrielfalcao/HTTPretty/issues/221.
+
+ Lifted from
+ https://github.com/inveniosoftware/datacite/blob/master/tests/helpers.py
+ """
+ if not FIX_HTTPRETTY:
+ import httpretty
+ else:
+ import socket
+ old_SocketType = socket.SocketType
+
+ import httpretty
+ from httpretty import core
+
+ def sockettype_patch(f):
+ @functools.wraps(f)
+ def inner(*args, **kwargs):
+ f(*args, **kwargs)
+ socket.SocketType = old_SocketType
+ socket.__dict__['SocketType'] = old_SocketType
+ return inner
+
+ core.httpretty.disable = sockettype_patch(
+ httpretty.httpretty.disable
+ )
+ return httpretty
+
+
class HttprettyTestCase(TestCase):
# necessary as http_proxy gets in the way of httpretty
# https://github.com/gabrielfalcao/HTTPretty/issues/122
@@ -288,7 +323,10 @@ def populate_dir(path, files):
os.makedirs(path)
for (name, content) in files.items():
with open(os.path.join(path, name), "wb") as fp:
- fp.write(content.encode('utf-8'))
+ if isinstance(content, six.binary_type):
+ fp.write(content)
+ else:
+ fp.write(content.encode('utf-8'))
fp.close()
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index 1a307e56..c32783a6 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -181,7 +181,7 @@ class TestCmdlineUrl(unittest.TestCase):
def test_invalid_content(self):
url = "http://example.com/foo"
key = "mykey"
- payload = "0"
+ payload = b"0"
cmdline = "ro %s=%s bar=1" % (key, url)
with mock.patch('cloudinit.url_helper.readurl',
@@ -194,13 +194,13 @@ class TestCmdlineUrl(unittest.TestCase):
def test_valid_content(self):
url = "http://example.com/foo"
key = "mykey"
- payload = "xcloud-config\nmydata: foo\nbar: wark\n"
+ payload = b"xcloud-config\nmydata: foo\nbar: wark\n"
cmdline = "ro %s=%s bar=1" % (key, url)
with mock.patch('cloudinit.url_helper.readurl',
return_value=url_helper.StringResponse(payload)):
self.assertEqual(
- util.get_cmdline_url(names=[key], starts="xcloud-config",
+ util.get_cmdline_url(names=[key], starts=b"xcloud-config",
cmdline=cmdline),
(key, url, payload))
@@ -210,7 +210,7 @@ class TestCmdlineUrl(unittest.TestCase):
cmdline = "ro %s=%s bar=1" % (key, url)
with mock.patch('cloudinit.url_helper.readurl',
- return_value=url_helper.StringResponse('')):
+ return_value=url_helper.StringResponse(b'')):
self.assertEqual(
util.get_cmdline_url(names=["does-not-appear"],
starts="#cloud-config", cmdline=cmdline),
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 48475515..c603bfdb 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -13,6 +13,7 @@ except ImportError:
from six import BytesIO, StringIO
+from email import encoders
from email.mime.application import MIMEApplication
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
@@ -58,7 +59,6 @@ def gzip_text(text):
return contents.getvalue()
-
# FIXME: these tests shouldn't be checking log output??
# Weirddddd...
class TestConsumeUserData(helpers.FilesystemMockingTestCase):
@@ -493,6 +493,50 @@ c: 4
mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600),
])
+ def test_mime_application_octet_stream(self):
+ """Mime type application/octet-stream is ignored but shows warning."""
+ ci = stages.Init()
+ message = MIMEBase("application", "octet-stream")
+ message.set_payload(b'\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc')
+ encoders.encode_base64(message)
+ ci.datasource = FakeDataSource(message.as_string().encode())
+
+ with mock.patch('cloudinit.util.write_file') as mockobj:
+ log_file = self.capture_log(logging.WARNING)
+ ci.fetch()
+ ci.consume_data()
+ self.assertIn(
+ "Unhandled unknown content-type (application/octet-stream)",
+ log_file.getvalue())
+ mockobj.assert_called_once_with(
+ ci.paths.get_ipath("cloud_config"), "", 0o600)
+
+ def test_cloud_config_archive(self):
+ non_decodable = b'\x11\xc9\xb4gTH\xee\x12'
+ data = [{'content': '#cloud-config\npassword: gocubs\n'},
+ {'content': '#cloud-config\nlocale: chicago\n'},
+ {'content': non_decodable}]
+ message = b'#cloud-config-archive\n' + util.yaml_dumps(data).encode()
+
+ ci = stages.Init()
+ ci.datasource = FakeDataSource(message)
+
+ fs = {}
+
+ def fsstore(filename, content, mode=0o0644, omode="wb"):
+ fs[filename] = content
+
+ # consuming the user-data provided should write 'cloud_config' file
+ # which will have our yaml in it.
+ with mock.patch('cloudinit.util.write_file') as mockobj:
+ mockobj.side_effect = fsstore
+ ci.fetch()
+ ci.consume_data()
+
+ cfg = util.load_yaml(fs[ci.paths.get_ipath("cloud_config")])
+ self.assertEqual(cfg.get('password'), 'gocubs')
+ self.assertEqual(cfg.get('locale'), 'chicago')
+
class TestUDProcess(helpers.ResourceUsingTestCase):
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 8112c69b..7e789853 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -116,9 +116,6 @@ class TestAzureDataSource(TestCase):
data['iid_from_shared_cfg'] = path
return 'i-my-azure-id'
- def _apply_hostname_bounce(**kwargs):
- data['apply_hostname_bounce'] = kwargs
-
if data.get('ovfcontent') is not None:
populate_dir(os.path.join(self.paths.seed_dir, "azure"),
{'ovf-env.xml': data['ovfcontent']})
@@ -132,7 +129,9 @@ class TestAzureDataSource(TestCase):
(mod, 'wait_for_files', _wait_for_files),
(mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),
(mod, 'iid_from_shared_config', _iid_from_shared_config),
- (mod, 'apply_hostname_bounce', _apply_hostname_bounce),
+ (mod, 'perform_hostname_bounce', mock.MagicMock()),
+ (mod, 'get_hostname', mock.MagicMock()),
+ (mod, 'set_hostname', mock.MagicMock()),
])
dsrc = mod.DataSourceAzureNet(
@@ -272,47 +271,6 @@ class TestAzureDataSource(TestCase):
for mypk in mypklist:
self.assertIn(mypk, dsrc.cfg['_pubkeys'])
- def test_disabled_bounce(self):
- pass
-
- def test_apply_bounce_call_1(self):
- # hostname needs to get through to apply_hostname_bounce
- odata = {'HostName': 'my-random-hostname'}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- self._get_ds(data).get_data()
- self.assertIn('hostname', data['apply_hostname_bounce'])
- self.assertEqual(data['apply_hostname_bounce']['hostname'],
- odata['HostName'])
-
- def test_apply_bounce_call_configurable(self):
- # hostname_bounce should be configurable in datasource cfg
- cfg = {'hostname_bounce': {'interface': 'eth1', 'policy': 'off',
- 'command': 'my-bounce-command',
- 'hostname_command': 'my-hostname-command'}}
- odata = {'HostName': "xhost",
- 'dscfg': {'text': b64e(yaml.dump(cfg)),
- 'encoding': 'base64'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
- self._get_ds(data).get_data()
-
- for k in cfg['hostname_bounce']:
- self.assertIn(k, data['apply_hostname_bounce'])
-
- for k, v in cfg['hostname_bounce'].items():
- self.assertEqual(data['apply_hostname_bounce'][k], v)
-
- def test_set_hostname_disabled(self):
- # config specifying set_hostname off should not bounce
- cfg = {'set_hostname': False}
- odata = {'HostName': "xhost",
- 'dscfg': {'text': b64e(yaml.dump(cfg)),
- 'encoding': 'base64'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
- self._get_ds(data).get_data()
-
- self.assertEqual(data.get('apply_hostname_bounce', "N/A"), "N/A")
-
def test_default_ephemeral(self):
# make sure the ephemeral device works
odata = {}
@@ -425,6 +383,175 @@ class TestAzureDataSource(TestCase):
load_file(os.path.join(self.waagent_d, 'ovf-env.xml')))
+class TestAzureBounce(TestCase):
+
+ def mock_out_azure_moving_parts(self):
+ self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'invoke_agent'))
+ self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'wait_for_files'))
+ self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'iid_from_shared_config',
+ mock.MagicMock(return_value='i-my-azure-id')))
+ self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs',
+ mock.MagicMock(return_value=[])))
+ self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'find_ephemeral_disk',
+ mock.MagicMock(return_value=None)))
+ self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'find_ephemeral_part',
+ mock.MagicMock(return_value=None)))
+
+ def setUp(self):
+ super(TestAzureBounce, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
+ self.paths = helpers.Paths({'cloud_dir': self.tmp})
+ self.addCleanup(shutil.rmtree, self.tmp)
+ DataSourceAzure.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+ self.patches = ExitStack()
+ self.mock_out_azure_moving_parts()
+ self.get_hostname = self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'get_hostname'))
+ self.set_hostname = self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'set_hostname'))
+ self.subp = self.patches.enter_context(
+ mock.patch('cloudinit.sources.DataSourceAzure.util.subp'))
+
+ def tearDown(self):
+ self.patches.close()
+
+ def _get_ds(self, ovfcontent=None):
+ if ovfcontent is not None:
+ populate_dir(os.path.join(self.paths.seed_dir, "azure"),
+ {'ovf-env.xml': ovfcontent})
+ return DataSourceAzure.DataSourceAzureNet(
+ {}, distro=None, paths=self.paths)
+
+ def get_ovf_env_with_dscfg(self, hostname, cfg):
+ odata = {
+ 'HostName': hostname,
+ 'dscfg': {
+ 'text': b64e(yaml.dump(cfg)),
+ 'encoding': 'base64'
+ }
+ }
+ return construct_valid_ovf_env(data=odata)
+
+ def test_disabled_bounce_does_not_change_hostname(self):
+ cfg = {'hostname_bounce': {'policy': 'off'}}
+ self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data()
+ self.assertEqual(0, self.set_hostname.call_count)
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+ def test_disabled_bounce_does_not_perform_bounce(
+ self, perform_hostname_bounce):
+ cfg = {'hostname_bounce': {'policy': 'off'}}
+ self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data()
+ self.assertEqual(0, perform_hostname_bounce.call_count)
+
+ def test_same_hostname_does_not_change_hostname(self):
+ host_name = 'unchanged-host-name'
+ self.get_hostname.return_value = host_name
+ cfg = {'hostname_bounce': {'policy': 'yes'}}
+ self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data()
+ self.assertEqual(0, self.set_hostname.call_count)
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+ def test_unchanged_hostname_does_not_perform_bounce(
+ self, perform_hostname_bounce):
+ host_name = 'unchanged-host-name'
+ self.get_hostname.return_value = host_name
+ cfg = {'hostname_bounce': {'policy': 'yes'}}
+ self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data()
+ self.assertEqual(0, perform_hostname_bounce.call_count)
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+ def test_force_performs_bounce_regardless(self, perform_hostname_bounce):
+ host_name = 'unchanged-host-name'
+ self.get_hostname.return_value = host_name
+ cfg = {'hostname_bounce': {'policy': 'force'}}
+ self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data()
+ self.assertEqual(1, perform_hostname_bounce.call_count)
+
+ def test_different_hostnames_sets_hostname(self):
+ expected_hostname = 'azure-expected-host-name'
+ self.get_hostname.return_value = 'default-host-name'
+ self._get_ds(
+ self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data()
+ self.assertEqual(expected_hostname,
+ self.set_hostname.call_args_list[0][0][0])
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+ def test_different_hostnames_performs_bounce(
+ self, perform_hostname_bounce):
+ expected_hostname = 'azure-expected-host-name'
+ self.get_hostname.return_value = 'default-host-name'
+ self._get_ds(
+ self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data()
+ self.assertEqual(1, perform_hostname_bounce.call_count)
+
+ def test_different_hostnames_sets_hostname_back(self):
+ initial_host_name = 'default-host-name'
+ self.get_hostname.return_value = initial_host_name
+ self._get_ds(
+ self.get_ovf_env_with_dscfg('some-host-name', {})).get_data()
+ self.assertEqual(initial_host_name,
+ self.set_hostname.call_args_list[-1][0][0])
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+ def test_failure_in_bounce_still_resets_host_name(
+ self, perform_hostname_bounce):
+ perform_hostname_bounce.side_effect = Exception
+ initial_host_name = 'default-host-name'
+ self.get_hostname.return_value = initial_host_name
+ self._get_ds(
+ self.get_ovf_env_with_dscfg('some-host-name', {})).get_data()
+ self.assertEqual(initial_host_name,
+ self.set_hostname.call_args_list[-1][0][0])
+
+ def test_environment_correct_for_bounce_command(self):
+ interface = 'int0'
+ hostname = 'my-new-host'
+ old_hostname = 'my-old-host'
+ self.get_hostname.return_value = old_hostname
+ cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}}
+ data = self.get_ovf_env_with_dscfg(hostname, cfg)
+ self._get_ds(data).get_data()
+ self.assertEqual(1, self.subp.call_count)
+ bounce_env = self.subp.call_args[1]['env']
+ self.assertEqual(interface, bounce_env['interface'])
+ self.assertEqual(hostname, bounce_env['hostname'])
+ self.assertEqual(old_hostname, bounce_env['old_hostname'])
+
+ def test_default_bounce_command_used_by_default(self):
+ cmd = 'default-bounce-command'
+ DataSourceAzure.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd
+ cfg = {'hostname_bounce': {'policy': 'force'}}
+ data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
+ self._get_ds(data).get_data()
+ self.assertEqual(1, self.subp.call_count)
+ bounce_args = self.subp.call_args[1]['args']
+ self.assertEqual(cmd, bounce_args)
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+ def test_set_hostname_option_can_disable_bounce(
+ self, perform_hostname_bounce):
+ cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
+ data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
+ self._get_ds(data).get_data()
+
+ self.assertEqual(0, perform_hostname_bounce.call_count)
+
+ def test_set_hostname_option_can_disable_hostname_set(self):
+ cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
+ data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
+ self._get_ds(data).get_data()
+
+ self.assertEqual(0, self.set_hostname.call_count)
+
+
class TestReadAzureOvf(TestCase):
def test_invalid_xml_raises_non_azure_ds(self):
invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index e28bdd84..83aca505 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -2,6 +2,7 @@ from copy import copy
import json
import os
import shutil
+import six
import tempfile
try:
@@ -45,7 +46,7 @@ EC2_META = {
'reservation-id': 'r-iru5qm4m',
'security-groups': ['default']
}
-USER_DATA = '#!/bin/sh\necho This is user data\n'
+USER_DATA = b'#!/bin/sh\necho This is user data\n'
OSTACK_META = {
'availability_zone': 'nova',
'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
@@ -56,8 +57,8 @@ OSTACK_META = {
'public_keys': {'mykey': PUBKEY},
'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
-CONTENT_0 = 'This is contents of /etc/foo.cfg\n'
-CONTENT_1 = '# this is /etc/bar/bar.cfg\n'
+CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
+CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
CFG_DRIVE_FILES_V2 = {
'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
@@ -346,8 +347,12 @@ def populate_dir(seed_dir, files):
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
- with open(path, "w") as fp:
+ if isinstance(content, six.text_type):
+ mode = "w"
+ else:
+ mode = "wb"
+
+ with open(path, mode) as fp:
fp.write(content)
- fp.close()
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py
index 98f9cfac..679d1b82 100644
--- a/tests/unittests/test_datasource/test_digitalocean.py
+++ b/tests/unittests/test_datasource/test_digitalocean.py
@@ -15,7 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import httpretty
import re
from six.moves.urllib_parse import urlparse
@@ -26,6 +25,8 @@ from cloudinit.sources import DataSourceDigitalOcean
from .. import helpers as test_helpers
+httpretty = test_helpers.import_httpretty()
+
# Abbreviated for the test
DO_INDEX = """id
hostname
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 6dd4b5ed..1fb100f7 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -15,7 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import httpretty
import re
from base64 import b64encode, b64decode
@@ -27,12 +26,14 @@ from cloudinit.sources import DataSourceGCE
from .. import helpers as test_helpers
+httpretty = test_helpers.import_httpretty()
+
GCE_META = {
'instance/id': '123',
'instance/zone': 'foo/bar',
'project/attributes/sshKeys': 'user:ssh-rsa AA2..+aRD0fyVw== root@server',
'instance/hostname': 'server.project-foo.local',
- 'instance/attributes/user-data': '/bin/echo foo\n',
+ 'instance/attributes/user-data': b'/bin/echo foo\n',
}
GCE_META_PARTIAL = {
@@ -112,10 +113,6 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
self.assertEqual(GCE_META.get('instance/attributes/user-data'),
self.ds.get_userdata_raw())
- # we expect a list of public ssh keys with user names stripped
- self.assertEqual(['ssh-rsa AA2..+aRD0fyVw== root@server'],
- self.ds.get_public_ssh_keys())
-
# test partial metadata (missing user-data in particular)
@httpretty.activate
def test_metadata_partial(self):
@@ -140,3 +137,48 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
decoded = b64decode(
GCE_META_ENCODING.get('instance/attributes/user-data'))
self.assertEqual(decoded, self.ds.get_userdata_raw())
+
+ @httpretty.activate
+ def test_missing_required_keys_return_false(self):
+ for required_key in ['instance/id', 'instance/zone',
+ 'instance/hostname']:
+ meta = GCE_META_PARTIAL.copy()
+ del meta[required_key]
+ httpretty.register_uri(httpretty.GET, MD_URL_RE,
+ body=_new_request_callback(meta))
+ self.assertEqual(False, self.ds.get_data())
+ httpretty.reset()
+
+ @httpretty.activate
+ def test_project_level_ssh_keys_are_used(self):
+ httpretty.register_uri(httpretty.GET, MD_URL_RE,
+ body=_new_request_callback())
+ self.ds.get_data()
+
+ # we expect a list of public ssh keys with user names stripped
+ self.assertEqual(['ssh-rsa AA2..+aRD0fyVw== root@server'],
+ self.ds.get_public_ssh_keys())
+
+ @httpretty.activate
+ def test_instance_level_ssh_keys_are_used(self):
+ key_content = 'ssh-rsa JustAUser root@server'
+ meta = GCE_META.copy()
+ meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content)
+
+ httpretty.register_uri(httpretty.GET, MD_URL_RE,
+ body=_new_request_callback(meta))
+ self.ds.get_data()
+
+ self.assertIn(key_content, self.ds.get_public_ssh_keys())
+
+ @httpretty.activate
+ def test_instance_level_keys_replace_project_level_keys(self):
+ key_content = 'ssh-rsa JustAUser root@server'
+ meta = GCE_META.copy()
+ meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content)
+
+ httpretty.register_uri(httpretty.GET, MD_URL_RE,
+ body=_new_request_callback(meta))
+ self.ds.get_data()
+
+ self.assertEqual([key_content], self.ds.get_public_ssh_keys())
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index d25e1adc..f109bb04 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -26,7 +26,7 @@ class TestMAASDataSource(TestCase):
data = {'instance-id': 'i-valid01',
'local-hostname': 'valid01-hostname',
- 'user-data': 'valid01-userdata',
+ 'user-data': b'valid01-userdata',
'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
my_d = os.path.join(self.tmp, "valid")
@@ -46,7 +46,7 @@ class TestMAASDataSource(TestCase):
data = {'instance-id': 'i-valid-extra',
'local-hostname': 'valid-extra-hostname',
- 'user-data': 'valid-extra-userdata', 'foo': 'bar'}
+ 'user-data': b'valid-extra-userdata', 'foo': 'bar'}
my_d = os.path.join(self.tmp, "valid_extra")
populate_dir(my_d, data)
@@ -103,7 +103,7 @@ class TestMAASDataSource(TestCase):
'meta-data/instance-id': 'i-instanceid',
'meta-data/local-hostname': 'test-hostname',
'meta-data/public-keys': 'test-hostname',
- 'user-data': 'foodata',
+ 'user-data': b'foodata',
}
valid_order = [
'meta-data/local-hostname',
@@ -143,7 +143,7 @@ class TestMAASDataSource(TestCase):
userdata, metadata = DataSourceMAAS.read_maas_seed_url(
my_seed, header_cb=my_headers_cb, version=my_ver)
- self.assertEqual("foodata", userdata)
+ self.assertEqual(b"foodata", userdata)
self.assertEqual(metadata['instance-id'],
valid['meta-data/instance-id'])
self.assertEqual(metadata['local-hostname'],
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 4f967f58..85b4c25a 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -37,7 +37,7 @@ class TestNoCloudDataSource(TestCase):
def test_nocloud_seed_dir(self):
md = {'instance-id': 'IID', 'dsmode': 'local'}
- ud = "USER_DATA_HERE"
+ ud = b"USER_DATA_HERE"
populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
{'user-data': ud, 'meta-data': yaml.safe_dump(md)})
@@ -92,20 +92,20 @@ class TestNoCloudDataSource(TestCase):
data = {
'fs_label': None,
'meta-data': yaml.safe_dump({'instance-id': 'IID'}),
- 'user-data': "USER_DATA_RAW",
+ 'user-data': b"USER_DATA_RAW",
}
sys_cfg = {'datasource': {'NoCloud': data}}
dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
- self.assertEqual(dsrc.userdata_raw, "USER_DATA_RAW")
+ self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW")
self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
self.assertTrue(ret)
def test_nocloud_seed_with_vendordata(self):
md = {'instance-id': 'IID', 'dsmode': 'local'}
- ud = "USER_DATA_HERE"
- vd = "THIS IS MY VENDOR_DATA"
+ ud = b"USER_DATA_HERE"
+ vd = b"THIS IS MY VENDOR_DATA"
populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
{'user-data': ud, 'meta-data': yaml.safe_dump(md),
@@ -126,7 +126,7 @@ class TestNoCloudDataSource(TestCase):
def test_nocloud_no_vendordata(self):
populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': "ud", 'meta-data': "instance-id: IID\n"})
+ {'user-data': b"ud", 'meta-data': "instance-id: IID\n"})
sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
@@ -134,7 +134,7 @@ class TestNoCloudDataSource(TestCase):
dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
- self.assertEqual(dsrc.userdata_raw, "ud")
+ self.assertEqual(dsrc.userdata_raw, b"ud")
self.assertFalse(dsrc.vendordata)
self.assertTrue(ret)
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index 81ef1546..0aa1ba84 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -31,7 +31,7 @@ from cloudinit.sources import DataSourceOpenStack as ds
from cloudinit.sources.helpers import openstack
from cloudinit import util
-import httpretty as hp
+hp = test_helpers.import_httpretty()
BASE_URL = "http://169.254.169.254"
PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
@@ -49,7 +49,7 @@ EC2_META = {
'public-ipv4': '0.0.0.1',
'reservation-id': 'r-iru5qm4m',
}
-USER_DATA = '#!/bin/sh\necho This is user data\n'
+USER_DATA = b'#!/bin/sh\necho This is user data\n'
VENDOR_DATA = {
'magic': '',
}
@@ -63,8 +63,8 @@ OSTACK_META = {
'public_keys': {'mykey': PUBKEY},
'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c',
}
-CONTENT_0 = 'This is contents of /etc/foo.cfg\n'
-CONTENT_1 = '# this is /etc/bar/bar.cfg\n'
+CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
+CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
OS_FILES = {
'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
'openstack/latest/user_data': USER_DATA,
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 8b62b1b1..adee9019 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -24,18 +24,28 @@
from __future__ import print_function
-from cloudinit import helpers as c_helpers
-from cloudinit.sources import DataSourceSmartOS
-from cloudinit.util import b64e
-from .. import helpers
import os
import os.path
import re
import shutil
-import tempfile
import stat
+import tempfile
import uuid
+from binascii import crc32
+
+import serial
+import six
+from cloudinit import helpers as c_helpers
+from cloudinit.sources import DataSourceSmartOS
+from cloudinit.util import b64e
+
+from .. import helpers
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
MOCK_RETURNS = {
'hostname': 'test-host',
@@ -54,60 +64,15 @@ MOCK_RETURNS = {
DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc')
-class MockSerial(object):
- """Fake a serial terminal for testing the code that
- interfaces with the serial"""
-
- port = None
+def get_mock_client(mockdata):
+ class MockMetadataClient(object):
- def __init__(self, mockdata):
- self.last = None
- self.last = None
- self.new = True
- self.count = 0
- self.mocked_out = []
- self.mockdata = mockdata
+ def __init__(self, serial):
+ pass
- def open(self):
- return True
-
- def close(self):
- return True
-
- def isOpen(self):
- return True
-
- def write(self, line):
- line = line.replace('GET ', '')
- self.last = line.rstrip()
-
- def readline(self):
- if self.new:
- self.new = False
- if self.last in self.mockdata:
- return 'SUCCESS\n'
- else:
- return 'NOTFOUND %s\n' % self.last
-
- if self.last in self.mockdata:
- if not self.mocked_out:
- self.mocked_out = [x for x in self._format_out()]
-
- if len(self.mocked_out) > self.count:
- self.count += 1
- return self.mocked_out[self.count - 1]
-
- def _format_out(self):
- if self.last in self.mockdata:
- _mret = self.mockdata[self.last]
- try:
- for l in _mret.splitlines():
- yield "%s\n" % l.rstrip()
- except:
- yield "%s\n" % _mret.rstrip()
-
- yield '.'
- yield '\n'
+ def get_metadata(self, metadata_key):
+ return mockdata.get(metadata_key)
+ return MockMetadataClient
class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
@@ -155,9 +120,6 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
if dmi_data is None:
dmi_data = DMI_DATA_RETURN
- def _get_serial(*_):
- return MockSerial(mockdata)
-
def _dmi_data():
return dmi_data
@@ -174,7 +136,9 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
sys_cfg['datasource']['SmartOS'] = ds_cfg
self.apply_patches([(mod, 'LEGACY_USER_D', self.legacy_user_d)])
- self.apply_patches([(mod, 'get_serial', _get_serial)])
+ self.apply_patches([(mod, 'get_serial', mock.MagicMock())])
+ self.apply_patches([
+ (mod, 'JoyentMetadataClient', get_mock_client(mockdata))])
self.apply_patches([(mod, 'dmi_data', _dmi_data)])
self.apply_patches([(os, 'uname', _os_uname)])
self.apply_patches([(mod, 'device_exists', lambda d: True)])
@@ -443,6 +407,18 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
self.assertEqual(dsrc.device_name_to_device('FOO'),
mydscfg['disk_aliases']['FOO'])
+ @mock.patch('cloudinit.sources.DataSourceSmartOS.JoyentMetadataClient')
+ @mock.patch('cloudinit.sources.DataSourceSmartOS.get_serial')
+ def test_serial_console_closed_on_error(self, get_serial, metadata_client):
+ class OurException(Exception):
+ pass
+ metadata_client.side_effect = OurException
+ try:
+ DataSourceSmartOS.query_data('noun', 'device', 0)
+ except OurException:
+ pass
+ self.assertEqual(1, get_serial.return_value.close.call_count)
+
def apply_patches(patches):
ret = []
@@ -453,3 +429,133 @@ def apply_patches(patches):
setattr(ref, name, replace)
ret.append((ref, name, orig))
return ret
+
+
+class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
+
+ def setUp(self):
+ super(TestJoyentMetadataClient, self).setUp()
+ self.serial = mock.MagicMock(spec=serial.Serial)
+ self.request_id = 0xabcdef12
+ self.metadata_value = 'value'
+ self.response_parts = {
+ 'command': 'SUCCESS',
+ 'crc': 'b5a9ff00',
+ 'length': 17 + len(b64e(self.metadata_value)),
+ 'payload': b64e(self.metadata_value),
+ 'request_id': '{0:08x}'.format(self.request_id),
+ }
+
+ def make_response():
+ payload = ''
+ if self.response_parts['payload']:
+ payload = ' {0}'.format(self.response_parts['payload'])
+ del self.response_parts['payload']
+ return (
+ 'V2 {length} {crc} {request_id} {command}{payload}\n'.format(
+ payload=payload, **self.response_parts).encode('ascii'))
+ self.serial.readline.side_effect = make_response
+ self.patched_funcs.enter_context(
+ mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint',
+ mock.Mock(return_value=self.request_id)))
+
+ def _get_client(self):
+ return DataSourceSmartOS.JoyentMetadataClient(self.serial)
+
+    def assertEndsWith(self, haystack, suffix):
+        self.assertTrue(haystack.endswith(suffix),
+                        "{0} does not end with '{1}'".format(
+                            repr(haystack), suffix))
+
+ def assertStartsWith(self, haystack, prefix):
+ self.assertTrue(haystack.startswith(prefix),
+ "{0} does not start with '{1}'".format(
+ repr(haystack), prefix))
+
+ def test_get_metadata_writes_a_single_line(self):
+ client = self._get_client()
+ client.get_metadata('some_key')
+ self.assertEqual(1, self.serial.write.call_count)
+ written_line = self.serial.write.call_args[0][0]
+ self.assertEndsWith(written_line, b'\n')
+ self.assertEqual(1, written_line.count(b'\n'))
+
+ def _get_written_line(self, key='some_key'):
+ client = self._get_client()
+ client.get_metadata(key)
+ return self.serial.write.call_args[0][0]
+
+ def test_get_metadata_writes_bytes(self):
+ self.assertIsInstance(self._get_written_line(), six.binary_type)
+
+ def test_get_metadata_line_starts_with_v2(self):
+ self.assertStartsWith(self._get_written_line(), b'V2')
+
+ def test_get_metadata_uses_get_command(self):
+ parts = self._get_written_line().decode('ascii').strip().split(' ')
+ self.assertEqual('GET', parts[4])
+
+ def test_get_metadata_base64_encodes_argument(self):
+ key = 'my_key'
+ parts = self._get_written_line(key).decode('ascii').strip().split(' ')
+ self.assertEqual(b64e(key), parts[5])
+
+ def test_get_metadata_calculates_length_correctly(self):
+ parts = self._get_written_line().decode('ascii').strip().split(' ')
+ expected_length = len(' '.join(parts[3:]))
+ self.assertEqual(expected_length, int(parts[1]))
+
+ def test_get_metadata_uses_appropriate_request_id(self):
+ parts = self._get_written_line().decode('ascii').strip().split(' ')
+ request_id = parts[3]
+ self.assertEqual(8, len(request_id))
+ self.assertEqual(request_id, request_id.lower())
+
+ def test_get_metadata_uses_random_number_for_request_id(self):
+ line = self._get_written_line()
+ request_id = line.decode('ascii').strip().split(' ')[3]
+ self.assertEqual('{0:08x}'.format(self.request_id), request_id)
+
+ def test_get_metadata_checksums_correctly(self):
+ parts = self._get_written_line().decode('ascii').strip().split(' ')
+ expected_checksum = '{0:08x}'.format(
+ crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff)
+ checksum = parts[2]
+ self.assertEqual(expected_checksum, checksum)
+
+ def test_get_metadata_reads_a_line(self):
+ client = self._get_client()
+ client.get_metadata('some_key')
+ self.assertEqual(1, self.serial.readline.call_count)
+
+ def test_get_metadata_returns_valid_value(self):
+ client = self._get_client()
+ value = client.get_metadata('some_key')
+ self.assertEqual(self.metadata_value, value)
+
+ def test_get_metadata_throws_exception_for_incorrect_length(self):
+ self.response_parts['length'] = 0
+ client = self._get_client()
+ self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get_metadata, 'some_key')
+
+ def test_get_metadata_throws_exception_for_incorrect_crc(self):
+ self.response_parts['crc'] = 'deadbeef'
+ client = self._get_client()
+ self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get_metadata, 'some_key')
+
+ def test_get_metadata_throws_exception_for_request_id_mismatch(self):
+ self.response_parts['request_id'] = 'deadbeef'
+ client = self._get_client()
+ client._checksum = lambda _: self.response_parts['crc']
+ self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get_metadata, 'some_key')
+
+ def test_get_metadata_returns_None_if_value_not_found(self):
+ self.response_parts['payload'] = ''
+ self.response_parts['command'] = 'NOTFOUND'
+ self.response_parts['length'] = 17
+ client = self._get_client()
+ client._checksum = lambda _: self.response_parts['crc']
+ self.assertIsNone(client.get_metadata('some_key'))
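
Taken together, the assertions above fix the V2 wire format: a single ASCII
line ``V2 <length> <crc32> <request_id> GET <base64(key)>`` terminated by a
newline, where the length and checksum cover everything after the CRC field.
A sketch reconstructing the request side from those assertions:

.. code:: python

    # Request framing as pinned down by the tests above (sketch only).
    import random
    from binascii import crc32

    from cloudinit.util import b64e

    def build_v2_request(key):
        request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
        body = '{0} GET {1}'.format(request_id, b64e(key))
        checksum = '{0:08x}'.format(crc32(body.encode('utf-8')) & 0xffffffff)
        line = 'V2 {0} {1} {2}\n'.format(len(body), checksum, body)
        return line.encode('ascii')  # written to the serial console as bytes
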
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py
index 84aa002e..99fc54be 100644
--- a/tests/unittests/test_ec2_util.py
+++ b/tests/unittests/test_ec2_util.py
@@ -3,7 +3,7 @@ from . import helpers
from cloudinit import ec2_utils as eu
from cloudinit import url_helper as uh
-import httpretty as hp
+hp = helpers.import_httpretty()
class TestEc2Util(helpers.HttprettyTestCase):
@@ -16,7 +16,7 @@ class TestEc2Util(helpers.HttprettyTestCase):
body='stuff',
status=200)
userdata = eu.get_instance_userdata(self.VERSION)
- self.assertEquals('stuff', userdata)
+ self.assertEquals('stuff', userdata.decode('utf-8'))
@hp.activate
def test_userdata_fetch_fail_not_found(self):
diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py
index d8fe9a4f..895728b3 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure.py
@@ -7,7 +7,6 @@ import os
import re
import shutil
import tempfile
-import unittest
class TestAptProxyConfig(TestCase):
@@ -30,7 +29,7 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
- contents = str(util.read_file_or_url(self.pfile))
+ contents = util.load_tfile_or_url(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_http_proxy_written(self):
@@ -40,7 +39,7 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
- contents = str(util.read_file_or_url(self.pfile))
+ contents = util.load_tfile_or_url(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_all_proxy_written(self):
@@ -58,7 +57,7 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
- contents = str(util.read_file_or_url(self.pfile))
+ contents = util.load_tfile_or_url(self.pfile)
for ptype, pval in values.items():
self.assertTrue(self._search_apt_config(contents, ptype, pval))
@@ -74,7 +73,7 @@ class TestAptProxyConfig(TestCase):
cc_apt_configure.apply_apt_config({'apt_proxy': "foo"},
self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
- contents = str(util.read_file_or_url(self.pfile))
+ contents = util.load_tfile_or_url(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "foo"))
def test_config_written(self):
@@ -86,14 +85,14 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(os.path.isfile(self.cfile))
self.assertFalse(os.path.isfile(self.pfile))
- self.assertEqual(str(util.read_file_or_url(self.cfile)), payload)
+ self.assertEqual(util.load_tfile_or_url(self.cfile), payload)
def test_config_replaced(self):
util.write_file(self.pfile, "content doesnt matter")
cc_apt_configure.apply_apt_config({'apt_config': "foo"},
self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.cfile))
- self.assertEqual(str(util.read_file_or_url(self.cfile)), "foo")
+ self.assertEqual(util.load_tfile_or_url(self.cfile), "foo")
def test_config_deleted(self):
# if no 'apt_config' is provided, delete any previously written file
diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py
new file mode 100644
index 00000000..ddef8d48
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_disk_setup.py
@@ -0,0 +1,30 @@
+from cloudinit.config import cc_disk_setup
+from ..helpers import ExitStack, mock, TestCase
+
+
+class TestIsDiskUsed(TestCase):
+
+ def setUp(self):
+ super(TestIsDiskUsed, self).setUp()
+ self.patches = ExitStack()
+ mod_name = 'cloudinit.config.cc_disk_setup'
+ self.enumerate_disk = self.patches.enter_context(
+ mock.patch('{0}.enumerate_disk'.format(mod_name)))
+ self.check_fs = self.patches.enter_context(
+ mock.patch('{0}.check_fs'.format(mod_name)))
+
+ def test_multiple_child_nodes_returns_true(self):
+ self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(2))
+ self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
+ self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
+
+ def test_valid_filesystem_returns_true(self):
+ self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
+ self.check_fs.return_value = (
+ mock.MagicMock(), 'ext4', mock.MagicMock())
+ self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
+
+ def test_one_child_nodes_and_no_fs_returns_false(self):
+ self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
+ self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
+ self.assertFalse(cc_disk_setup.is_disk_used(mock.MagicMock()))
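
A minimal sketch of the ``is_disk_used()`` logic these three cases pin down
(``enumerate_disk`` and ``check_fs`` are the helpers mocked above):

.. code:: python

    # Sketch: a disk counts as "used" if it has multiple child nodes or
    # carries a recognizable filesystem; otherwise it is considered free.
    def is_disk_used(device):
        if len(list(enumerate_disk(device))) > 1:
            return True
        _label, fs_type, _uuid = check_fs(device)
        return bool(fs_type)
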
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
new file mode 100644
index 00000000..eceb14d9
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -0,0 +1,306 @@
+from cloudinit.config.cc_snappy import (
+ makeop, get_package_ops, render_snap_op)
+from cloudinit import util
+from .. import helpers as t_help
+
+import os
+import shutil
+import tempfile
+import yaml
+
+ALLOWED = (dict, list, int, str)
+
+
+class TestInstallPackages(t_help.TestCase):
+ def setUp(self):
+ super(TestInstallPackages, self).setUp()
+ self.unapply = []
+
+        # patch util.subp so snappy invocations are recorded, not run
+ self.apply_patches([(util, 'subp', self._subp)])
+ self.subp_called = []
+ self.snapcmds = []
+ self.tmp = tempfile.mkdtemp(prefix="TestInstallPackages")
+
+ def tearDown(self):
+ apply_patches([i for i in reversed(self.unapply)])
+ shutil.rmtree(self.tmp)
+
+ def apply_patches(self, patches):
+ ret = apply_patches(patches)
+ self.unapply += ret
+
+ def populate_tmp(self, files):
+ return t_help.populate_dir(self.tmp, files)
+
+ def _subp(self, *args, **kwargs):
+ # supports subp calling with cmd as args or kwargs
+ if 'args' not in kwargs:
+ kwargs['args'] = args[0]
+ self.subp_called.append(kwargs)
+ args = kwargs['args']
+        # parse the snappy command invoked and append a
+        # (mode, pkg, config) entry to snapcmds
+ if args[0:2] == ['snappy', 'config']:
+ if args[3] == "-":
+ config = kwargs.get('data', '')
+ else:
+ with open(args[3], "rb") as fp:
+ config = yaml.safe_load(fp.read())
+ self.snapcmds.append(['config', args[2], config])
+ elif args[0:2] == ['snappy', 'install']:
+ config = None
+ pkg = None
+ for arg in args[2:]:
+ if arg.startswith("-"):
+ continue
+ if not pkg:
+ pkg = arg
+ elif not config:
+ cfgfile = arg
+ if cfgfile == "-":
+ config = kwargs.get('data', '')
+ elif cfgfile:
+ with open(cfgfile, "rb") as fp:
+ config = yaml.safe_load(fp.read())
+ self.snapcmds.append(['install', pkg, config])
+
+ def test_package_ops_1(self):
+ ret = get_package_ops(
+ packages=['pkg1', 'pkg2', 'pkg3'],
+ configs={'pkg2': b'mycfg2'}, installed=[])
+ self.assertEqual(
+ ret, [makeop('install', 'pkg1', None, None),
+ makeop('install', 'pkg2', b'mycfg2', None),
+ makeop('install', 'pkg3', None, None)])
+
+ def test_package_ops_config_only(self):
+ ret = get_package_ops(
+ packages=None,
+ configs={'pkg2': b'mycfg2'}, installed=['pkg1', 'pkg2'])
+ self.assertEqual(
+ ret, [makeop('config', 'pkg2', b'mycfg2')])
+
+ def test_package_ops_install_and_config(self):
+ ret = get_package_ops(
+ packages=['pkg3', 'pkg2'],
+ configs={'pkg2': b'mycfg2', 'xinstalled': b'xcfg'},
+ installed=['xinstalled'])
+ self.assertEqual(
+ ret, [makeop('install', 'pkg3'),
+ makeop('install', 'pkg2', b'mycfg2'),
+ makeop('config', 'xinstalled', b'xcfg')])
+
+ def test_package_ops_install_long_config_short(self):
+        # a package installed by full name can be configured by short name
+ cfg = {'k1': 'k2'}
+ ret = get_package_ops(
+ packages=['config-example.canonical'],
+ configs={'config-example': cfg}, installed=[])
+ self.assertEqual(
+ ret, [makeop('install', 'config-example.canonical', cfg)])
+
+ def test_package_ops_with_file(self):
+ self.populate_tmp(
+ {"snapf1.snap": b"foo1", "snapf1.config": b"snapf1cfg",
+ "snapf2.snap": b"foo2", "foo.bar": "ignored"})
+ ret = get_package_ops(
+ packages=['pkg1'], configs={}, installed=[], fspath=self.tmp)
+ self.assertEqual(
+ ret,
+ [makeop_tmpd(self.tmp, 'install', 'snapf1', path="snapf1.snap",
+ cfgfile="snapf1.config"),
+ makeop_tmpd(self.tmp, 'install', 'snapf2', path="snapf2.snap"),
+ makeop('install', 'pkg1')])
+
+ def test_package_ops_common_filename(self):
+ # fish package name from filename
+ # package names likely look like: pkgname.namespace_version_arch.snap
+
+ # find filenames
+ self.populate_tmp(
+ {"pkg-ws.smoser_0.3.4_all.snap": "pkg-ws-snapdata",
+ "pkg-ws.config": "pkg-ws-config",
+ "pkg1.smoser_1.2.3_all.snap": "pkg1.snapdata",
+ "pkg1.smoser.config": "pkg1.smoser.config-data",
+ "pkg1.config": "pkg1.config-data",
+ "pkg2.smoser_0.0_amd64.snap": "pkg2-snapdata",
+ "pkg2.smoser_0.0_amd64.config": "pkg2.config",
+ })
+
+ ret = get_package_ops(
+ packages=[], configs={}, installed=[], fspath=self.tmp)
+ self.assertEqual(
+ ret,
+ [makeop_tmpd(self.tmp, 'install', 'pkg-ws.smoser',
+ path="pkg-ws.smoser_0.3.4_all.snap",
+ cfgfile="pkg-ws.config"),
+ makeop_tmpd(self.tmp, 'install', 'pkg1.smoser',
+ path="pkg1.smoser_1.2.3_all.snap",
+ cfgfile="pkg1.smoser.config"),
+ makeop_tmpd(self.tmp, 'install', 'pkg2.smoser',
+ path="pkg2.smoser_0.0_amd64.snap",
+ cfgfile="pkg2.smoser_0.0_amd64.config"),
+ ])
+
+ def test_package_ops_config_overrides_file(self):
+ # config data overrides local file .config
+ self.populate_tmp(
+ {"snapf1.snap": b"foo1", "snapf1.config": b"snapf1cfg"})
+ ret = get_package_ops(
+ packages=[], configs={'snapf1': 'snapf1cfg-config'},
+ installed=[], fspath=self.tmp)
+ self.assertEqual(
+ ret, [makeop_tmpd(self.tmp, 'install', 'snapf1',
+ path="snapf1.snap", config="snapf1cfg-config")])
+
+ def test_package_ops_namespacing(self):
+ cfgs = {
+ 'config-example': {'k1': 'v1'},
+ 'pkg1': {'p1': 'p2'},
+ 'ubuntu-core': {'c1': 'c2'},
+ 'notinstalled.smoser': {'s1': 's2'},
+ }
+ ret = get_package_ops(
+ packages=['config-example.canonical'], configs=cfgs,
+ installed=['config-example.smoser', 'pkg1.canonical',
+ 'ubuntu-core'])
+
+ expected_configs = [
+ makeop('config', 'pkg1', config=cfgs['pkg1']),
+ makeop('config', 'ubuntu-core', config=cfgs['ubuntu-core'])]
+ expected_installs = [
+ makeop('install', 'config-example.canonical',
+ config=cfgs['config-example'])]
+
+ installs = [i for i in ret if i['op'] == 'install']
+ configs = [c for c in ret if c['op'] == 'config']
+
+ self.assertEqual(installs, expected_installs)
+ # configs are not ordered
+ self.assertEqual(len(configs), len(expected_configs))
+ self.assertTrue(all(found in expected_configs for found in configs))
+
+ def test_render_op_localsnap(self):
+ self.populate_tmp({"snapf1.snap": b"foo1"})
+ op = makeop_tmpd(self.tmp, 'install', 'snapf1',
+ path='snapf1.snap')
+ render_snap_op(**op)
+ self.assertEqual(
+ self.snapcmds, [['install', op['path'], None]])
+
+ def test_render_op_localsnap_localconfig(self):
+ self.populate_tmp(
+ {"snapf1.snap": b"foo1", 'snapf1.config': b'snapf1cfg'})
+ op = makeop_tmpd(self.tmp, 'install', 'snapf1',
+ path='snapf1.snap', cfgfile='snapf1.config')
+ render_snap_op(**op)
+ self.assertEqual(
+ self.snapcmds, [['install', op['path'], 'snapf1cfg']])
+
+ def test_render_op_snap(self):
+ op = makeop('install', 'snapf1')
+ render_snap_op(**op)
+ self.assertEqual(
+ self.snapcmds, [['install', 'snapf1', None]])
+
+ def test_render_op_snap_config(self):
+ mycfg = {'key1': 'value1'}
+ name = "snapf1"
+ op = makeop('install', name, config=mycfg)
+ render_snap_op(**op)
+ self.assertEqual(
+ self.snapcmds, [['install', name, {'config': {name: mycfg}}]])
+
+ def test_render_op_config_bytes(self):
+ name = "snapf1"
+ mycfg = b'myconfig'
+ op = makeop('config', name, config=mycfg)
+ render_snap_op(**op)
+ self.assertEqual(
+ self.snapcmds, [['config', 'snapf1', {'config': {name: mycfg}}]])
+
+ def test_render_op_config_string(self):
+ name = 'snapf1'
+ mycfg = 'myconfig: foo\nhisconfig: bar\n'
+ op = makeop('config', name, config=mycfg)
+ render_snap_op(**op)
+ self.assertEqual(
+ self.snapcmds, [['config', 'snapf1', {'config': {name: mycfg}}]])
+
+ def test_render_op_config_dict(self):
+ # config entry for package can be a dict, not a string blob
+ mycfg = {'foo': 'bar'}
+ name = 'snapf1'
+ op = makeop('config', name, config=mycfg)
+ render_snap_op(**op)
+ # snapcmds is a list of 3-entry lists. data_found will be the
+ # blob of data in the file in 'snappy install --config=<file>'
+ data_found = self.snapcmds[0][2]
+ self.assertEqual(mycfg, data_found['config'][name])
+
+ def test_render_op_config_list(self):
+ # config entry for package can be a list, not a string blob
+ mycfg = ['foo', 'bar', 'wark', {'f1': 'b1'}]
+ name = "snapf1"
+ op = makeop('config', name, config=mycfg)
+ render_snap_op(**op)
+ data_found = self.snapcmds[0][2]
+ self.assertEqual(mycfg, data_found['config'][name])
+
+ def test_render_op_config_int(self):
+        # config entry for package can be an int, not a string blob
+ mycfg = 1
+ name = 'snapf1'
+ op = makeop('config', name, config=mycfg)
+ render_snap_op(**op)
+ data_found = self.snapcmds[0][2]
+ self.assertEqual(mycfg, data_found['config'][name])
+
+ def test_render_long_configs_short(self):
+        # installing a namespaced package should yield un-namespaced config
+ mycfg = {'k1': 'k2'}
+ name = 'snapf1'
+ op = makeop('install', name + ".smoser", config=mycfg)
+ render_snap_op(**op)
+ data_found = self.snapcmds[0][2]
+ self.assertEqual(mycfg, data_found['config'][name])
+
+ def test_render_does_not_pad_cfgfile(self):
+ # package_ops with cfgfile should not modify --file= content.
+ mydata = "foo1: bar1\nk: [l1, l2, l3]\n"
+ self.populate_tmp(
+ {"snapf1.snap": b"foo1", "snapf1.config": mydata.encode()})
+ ret = get_package_ops(
+ packages=[], configs={}, installed=[], fspath=self.tmp)
+ self.assertEqual(
+ ret,
+ [makeop_tmpd(self.tmp, 'install', 'snapf1', path="snapf1.snap",
+ cfgfile="snapf1.config")])
+
+ # now the op was ok, but test that render didn't mess it up.
+ render_snap_op(**ret[0])
+ data_found = self.snapcmds[0][2]
+ # the data found gets loaded in the snapcmd interpretation
+ # so this comparison is a bit lossy, but input to snappy config
+ # is expected to be yaml loadable, so it should be OK.
+ self.assertEqual(yaml.safe_load(mydata), data_found)
+
+
+def makeop_tmpd(tmpd, op, name, config=None, path=None, cfgfile=None):
+ if cfgfile:
+ cfgfile = os.path.sep.join([tmpd, cfgfile])
+ if path:
+ path = os.path.sep.join([tmpd, path])
+    return makeop(op=op, name=name, config=config, path=path, cfgfile=cfgfile)
+
+
+def apply_patches(patches):
+ ret = []
+ for (ref, name, replace) in patches:
+ if replace is None:
+ continue
+ orig = getattr(ref, name)
+ setattr(ref, name, replace)
+ ret.append((ref, name, orig))
+ return ret
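
Read together, these tests describe a two-step flow: ``get_package_ops()``
turns the requested packages, per-package configs and any local
``*.snap``/``*.config`` files into op dicts, and ``render_snap_op()`` turns
each op into a ``snappy install``/``snappy config`` invocation. A hedged
usage sketch (the ``fspath`` value is illustrative only):

.. code:: python

    # Usage sketch of the helpers exercised above; values are examples.
    from cloudinit.config.cc_snappy import get_package_ops, render_snap_op

    ops = get_package_ops(
        packages=['pkg1'],               # names passed to 'snappy install'
        configs={'pkg1': {'k1': 'v1'}},  # per-package config data
        installed=[],                    # names already installed
        fspath='/some/seed/dir')         # directory scanned for *.snap files
    for op in ops:
        render_snap_op(**op)             # shells out via util.subp
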
diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py
index 7089bde6..38fd75b6 100644
--- a/tests/unittests/test_pathprefix2dict.py
+++ b/tests/unittests/test_pathprefix2dict.py
@@ -14,28 +14,28 @@ class TestPathPrefix2Dict(TestCase):
self.addCleanup(shutil.rmtree, self.tmp)
def test_required_only(self):
- dirdata = {'f1': 'f1content', 'f2': 'f2content'}
+ dirdata = {'f1': b'f1content', 'f2': b'f2content'}
populate_dir(self.tmp, dirdata)
ret = util.pathprefix2dict(self.tmp, required=['f1', 'f2'])
self.assertEqual(dirdata, ret)
def test_required_missing(self):
- dirdata = {'f1': 'f1content'}
+ dirdata = {'f1': b'f1content'}
populate_dir(self.tmp, dirdata)
kwargs = {'required': ['f1', 'f2']}
self.assertRaises(ValueError, util.pathprefix2dict, self.tmp, **kwargs)
def test_no_required_and_optional(self):
- dirdata = {'f1': 'f1c', 'f2': 'f2c'}
+ dirdata = {'f1': b'f1c', 'f2': b'f2c'}
populate_dir(self.tmp, dirdata)
ret = util.pathprefix2dict(self.tmp, required=None,
- optional=['f1', 'f2'])
+ optional=['f1', 'f2'])
self.assertEqual(dirdata, ret)
def test_required_and_optional(self):
- dirdata = {'f1': 'f1c', 'f2': 'f2c'}
+ dirdata = {'f1': b'f1c', 'f2': b'f2c'}
populate_dir(self.tmp, dirdata)
ret = util.pathprefix2dict(self.tmp, required=['f1'], optional=['f2'])
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index cf7c03b0..0c19a2c2 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -18,10 +18,6 @@
from __future__ import print_function
-import sys
-import six
-import unittest
-
from . import helpers as test_helpers
import textwrap
@@ -30,6 +26,7 @@ from cloudinit import templater
try:
import Cheetah
HAS_CHEETAH = True
+ Cheetah # make pyflakes happy, as Cheetah is not used here
except ImportError:
HAS_CHEETAH = False
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 33c191a9..1619b5d2 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -323,58 +323,67 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
class TestReadDMIData(helpers.FilesystemMockingTestCase):
- def _patchIn(self, root):
- self.patchOS(root)
- self.patchUtils(root)
+ def setUp(self):
+ super(TestReadDMIData, self).setUp()
+ self.new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.new_root)
+ self.patchOS(self.new_root)
+ self.patchUtils(self.new_root)
- def _write_key(self, key, content):
- """Mocks the sys path found on Linux systems."""
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self._patchIn(new_root)
+ def _create_sysfs_parent_directory(self):
util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id'))
+ def _create_sysfs_file(self, key, content):
+ """Mocks the sys path found on Linux systems."""
+ self._create_sysfs_parent_directory()
dmi_key = "/sys/class/dmi/id/{0}".format(key)
util.write_file(dmi_key, content)
- def _no_syspath(self, key, content):
+ def _configure_dmidecode_return(self, key, content, error=None):
"""
In order to test a missing sys path and call outs to dmidecode, this
function fakes the results of dmidecode to test the results.
"""
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self._patchIn(new_root)
- self.real_which = util.which
- self.real_subp = util.subp
-
- def _which(key):
- return True
- util.which = _which
-
- def _cdd(_key, error=None):
+ def _dmidecode_subp(cmd):
+ if cmd[-1] != key:
+ raise util.ProcessExecutionError()
return (content, error)
- util.subp = _cdd
-
- def test_key(self):
- key_content = "TEST-KEY-DATA"
- self._write_key("key", key_content)
- self.assertEquals(key_content, util.read_dmi_data("key"))
- def test_key_mismatch(self):
- self._write_key("test", "ABC")
- self.assertNotEqual("123", util.read_dmi_data("test"))
-
- def test_no_key(self):
- self._no_syspath(None, None)
- self.assertFalse(util.read_dmi_data("key"))
-
- def test_callout_dmidecode(self):
- """test to make sure that dmidecode is used when no syspath"""
- self._no_syspath("key", "stuff")
- self.assertEquals("stuff", util.read_dmi_data("key"))
- self._no_syspath("key", None)
- self.assertFalse(None, util.read_dmi_data("key"))
+ self.patched_funcs.enter_context(
+ mock.patch.object(util, 'which', lambda _: True))
+ self.patched_funcs.enter_context(
+ mock.patch.object(util, 'subp', _dmidecode_subp))
+
+ def patch_mapping(self, new_mapping):
+ self.patched_funcs.enter_context(
+ mock.patch('cloudinit.util.DMIDECODE_TO_DMI_SYS_MAPPING',
+ new_mapping))
+
+ def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self):
+ self.patch_mapping({'mapped-key': 'mapped-value'})
+ expected_dmi_value = 'sys-used-correctly'
+ self._create_sysfs_file('mapped-value', expected_dmi_value)
+ self._configure_dmidecode_return('mapped-key', 'wrong-wrong-wrong')
+ self.assertEqual(expected_dmi_value, util.read_dmi_data('mapped-key'))
+
+ def test_dmidecode_used_if_no_sysfs_file_on_disk(self):
+ self.patch_mapping({})
+ self._create_sysfs_parent_directory()
+ expected_dmi_value = 'dmidecode-used'
+ self._configure_dmidecode_return('use-dmidecode', expected_dmi_value)
+ self.assertEqual(expected_dmi_value,
+ util.read_dmi_data('use-dmidecode'))
+
+ def test_none_returned_if_neither_source_has_data(self):
+ self.patch_mapping({})
+ self._configure_dmidecode_return('key', 'value')
+ self.assertEqual(None, util.read_dmi_data('expect-fail'))
+
+ def test_none_returned_if_dmidecode_not_in_path(self):
+ self.patched_funcs.enter_context(
+ mock.patch.object(util, 'which', lambda _: False))
+ self.patch_mapping({})
+ self.assertEqual(None, util.read_dmi_data('expect-fail'))
class TestMultiLog(helpers.FilesystemMockingTestCase):
@@ -443,4 +452,11 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
util.multi_log('message', log=log, log_level=log_level)
self.assertEqual((log_level, mock.ANY), log.log.call_args[0])
+
+class TestMessageFromString(helpers.TestCase):
+
+ def test_unicode_not_messed_up(self):
+ roundtripped = util.message_from_string(u'\n').as_string()
+ self.assertNotIn('\x00', roundtripped)
+
# vi: ts=4 expandtab
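
The rewritten cases spell out the lookup order for ``util.read_dmi_data()``:
a key present in ``DMIDECODE_TO_DMI_SYS_MAPPING`` is read from
``/sys/class/dmi/id/`` when the file exists, ``dmidecode`` is the fallback,
and ``None`` is returned when neither source (or the binary itself) is
available. A sketch of that contract, not the actual util.py change:

.. code:: python

    # Sketch of the lookup order asserted by TestReadDMIData above.
    import os

    def read_dmi_data(key):
        mapped = DMIDECODE_TO_DMI_SYS_MAPPING.get(key)
        if mapped:
            path = '/sys/class/dmi/id/{0}'.format(mapped)
            if os.path.isfile(path):
                return util.load_file(path)
        if not util.which('dmidecode'):
            return None
        try:
            out, _err = util.subp(['dmidecode', '--string', key])
            return out
        except util.ProcessExecutionError:
            return None
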
diff --git a/tools/hacking.py b/tools/hacking.py
index e7797564..3175df38 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -128,7 +128,7 @@ def cloud_docstring_multiline_end(physical_line):
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
if (pos != -1 and len(physical_line) == pos):
- print physical_line
+ print(physical_line)
if (physical_line[pos + 3] == ' '):
return (pos, "N403: multi line docstring end on new line")
diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py
index eda59cb8..6e164590 100755
--- a/tools/validate-yaml.py
+++ b/tools/validate-yaml.py
@@ -4,7 +4,6 @@
"""
import sys
-
import yaml
@@ -17,7 +16,7 @@ if __name__ == "__main__":
yaml.safe_load(fh.read())
fh.close()
sys.stdout.write(" - ok\n")
- except Exception, e:
+ except Exception as e:
sys.stdout.write(" - bad (%s)\n" % (e))
bads += 1
if bads > 0: