-rw-r--r--  ChangeLog                                                    14
-rw-r--r--  Makefile                                                      2
-rwxr-xr-x  bin/cloud-init                                                2
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py                         2
-rw-r--r--  cloudinit/config/cc_disk_setup.py                             3
-rw-r--r--  cloudinit/config/cc_snappy.py                                 6
-rw-r--r--  cloudinit/distros/__init__.py                                23
-rw-r--r--  cloudinit/distros/rhel.py                                    11
-rw-r--r--  cloudinit/handlers/__init__.py                               11
-rw-r--r--  cloudinit/sources/DataSourceAzure.py                        235
-rw-r--r--  cloudinit/sources/DataSourceGCE.py                           92
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py                     1
-rw-r--r--  cloudinit/sources/helpers/azure.py                          293
-rw-r--r--  cloudinit/user_data.py                                        9
-rw-r--r--  cloudinit/util.py                                            16
-rw-r--r--  doc/rtd/topics/datasources.rst                                2
-rw-r--r--  doc/sources/cloudstack/README.rst                            29
-rwxr-xr-x  packages/brpm                                                 4
-rw-r--r--  systemd/cloud-config.service                                  4
-rw-r--r--  systemd/cloud-final.service                                   4
-rw-r--r--  systemd/cloud-init.service                                    4
-rwxr-xr-x  sysvinit/redhat/cloud-init-local                              5
-rw-r--r--  tests/unittests/test_cli.py                                  54
-rw-r--r--  tests/unittests/test_data.py                                 31
-rw-r--r--  tests/unittests/test_datasource/test_azure.py               330
-rw-r--r--  tests/unittests/test_datasource/test_azure_helper.py        444
-rw-r--r--  tests/unittests/test_datasource/test_gce.py                  49
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py               2
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure.py  17
-rw-r--r--  tests/unittests/test_handler/test_handler_disk_setup.py     30
-rw-r--r--  tests/unittests/test_handler/test_handler_snappy.py          5
-rw-r--r--  tests/unittests/test_templating.py                           5
-rw-r--r--  tests/unittests/test_util.py                                 17
-rwxr-xr-x  tools/hacking.py                                              2
-rwxr-xr-x  tools/validate-yaml.py                                        3
35 files changed, 1477 insertions, 284 deletions
diff --git a/ChangeLog b/ChangeLog
index 6651b8eb..68b1a459 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -30,6 +30,20 @@
- SmartOS: use v2 metadata service (LP: #1436417) [Daniel Watkins]
- NoCloud: fix local datasource claiming found without explicit dsmode
- Snappy: add support for installing snappy packages and configuring.
+ - systemd: use network-online instead of network.target (LP: #1440180)
+ [Steve Langasek]
+ - Add functionality to fixate the uid of a newly added user.
+ - Don't overwrite the hostname if the user has changed it after we set it.
+ - GCE datasource does not handle instance ssh keys (LP: #1403617)
+ - sysvinit: make cloud-init-local run before network (LP: #1275098)
+ [Surojit Pathak]
+ - Azure: do not re-set hostname if user has changed it (LP: #1375252)
+ - Fix exception when running with no arguments on Python 3. [Daniel Watkins]
+ - Centos: detect/expect use of systemd on centos 7. [Brian Rak]
+ - Azure: remove dependency on walinux-agent [Daniel Watkins]
+ - EC2: know about eu-central-1 availability-zone (LP: #1456684)
+ - Azure: remove password from on-disk ovf-env.xml (LP: #1443311) [Ben Howard]
+ - Doc: include information on user-data in OpenStack [Daniel Watkins]
0.7.6:
- open 0.7.6
- Enable vendordata on CloudSigma datasource (LP: #1303986)
diff --git a/Makefile b/Makefile
index 009257ca..bb0c5253 100644
--- a/Makefile
+++ b/Makefile
@@ -20,7 +20,7 @@ pep8:
@$(CWD)/tools/run-pep8 $(PY_FILES)
pyflakes:
- pyflakes $(PY_FILES)
+ @$(CWD)/tools/tox-venv py34 pyflakes $(PY_FILES)
pip-requirements:
@echo "Installing cloud-init dependencies..."
diff --git a/bin/cloud-init b/bin/cloud-init
index 50bd929e..1d3e7ee3 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -611,6 +611,8 @@ def main():
# Setup signal handlers before running
signal_handler.attach_handlers()
+ if not hasattr(args, 'action'):
+ parser.error('too few arguments')
(name, functor) = args.action
if name in ("modules", "init"):
functor = status_wrapper
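
A note on the hasattr(args, 'action') guard above: in Python 3, argparse
subcommands are optional by default, so running cloud-init with no arguments
returns a namespace that simply lacks the 'action' attribute (Python 2's
argparse exited with "too few arguments" on its own). A minimal standalone
sketch of the behavior the patch restores:

    import argparse

    parser = argparse.ArgumentParser(prog='cloud-init')
    subparsers = parser.add_subparsers()
    init_parser = subparsers.add_parser('init')
    init_parser.set_defaults(action=('init', None))

    args = parser.parse_args([])         # no subcommand supplied
    if not hasattr(args, 'action'):      # Python 3 leaves 'action' unset
        parser.error('too few arguments')    # prints usage and exits 2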
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index e5629175..40c32c84 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -43,7 +43,7 @@ def handle(_name, cfg, _cloud, log, _args):
write_apt_snippet("0", log, DEFAULT_FILE)
elif apt_pipe_value_s in ("none", "unchanged", "os"):
return
- elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]:
+ elif apt_pipe_value_s in [str(b) for b in range(0, 6)]:
write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
else:
log.warn("Invalid option for apt_pipelining: %s", apt_pipe_value)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index f899210b..e2ce6db4 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -304,8 +304,7 @@ def is_disk_used(device):
# If the child count is higher than 1, then there are child nodes
# such as partition or device mapper nodes
- use_count = [x for x in enumerate_disk(device)]
- if len(use_count.splitlines()) > 1:
+ if len(list(enumerate_disk(device))) > 1:
return True
# If we see a file system, then it's used
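
The removed lines held a genuine bug: enumerate_disk is a generator, so the
old code built a list and then called .splitlines() on it, which raises
AttributeError (lists have no such method). A minimal sketch of the failure
and the fix, assuming a hypothetical generator yielding one entry per device
node:

    def enumerate_disk(device):
        # hypothetical stand-in: one entry for the disk, one per partition
        yield {'name': device}
        yield {'name': device + '1'}

    use_count = [x for x in enumerate_disk('/dev/sda')]
    # use_count.splitlines()  # AttributeError: 'list' has no 'splitlines'

    if len(list(enumerate_disk('/dev/sda'))) > 1:   # the corrected check
        print('disk has child nodes; treat it as used')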
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 6a7ae09b..7aaec94a 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -42,12 +42,10 @@ Example config:
"""
from cloudinit import log as logging
-from cloudinit import templater
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
import glob
-import six
import tempfile
import os
@@ -72,7 +70,7 @@ def parse_filename(fname):
name = fname_noext.partition("_")[0]
shortname = name.partition(".")[0]
return(name, shortname, fname_noext)
-
+
def get_fs_package_ops(fspath):
if not fspath:
@@ -98,7 +96,7 @@ def makeop(op, name, config=None, path=None, cfgfile=None):
def get_package_config(configs, name):
# load the package's config from the configs dict.
- # prefer full-name entry (config-example.canonical)
+ # prefer full-name entry (config-example.canonical)
# over short name entry (config-example)
if name in configs:
return configs[name]
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index ab874b45..e0cce670 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -208,6 +208,15 @@ class Distro(object):
and sys_hostname != hostname):
update_files.append(sys_fn)
+ # If something else has changed the hostname after we set it
+ # initially, we should not overwrite those changes (we should
+ # only be setting the hostname once per instance)
+ if (sys_hostname and prev_hostname and
+ sys_hostname != prev_hostname):
+ LOG.info("%s differs from %s, assuming user maintained hostname.",
+ prev_hostname_fn, sys_fn)
+ return
+
# Remove duplicates (in case the previous config filename
# is the same as the system config filename; don't bother
# doing it twice)
@@ -222,11 +231,6 @@ class Distro(object):
util.logexc(LOG, "Failed to write hostname %s to %s", hostname,
fn)
- if (sys_hostname and prev_hostname and
- sys_hostname != prev_hostname):
- LOG.debug("%s differs from %s, assuming user maintained hostname.",
- prev_hostname_fn, sys_fn)
-
# If the system hostname file name was provided set the
# non-fqdn as the transient hostname.
if sys_fn in update_files:
@@ -318,6 +322,7 @@ class Distro(object):
"gecos": '--comment',
"homedir": '--home',
"primary_group": '--gid',
+ "uid": '--uid',
"groups": '--groups',
"passwd": '--password',
"shell": '--shell',
@@ -551,8 +556,12 @@ def _get_package_mirror_info(mirror_info, availability_zone=None,
if not mirror_info:
mirror_info = {}
- ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" %
- "north|northeast|east|southeast|south|southwest|west|northwest")
+ # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b)
+ # the region is us-east-1. so region = az[0:-1]
+ directions_re = '|'.join([
+ 'central', 'east', 'north', 'northeast', 'northwest',
+ 'south', 'southeast', 'southwest', 'west'])
+ ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re)
subst = {}
if availability_zone:
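
The rewritten regex adds 'central' to the direction list so that zones such
as eu-central-1a are recognized, matching the LP: #1456684 ChangeLog entry.
A quick standalone check of the comment's "region = az[0:-1]" claim:

    import re

    directions_re = '|'.join([
        'central', 'east', 'north', 'northeast', 'northwest',
        'south', 'southeast', 'southwest', 'west'])
    ec2_az_re = "^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re

    for az in ('us-east-1b', 'eu-central-1a'):
        assert re.match(ec2_az_re, az)
        print(az, '->', az[0:-1])   # us-east-1b -> us-east-1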
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 7408989c..30c805a6 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -116,6 +116,7 @@ class Distro(distros.Distro):
(dist, vers) = util.system_info()['dist'][:2]
major = (int)(vers.split('.')[0])
return ((dist.startswith('Red Hat Enterprise Linux') and major >= 7)
+ or (dist.startswith('CentOS Linux') and major >= 7)
or (dist.startswith('Fedora') and major >= 18))
def apply_locale(self, locale, out_fn=None):
@@ -132,7 +133,11 @@ class Distro(distros.Distro):
rhel_util.update_sysconfig_file(out_fn, locale_cfg)
def _write_hostname(self, hostname, out_fn):
- if self.uses_systemd():
+ # systemd will never update previous-hostname for us, so
+ # we need to do it ourselves
+ if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
+ util.write_file(out_fn, hostname)
+ elif self.uses_systemd():
util.subp(['hostnamectl', 'set-hostname', str(hostname)])
else:
host_cfg = {
@@ -155,7 +160,9 @@ class Distro(distros.Distro):
return (host_fn, self._read_hostname(host_fn))
def _read_hostname(self, filename, default=None):
- if self.uses_systemd():
+ if self.uses_systemd() and filename.endswith('/previous-hostname'):
+ return util.load_file(filename).strip()
+ elif self.uses_systemd():
(out, _err) = util.subp(['hostname'])
if len(out):
return out
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index d62fcd19..53d5604a 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -170,12 +170,12 @@ def _extract_first_or_bytes(blob, size):
start = blob.split("\n", 1)[0]
else:
# We want to avoid decoding the whole blob (it might be huge)
- # By taking 4*size bytes we have a guarantee to decode size utf8 chars
- start = blob[:4*size].decode(errors='ignore').split("\n", 1)[0]
+ # By taking 4*size bytes we guarantee to decode size utf8 chars
+ start = blob[:4 * size].decode(errors='ignore').split("\n", 1)[0]
if len(start) >= size:
start = start[:size]
except UnicodeDecodeError:
- # Bytes array doesn't contain a text object -- return chunk of raw bytes
+ # Bytes array doesn't contain text so return chunk of raw bytes
start = blob[0:size]
return start
@@ -263,7 +263,10 @@ def fixup_handler(mod, def_freq=PER_INSTANCE):
def type_from_starts_with(payload, default=None):
- payload_lc = payload.lower()
+ try:
+ payload_lc = util.decode_binary(payload).lower()
+ except UnicodeDecodeError:
+ return default
payload_lc = payload_lc.lstrip()
for text in INCLUSION_SRCH:
if payload_lc.startswith(text):
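
On the 4*size comment above: UTF-8 encodes any code point in at most four
bytes, so decoding the first 4*size bytes always yields at least size
characters when the payload is valid text. A tiny demonstration:

    size = 2
    blob = (u'\U0001f600' * 3).encode('utf-8')   # 4 bytes per code point
    start = blob[:4 * size].decode(errors='ignore').split("\n", 1)[0]
    assert len(start) >= size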
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 6e030217..d0a882ca 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -17,17 +17,22 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
+import contextlib
import crypt
import fnmatch
import os
import os.path
import time
+import xml.etree.ElementTree as ET
+
from xml.dom import minidom
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
from cloudinit import sources
from cloudinit import util
+from cloudinit.sources.helpers.azure import (
+ get_metadata_from_fabric, iid_from_shared_config_content)
LOG = logging.getLogger(__name__)
@@ -65,6 +70,40 @@ BUILTIN_CLOUD_CONFIG = {
DS_CFG_PATH = ['datasource', DS_NAME]
DEF_EPHEMERAL_LABEL = 'Temporary Storage'
+# The redacted password fails to meet password complexity requirements
+# so we can safely use this to mask/redact the password in the ovf-env.xml
+DEF_PASSWD_REDACTION = 'REDACTED'
+
+
+def get_hostname(hostname_command='hostname'):
+ return util.subp(hostname_command, capture=True)[0].strip()
+
+
+def set_hostname(hostname, hostname_command='hostname'):
+ util.subp([hostname_command, hostname])
+
+
+@contextlib.contextmanager
+def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
+ """
+ Set a temporary hostname, restoring the previous hostname on exit.
+
+    Yields the previous hostname, or None if the hostname was not
+    changed.
+ """
+ policy = cfg['hostname_bounce']['policy']
+ previous_hostname = get_hostname(hostname_command)
+ if (not util.is_true(cfg.get('set_hostname'))
+ or util.is_false(policy)
+ or (previous_hostname == temp_hostname and policy != 'force')):
+ yield None
+ return
+ set_hostname(temp_hostname, hostname_command)
+ try:
+ yield previous_hostname
+ finally:
+ set_hostname(previous_hostname, hostname_command)
+
class DataSourceAzureNet(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
@@ -80,6 +119,56 @@ class DataSourceAzureNet(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
+ def get_metadata_from_agent(self):
+ temp_hostname = self.metadata.get('local-hostname')
+ hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
+ with temporary_hostname(temp_hostname, self.ds_cfg,
+ hostname_command=hostname_command) \
+ as previous_hostname:
+ if (previous_hostname is not None
+ and util.is_true(self.ds_cfg.get('set_hostname'))):
+ cfg = self.ds_cfg['hostname_bounce']
+ try:
+ perform_hostname_bounce(hostname=temp_hostname,
+ cfg=cfg,
+ prev_hostname=previous_hostname)
+ except Exception as e:
+ LOG.warn("Failed publishing hostname: %s", e)
+ util.logexc(LOG, "handling set_hostname failed")
+
+ try:
+ invoke_agent(self.ds_cfg['agent_command'])
+ except util.ProcessExecutionError:
+ # claim the datasource even if the command failed
+ util.logexc(LOG, "agent command '%s' failed.",
+ self.ds_cfg['agent_command'])
+
+ ddir = self.ds_cfg['data_dir']
+ shcfgxml = os.path.join(ddir, "SharedConfig.xml")
+ wait_for = [shcfgxml]
+
+ fp_files = []
+ for pk in self.cfg.get('_pubkeys', []):
+ bname = str(pk['fingerprint'] + ".crt")
+ fp_files += [os.path.join(ddir, bname)]
+
+ missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+ func=wait_for_files,
+ args=(wait_for + fp_files,))
+ if len(missing):
+ LOG.warn("Did not find files, but going on: %s", missing)
+
+ metadata = {}
+ if shcfgxml in missing:
+ LOG.warn("SharedConfig.xml missing, using static instance-id")
+ else:
+ try:
+ metadata['instance-id'] = iid_from_shared_config(shcfgxml)
+ except ValueError as e:
+ LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)
+ metadata['public-keys'] = pubkeys_from_crt_files(fp_files)
+ return metadata
+
def get_data(self):
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
@@ -132,8 +221,6 @@ class DataSourceAzureNet(sources.DataSource):
# now update ds_cfg to reflect contents pass in config
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
- mycfg = self.ds_cfg
- ddir = mycfg['data_dir']
if found != ddir:
cached_ovfenv = util.load_file(
@@ -154,46 +241,18 @@ class DataSourceAzureNet(sources.DataSource):
# the directory to be protected.
write_files(ddir, files, dirmode=0o700)
- # handle the hostname 'publishing'
- try:
- handle_set_hostname(mycfg.get('set_hostname'),
- self.metadata.get('local-hostname'),
- mycfg['hostname_bounce'])
- except Exception as e:
- LOG.warn("Failed publishing hostname: %s", e)
- util.logexc(LOG, "handling set_hostname failed")
-
- try:
- invoke_agent(mycfg['agent_command'])
- except util.ProcessExecutionError:
- # claim the datasource even if the command failed
- util.logexc(LOG, "agent command '%s' failed.",
- mycfg['agent_command'])
-
- shcfgxml = os.path.join(ddir, "SharedConfig.xml")
- wait_for = [shcfgxml]
-
- fp_files = []
- for pk in self.cfg.get('_pubkeys', []):
- bname = str(pk['fingerprint'] + ".crt")
- fp_files += [os.path.join(ddir, bname)]
-
- missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
- func=wait_for_files,
- args=(wait_for + fp_files,))
- if len(missing):
- LOG.warn("Did not find files, but going on: %s", missing)
-
- if shcfgxml in missing:
- LOG.warn("SharedConfig.xml missing, using static instance-id")
+ if self.ds_cfg['agent_command'] == '__builtin__':
+ metadata_func = get_metadata_from_fabric
else:
- try:
- self.metadata['instance-id'] = iid_from_shared_config(shcfgxml)
- except ValueError as e:
- LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)
+ metadata_func = self.get_metadata_from_agent
+ try:
+ fabric_data = metadata_func()
+ except Exception as exc:
+ LOG.info("Error communicating with Azure fabric; assume we aren't"
+ " on Azure.", exc_info=True)
+ return False
- pubkeys = pubkeys_from_crt_files(fp_files)
- self.metadata['public-keys'] = pubkeys
+ self.metadata.update(fabric_data)
found_ephemeral = find_ephemeral_disk()
if found_ephemeral:
@@ -299,39 +358,15 @@ def support_new_ephemeral(cfg):
return mod_list
-def handle_set_hostname(enabled, hostname, cfg):
- if not util.is_true(enabled):
- return
-
- if not hostname:
- LOG.warn("set_hostname was true but no local-hostname")
- return
-
- apply_hostname_bounce(hostname=hostname, policy=cfg['policy'],
- interface=cfg['interface'],
- command=cfg['command'],
- hostname_command=cfg['hostname_command'])
-
-
-def apply_hostname_bounce(hostname, policy, interface, command,
- hostname_command="hostname"):
+def perform_hostname_bounce(hostname, cfg, prev_hostname):
# set the hostname to 'hostname' if it is not already set to that.
# then, if policy is not off, bounce the interface using command
- prev_hostname = util.subp(hostname_command, capture=True)[0].strip()
-
- util.subp([hostname_command, hostname])
-
- msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
- (prev_hostname, hostname, policy, interface))
-
- if util.is_false(policy):
- LOG.debug("pubhname: policy false, skipping [%s]", msg)
- return
-
- if prev_hostname == hostname and policy != "force":
- LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
- return
+ command = cfg['command']
+ interface = cfg['interface']
+ policy = cfg['policy']
+ msg = ("hostname=%s policy=%s interface=%s" %
+ (hostname, policy, interface))
env = os.environ.copy()
env['interface'] = interface
env['hostname'] = hostname
@@ -344,15 +379,16 @@ def apply_hostname_bounce(hostname, policy, interface, command,
shell = not isinstance(command, (list, tuple))
# capture=False, see comments in bug 1202758 and bug 1206164.
util.log_time(logfunc=LOG.debug, msg="publishing hostname",
- get_uptime=True, func=util.subp,
- kwargs={'args': command, 'shell': shell, 'capture': False,
- 'env': env})
+ get_uptime=True, func=util.subp,
+ kwargs={'args': command, 'shell': shell, 'capture': False,
+ 'env': env})
-def crtfile_to_pubkey(fname):
+def crtfile_to_pubkey(fname, data=None):
pipeline = ('openssl x509 -noout -pubkey < "$0" |'
'ssh-keygen -i -m PKCS8 -f /dev/stdin')
- (out, _err) = util.subp(['sh', '-c', pipeline, fname], capture=True)
+ (out, _err) = util.subp(['sh', '-c', pipeline, fname],
+ capture=True, data=data)
return out.rstrip()
@@ -384,14 +420,30 @@ def wait_for_files(flist, maxwait=60, naplen=.5):
def write_files(datadir, files, dirmode=None):
+
+ def _redact_password(cnt, fname):
+ """Azure provides the UserPassword in plain text. So we redact it"""
+ try:
+ root = ET.fromstring(cnt)
+ for elem in root.iter():
+ if ('UserPassword' in elem.tag and
+ elem.text != DEF_PASSWD_REDACTION):
+ elem.text = DEF_PASSWD_REDACTION
+ return ET.tostring(root)
+ except Exception as e:
+ LOG.critical("failed to redact userpassword in {}".format(fname))
+ return cnt
+
if not datadir:
return
if not files:
files = {}
util.ensure_dir(datadir, dirmode)
for (name, content) in files.items():
- util.write_file(filename=os.path.join(datadir, name),
- content=content, mode=0o600)
+ fname = os.path.join(datadir, name)
+ if 'ovf-env.xml' in name:
+ content = _redact_password(content, fname)
+ util.write_file(filename=fname, content=content, mode=0o600)
def invoke_agent(cmd):
@@ -462,20 +514,6 @@ def load_azure_ovf_pubkeys(sshnode):
return found
-def single_node_at_path(node, pathlist):
- curnode = node
- for tok in pathlist:
- results = find_child(curnode, lambda n: n.localName == tok)
- if len(results) == 0:
- raise ValueError("missing %s token in %s" % (tok, str(pathlist)))
- if len(results) > 1:
- raise ValueError("found %s nodes of type %s looking for %s" %
- (len(results), tok, str(pathlist)))
- curnode = results[0]
-
- return curnode
-
-
def read_azure_ovf(contents):
try:
dom = minidom.parseString(contents)
@@ -560,7 +598,7 @@ def read_azure_ovf(contents):
defuser = {}
if username:
defuser['name'] = username
- if password:
+ if password and DEF_PASSWD_REDACTION != password:
defuser['passwd'] = encrypt_pass(password)
defuser['lock_passwd'] = False
@@ -606,19 +644,6 @@ def iid_from_shared_config(path):
return iid_from_shared_config_content(content)
-def iid_from_shared_config_content(content):
- """
- find INSTANCE_ID in:
- <?xml version="1.0" encoding="utf-8"?>
- <SharedConfig version="1.0.0.0" goalStateIncarnation="1">
- <Deployment name="INSTANCE_ID" guid="{...}" incarnation="0">
- <Service name="..." guid="{00000000-0000-0000-0000-000000000000}" />
- """
- dom = minidom.parseString(content)
- depnode = single_node_at_path(dom, ["SharedConfig", "Deployment"])
- return depnode.attributes.get('name').value
-
-
class BrokenAzureDataSource(Exception):
pass
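
A usage sketch of the new temporary_hostname context manager. Hedged: the
cfg shape below mirrors Azure's builtin hostname_bounce config, and actually
running it changes the system hostname, so it needs root and a working
'hostname' command:

    from cloudinit.sources.DataSourceAzure import temporary_hostname

    cfg = {
        'set_hostname': True,
        'hostname_bounce': {'policy': 'yes'},
    }
    with temporary_hostname('azure-publish-name', cfg) as previous:
        if previous is not None:
            # hostname is now 'azure-publish-name'; publish it here.
            # The previous hostname is restored on exit, even on error.
            print('previous hostname was %s' % previous)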
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 608c07f1..f4ed915d 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -30,6 +30,31 @@ BUILTIN_DS_CONFIG = {
REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
+class GoogleMetadataFetcher(object):
+ headers = {'X-Google-Metadata-Request': True}
+
+ def __init__(self, metadata_address):
+ self.metadata_address = metadata_address
+
+ def get_value(self, path, is_text):
+ value = None
+ try:
+ resp = url_helper.readurl(url=self.metadata_address + path,
+ headers=self.headers)
+ except url_helper.UrlError as exc:
+ msg = "url %s raised exception %s"
+ LOG.debug(msg, path, exc)
+ else:
+ if resp.code == 200:
+ if is_text:
+ value = util.decode_binary(resp.contents)
+ else:
+ value = resp.contents
+ else:
+ LOG.debug("url %s returned code %s", path, resp.code)
+ return value
+
+
class DataSourceGCE(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -50,17 +75,15 @@ class DataSourceGCE(sources.DataSource):
return public_key
def get_data(self):
- # GCE metadata server requires a custom header since v1
- headers = {'X-Google-Metadata-Request': True}
-
# url_map: (our-key, path, required, is_text)
url_map = [
- ('instance-id', 'instance/id', True, True),
- ('availability-zone', 'instance/zone', True, True),
- ('local-hostname', 'instance/hostname', True, True),
- ('public-keys', 'project/attributes/sshKeys', False, True),
- ('user-data', 'instance/attributes/user-data', False, False),
- ('user-data-encoding', 'instance/attributes/user-data-encoding',
+ ('instance-id', ('instance/id',), True, True),
+ ('availability-zone', ('instance/zone',), True, True),
+ ('local-hostname', ('instance/hostname',), True, True),
+ ('public-keys', ('project/attributes/sshKeys',
+ 'instance/attributes/sshKeys'), False, True),
+ ('user-data', ('instance/attributes/user-data',), False, False),
+ ('user-data-encoding', ('instance/attributes/user-data-encoding',),
False, True),
]
@@ -69,40 +92,25 @@ class DataSourceGCE(sources.DataSource):
LOG.debug("%s is not resolvable", self.metadata_address)
return False
+ metadata_fetcher = GoogleMetadataFetcher(self.metadata_address)
# iterate over url_map keys to get metadata items
- found = False
- for (mkey, path, required, is_text) in url_map:
- try:
- resp = url_helper.readurl(url=self.metadata_address + path,
- headers=headers)
- if resp.code == 200:
- found = True
- if is_text:
- self.metadata[mkey] = util.decode_binary(resp.contents)
- else:
- self.metadata[mkey] = resp.contents
+ running_on_gce = False
+ for (mkey, paths, required, is_text) in url_map:
+ value = None
+ for path in paths:
+ new_value = metadata_fetcher.get_value(path, is_text)
+ if new_value is not None:
+ value = new_value
+ if value:
+ running_on_gce = True
+ if required and value is None:
+ msg = "required key %s returned nothing. not GCE"
+ if not running_on_gce:
+ LOG.debug(msg, mkey)
else:
- if required:
- msg = "required url %s returned code %s. not GCE"
- if not found:
- LOG.debug(msg, path, resp.code)
- else:
- LOG.warn(msg, path, resp.code)
- return False
- else:
- self.metadata[mkey] = None
- except url_helper.UrlError as e:
- if required:
- msg = "required url %s raised exception %s. not GCE"
- if not found:
- LOG.debug(msg, path, e)
- else:
- LOG.warn(msg, path, e)
- return False
- msg = "Failed to get %s metadata item: %s."
- LOG.debug(msg, path, e)
-
- self.metadata[mkey] = None
+ LOG.warn(msg, mkey)
+ return False
+ self.metadata[mkey] = value
if self.metadata['public-keys']:
lines = self.metadata['public-keys'].splitlines()
@@ -116,7 +124,7 @@ class DataSourceGCE(sources.DataSource):
else:
LOG.warn('unknown user-data-encoding: %s, ignoring', encoding)
- return found
+ return running_on_gce
@property
def launch_index(self):
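
The url_map refactor above turns each entry's path into a tuple of paths
tried in order, with later results overriding earlier ones; this is how
instance-level sshKeys now take precedence over project-level ones. A
minimal sketch of the lookup loop with the fetcher stubbed out:

    class StubFetcher(object):
        # hypothetical responses; a missing key stands in for a 404
        data = {'project/attributes/sshKeys': 'project-key',
                'instance/attributes/sshKeys': 'instance-key'}

        def get_value(self, path, is_text):
            return self.data.get(path)

    paths = ('project/attributes/sshKeys', 'instance/attributes/sshKeys')
    fetcher = StubFetcher()
    value = None
    for path in paths:
        new_value = fetcher.get_value(path, is_text=True)
        if new_value is not None:
            value = new_value
    assert value == 'instance-key'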
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 61709c1b..ac2c3b45 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -24,7 +24,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import base64
import os
import pwd
import re
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
new file mode 100644
index 00000000..281d733e
--- /dev/null
+++ b/cloudinit/sources/helpers/azure.py
@@ -0,0 +1,293 @@
+import logging
+import os
+import re
+import socket
+import struct
+import tempfile
+import time
+from contextlib import contextmanager
+from xml.etree import ElementTree
+
+from cloudinit import util
+
+
+LOG = logging.getLogger(__name__)
+
+
+@contextmanager
+def cd(newdir):
+ prevdir = os.getcwd()
+ os.chdir(os.path.expanduser(newdir))
+ try:
+ yield
+ finally:
+ os.chdir(prevdir)
+
+
+class AzureEndpointHttpClient(object):
+
+ headers = {
+ 'x-ms-agent-name': 'WALinuxAgent',
+ 'x-ms-version': '2012-11-30',
+ }
+
+ def __init__(self, certificate):
+ self.extra_secure_headers = {
+ "x-ms-cipher-name": "DES_EDE3_CBC",
+ "x-ms-guest-agent-public-x509-cert": certificate,
+ }
+
+ def get(self, url, secure=False):
+ headers = self.headers
+ if secure:
+ headers = self.headers.copy()
+ headers.update(self.extra_secure_headers)
+ return util.read_file_or_url(url, headers=headers)
+
+ def post(self, url, data=None, extra_headers=None):
+ headers = self.headers
+ if extra_headers is not None:
+ headers = self.headers.copy()
+ headers.update(extra_headers)
+ return util.read_file_or_url(url, data=data, headers=headers)
+
+
+class GoalState(object):
+
+ def __init__(self, xml, http_client):
+ self.http_client = http_client
+ self.root = ElementTree.fromstring(xml)
+ self._certificates_xml = None
+
+ def _text_from_xpath(self, xpath):
+ element = self.root.find(xpath)
+ if element is not None:
+ return element.text
+ return None
+
+ @property
+ def container_id(self):
+ return self._text_from_xpath('./Container/ContainerId')
+
+ @property
+ def incarnation(self):
+ return self._text_from_xpath('./Incarnation')
+
+ @property
+ def instance_id(self):
+ return self._text_from_xpath(
+ './Container/RoleInstanceList/RoleInstance/InstanceId')
+
+ @property
+ def shared_config_xml(self):
+ url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance'
+ '/Configuration/SharedConfig')
+ return self.http_client.get(url).contents
+
+ @property
+ def certificates_xml(self):
+ if self._certificates_xml is None:
+ url = self._text_from_xpath(
+ './Container/RoleInstanceList/RoleInstance'
+ '/Configuration/Certificates')
+ if url is not None:
+ self._certificates_xml = self.http_client.get(
+ url, secure=True).contents
+ return self._certificates_xml
+
+
+class OpenSSLManager(object):
+
+ certificate_names = {
+ 'private_key': 'TransportPrivate.pem',
+ 'certificate': 'TransportCert.pem',
+ }
+
+ def __init__(self):
+ self.tmpdir = tempfile.mkdtemp()
+ self.certificate = None
+ self.generate_certificate()
+
+ def clean_up(self):
+ util.del_dir(self.tmpdir)
+
+ def generate_certificate(self):
+ LOG.debug('Generating certificate for communication with fabric...')
+ if self.certificate is not None:
+ LOG.debug('Certificate already generated.')
+ return
+ with cd(self.tmpdir):
+ util.subp([
+ 'openssl', 'req', '-x509', '-nodes', '-subj',
+ '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
+ '-keyout', self.certificate_names['private_key'],
+ '-out', self.certificate_names['certificate'],
+ ])
+ certificate = ''
+ for line in open(self.certificate_names['certificate']):
+ if "CERTIFICATE" not in line:
+ certificate += line.rstrip()
+ self.certificate = certificate
+ LOG.debug('New certificate generated.')
+
+ def parse_certificates(self, certificates_xml):
+ tag = ElementTree.fromstring(certificates_xml).find(
+ './/Data')
+ certificates_content = tag.text
+ lines = [
+ b'MIME-Version: 1.0',
+ b'Content-Disposition: attachment; filename="Certificates.p7m"',
+ b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
+ b'Content-Transfer-Encoding: base64',
+ b'',
+ certificates_content.encode('utf-8'),
+ ]
+ with cd(self.tmpdir):
+ with open('Certificates.p7m', 'wb') as f:
+ f.write(b'\n'.join(lines))
+ out, _ = util.subp(
+ 'openssl cms -decrypt -in Certificates.p7m -inkey'
+ ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
+ ' -password pass:'.format(**self.certificate_names),
+ shell=True)
+ private_keys, certificates = [], []
+ current = []
+ for line in out.splitlines():
+ current.append(line)
+ if re.match(r'[-]+END .*?KEY[-]+$', line):
+ private_keys.append('\n'.join(current))
+ current = []
+ elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
+ certificates.append('\n'.join(current))
+ current = []
+ keys = []
+ for certificate in certificates:
+ with cd(self.tmpdir):
+ public_key, _ = util.subp(
+ 'openssl x509 -noout -pubkey |'
+ 'ssh-keygen -i -m PKCS8 -f /dev/stdin',
+ data=certificate,
+ shell=True)
+ keys.append(public_key)
+ return keys
+
+
+def iid_from_shared_config_content(content):
+ """
+ find INSTANCE_ID in:
+ <?xml version="1.0" encoding="utf-8"?>
+ <SharedConfig version="1.0.0.0" goalStateIncarnation="1">
+ <Deployment name="INSTANCE_ID" guid="{...}" incarnation="0">
+ <Service name="..." guid="{00000000-0000-0000-0000-000000000000}"/>
+ """
+ root = ElementTree.fromstring(content)
+ depnode = root.find('Deployment')
+ return depnode.get('name')
+
+
+class WALinuxAgentShim(object):
+
+ REPORT_READY_XML_TEMPLATE = '\n'.join([
+ '<?xml version="1.0" encoding="utf-8"?>',
+ '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
+ ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
+ ' <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
+ ' <Container>',
+ ' <ContainerId>{container_id}</ContainerId>',
+ ' <RoleInstanceList>',
+ ' <Role>',
+ ' <InstanceId>{instance_id}</InstanceId>',
+ ' <Health>',
+ ' <State>Ready</State>',
+ ' </Health>',
+ ' </Role>',
+ ' </RoleInstanceList>',
+ ' </Container>',
+ '</Health>'])
+
+ def __init__(self):
+ LOG.debug('WALinuxAgentShim instantiated...')
+ self.endpoint = self.find_endpoint()
+ self.openssl_manager = None
+ self.values = {}
+
+ def clean_up(self):
+ if self.openssl_manager is not None:
+ self.openssl_manager.clean_up()
+
+ @staticmethod
+ def find_endpoint():
+ LOG.debug('Finding Azure endpoint...')
+ content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
+ value = None
+ for line in content.splitlines():
+ if 'unknown-245' in line:
+ value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
+ if value is None:
+ raise Exception('No endpoint found in DHCP config.')
+ if ':' in value:
+ hex_string = ''
+ for hex_pair in value.split(':'):
+ if len(hex_pair) == 1:
+ hex_pair = '0' + hex_pair
+ hex_string += hex_pair
+ value = struct.pack('>L', int(hex_string.replace(':', ''), 16))
+ else:
+ value = value.encode('utf-8')
+ endpoint_ip_address = socket.inet_ntoa(value)
+ LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
+ return endpoint_ip_address
+
+ def register_with_azure_and_fetch_data(self):
+ self.openssl_manager = OpenSSLManager()
+ http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
+ LOG.info('Registering with Azure...')
+ attempts = 0
+ while True:
+ try:
+ response = http_client.get(
+ 'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
+ except Exception:
+ if attempts < 10:
+ time.sleep(attempts + 1)
+ else:
+ raise
+ else:
+ break
+ attempts += 1
+ LOG.debug('Successfully fetched GoalState XML.')
+ goal_state = GoalState(response.contents, http_client)
+ public_keys = []
+ if goal_state.certificates_xml is not None:
+ LOG.debug('Certificate XML found; parsing out public keys.')
+ public_keys = self.openssl_manager.parse_certificates(
+ goal_state.certificates_xml)
+ data = {
+ 'instance-id': iid_from_shared_config_content(
+ goal_state.shared_config_xml),
+ 'public-keys': public_keys,
+ }
+ self._report_ready(goal_state, http_client)
+ return data
+
+ def _report_ready(self, goal_state, http_client):
+ LOG.debug('Reporting ready to Azure fabric.')
+ document = self.REPORT_READY_XML_TEMPLATE.format(
+ incarnation=goal_state.incarnation,
+ container_id=goal_state.container_id,
+ instance_id=goal_state.instance_id,
+ )
+ http_client.post(
+ "http://{0}/machine?comp=health".format(self.endpoint),
+ data=document,
+ extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
+ )
+ LOG.info('Reported ready to Azure fabric.')
+
+
+def get_metadata_from_fabric():
+ shim = WALinuxAgentShim()
+ try:
+ return shim.register_with_azure_and_fetch_data()
+ finally:
+ shim.clean_up()
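
On find_endpoint above: the Azure fabric endpoint arrives as DHCP option 245
("unknown-245" in dhclient leases), either as a colon-separated hex string
or as raw bytes. A standalone sketch of the hex branch, using a hypothetical
lease value:

    import socket
    import struct

    value = 'a8:3f:81:10'   # hypothetical unknown-245 value from a lease
    hex_string = ''.join(
        pair if len(pair) == 2 else '0' + pair
        for pair in value.split(':'))
    packed = struct.pack('>L', int(hex_string, 16))
    print(socket.inet_ntoa(packed))   # -> 168.63.129.16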
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index eb3c7336..f7c5787c 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -49,6 +49,7 @@ INCLUDE_TYPES = ['text/x-include-url', 'text/x-include-once-url']
ARCHIVE_TYPES = ["text/cloud-config-archive"]
UNDEF_TYPE = "text/plain"
ARCHIVE_UNDEF_TYPE = "text/cloud-config"
+ARCHIVE_UNDEF_BINARY_TYPE = "application/octet-stream"
# This seems to hit most of the gzip possible content types.
DECOMP_TYPES = [
@@ -265,11 +266,15 @@ class UserDataProcessor(object):
content = ent.get('content', '')
mtype = ent.get('type')
if not mtype:
- mtype = handlers.type_from_starts_with(content,
- ARCHIVE_UNDEF_TYPE)
+ default = ARCHIVE_UNDEF_TYPE
+ if isinstance(content, six.binary_type):
+ default = ARCHIVE_UNDEF_BINARY_TYPE
+ mtype = handlers.type_from_starts_with(content, default)
maintype, subtype = mtype.split('/', 1)
if maintype == "text":
+ if isinstance(content, six.binary_type):
+ content = content.decode()
msg = MIMEText(content, _subtype=subtype)
else:
msg = MIMEBase(maintype, subtype)
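
The new ARCHIVE_UNDEF_BINARY_TYPE path makes binary archive entries default
to application/octet-stream instead of being sniffed as text, which
previously failed on undecodable bytes. A minimal sketch of the default
selection only:

    import six

    ARCHIVE_UNDEF_TYPE = "text/cloud-config"
    ARCHIVE_UNDEF_BINARY_TYPE = "application/octet-stream"

    for content in ('#cloud-config\n', b'\x11\xc9\xb4gTH\xee\x12'):
        default = ARCHIVE_UNDEF_TYPE
        if isinstance(content, six.binary_type):
            default = ARCHIVE_UNDEF_BINARY_TYPE
        print(type(content).__name__, '->', default)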
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 971c1c2d..db4e02b8 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -121,8 +121,12 @@ def fully_decoded_payload(part):
if (six.PY3 and
part.get_content_maintype() == 'text' and
isinstance(cte_payload, bytes)):
- charset = part.get_charset() or 'utf-8'
- return cte_payload.decode(charset, errors='surrogateescape')
+ charset = part.get_charset()
+ if charset and charset.input_codec:
+ encoding = charset.input_codec
+ else:
+ encoding = 'utf-8'
+ return cte_payload.decode(encoding, errors='surrogateescape')
return cte_payload
@@ -762,10 +766,6 @@ def fetch_ssl_details(paths=None):
return ssl_details
-def load_tfile_or_url(*args, **kwargs):
- return(decode_binary(read_file_or_url(*args, **kwargs).contents))
-
-
def read_file_or_url(url, timeout=5, retries=10,
headers=None, data=None, sec_between=1, ssl_details=None,
headers_cb=None, exception_cb=None):
@@ -833,10 +833,10 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
ud_url = "%s%s%s" % (base, "user-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
- md_resp = load_tfile_or_url(md_url, timeout, retries, file_retries)
+ md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
md = None
if md_resp.ok():
- md = load_yaml(md_resp.contents, default={})
+ md = load_yaml(decode_binary(md_resp.contents), default={})
ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries)
ud = None
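
The fully_decoded_payload fix matters because get_charset() returns an
email.charset.Charset object, not a codec name, and for some charsets
(us-ascii, for one) its input_codec is None; hence the two-step check. A
short demonstration:

    from email.charset import Charset

    for name in ('utf-8', 'us-ascii'):
        charset = Charset(name)
        if charset and charset.input_codec:
            encoding = charset.input_codec
        else:
            encoding = 'utf-8'
        print(name, '->', encoding)   # us-ascii falls back to utf-8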
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index c090d808..0d7d4aca 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -166,7 +166,7 @@ For now see: http://maas.ubuntu.com/
CloudStack
---------------------------
-*TODO*
+.. include:: ../../sources/cloudstack/README.rst
---------------------------
OVF
diff --git a/doc/sources/cloudstack/README.rst b/doc/sources/cloudstack/README.rst
new file mode 100644
index 00000000..eba1cd7e
--- /dev/null
+++ b/doc/sources/cloudstack/README.rst
@@ -0,0 +1,29 @@
+`Apache CloudStack`_ exposes user-data, meta-data, the user password, and the
+account's SSH key through the Virtual Router. For more details on meta-data
+and user-data, refer to the `CloudStack Administrator Guide`_.
+
+The following URLs provide access to user-data and meta-data from the Virtual
+Machine; here 10.1.1.1 is the Virtual Router IP:
+
+.. code:: bash
+
+ http://10.1.1.1/latest/user-data
+ http://10.1.1.1/latest/meta-data
+ http://10.1.1.1/latest/meta-data/{metadata type}
+
+Configuration
+~~~~~~~~~~~~~
+
+The Apache CloudStack datasource can be configured as follows:
+
+.. code:: yaml
+
+ datasource:
+ CloudStack: {}
+ None: {}
+ datasource_list:
+ - CloudStack
+
+
+.. _Apache CloudStack: http://cloudstack.apache.org/
+.. _CloudStack Administrator Guide: http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/virtual_machines.html#user-data-and-meta-data \ No newline at end of file
diff --git a/packages/brpm b/packages/brpm
index 72bfca08..b41b675f 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -40,7 +40,7 @@ PKG_MP = {
'jinja2': 'python-jinja2',
'configobj': 'python-configobj',
'jsonpatch': 'python-jsonpatch',
- 'oauth': 'python-oauth',
+ 'oauthlib': 'python-oauthlib',
'prettytable': 'python-prettytable',
'pyserial': 'pyserial',
'pyyaml': 'PyYAML',
@@ -52,7 +52,7 @@ PKG_MP = {
'cheetah': 'python-cheetah',
'configobj': 'python-configobj',
'jsonpatch': 'python-jsonpatch',
- 'oauth': 'python-oauth',
+ 'oauthlib': 'python-oauthlib',
'prettytable': 'python-prettytable',
'pyserial': 'python-pyserial',
'pyyaml': 'python-yaml',
diff --git a/systemd/cloud-config.service b/systemd/cloud-config.service
index ac25c776..f9f1996e 100644
--- a/systemd/cloud-config.service
+++ b/systemd/cloud-config.service
@@ -1,7 +1,7 @@
[Unit]
Description=Apply the settings specified in cloud-config
-After=network.target cloud-config.target syslog.target
-Wants=network.target cloud-config.target
+After=network-online.target cloud-config.target syslog.target
+Wants=network-online.target cloud-config.target
[Service]
Type=oneshot
diff --git a/systemd/cloud-final.service b/systemd/cloud-final.service
index bbcdf30b..c023ad94 100644
--- a/systemd/cloud-final.service
+++ b/systemd/cloud-final.service
@@ -1,7 +1,7 @@
[Unit]
Description=Execute cloud user/final scripts
-After=network.target cloud-config.service syslog.target rc-local.service
-Wants=network.target cloud-config.service
+After=network-online.target cloud-config.service syslog.target rc-local.service
+Wants=network-online.target cloud-config.service
[Service]
Type=oneshot
diff --git a/systemd/cloud-init.service b/systemd/cloud-init.service
index 398b90ea..48920283 100644
--- a/systemd/cloud-init.service
+++ b/systemd/cloud-init.service
@@ -1,8 +1,8 @@
[Unit]
Description=Initial cloud-init job (metadata service crawler)
-After=local-fs.target network.target cloud-init-local.service
+After=local-fs.target network-online.target cloud-init-local.service
Before=sshd.service sshd-keygen.service systemd-user-sessions.service
-Requires=network.target
+Requires=network-online.target
Wants=local-fs.target cloud-init-local.service sshd.service sshd-keygen.service
[Service]
diff --git a/sysvinit/redhat/cloud-init-local b/sysvinit/redhat/cloud-init-local
index b53e0db2..b9caedbd 100755
--- a/sysvinit/redhat/cloud-init-local
+++ b/sysvinit/redhat/cloud-init-local
@@ -23,9 +23,12 @@
# See: http://www.novell.com/coolsolutions/feature/15380.html
# Also based on dhcpd in RHEL (for comparison)
+# Bring this up before network, S10
+#chkconfig: 2345 09 91
+
### BEGIN INIT INFO
# Provides: cloud-init-local
-# Required-Start: $local_fs $remote_fs
+# Required-Start: $local_fs
# Should-Start: $time
# Required-Stop:
# Should-Stop:
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
new file mode 100644
index 00000000..ed863399
--- /dev/null
+++ b/tests/unittests/test_cli.py
@@ -0,0 +1,54 @@
+import imp
+import os
+import sys
+import six
+
+from . import helpers as test_helpers
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+BIN_CLOUDINIT = "bin/cloud-init"
+
+
+class TestCLI(test_helpers.FilesystemMockingTestCase):
+
+ def setUp(self):
+ super(TestCLI, self).setUp()
+ self.stderr = six.StringIO()
+ self.patchStdoutAndStderr(stderr=self.stderr)
+ self.sys_exit = mock.MagicMock()
+ self.patched_funcs.enter_context(
+ mock.patch.object(sys, 'exit', self.sys_exit))
+
+ def _call_main(self):
+ self.patched_funcs.enter_context(
+ mock.patch.object(sys, 'argv', ['cloud-init']))
+ cli = imp.load_module(
+ 'cli', open(BIN_CLOUDINIT), '', ('', 'r', imp.PY_SOURCE))
+ try:
+ return cli.main()
+ except:
+ pass
+
+ @test_helpers.skipIf(not os.path.isfile(BIN_CLOUDINIT), "no bin/cloudinit")
+ def test_no_arguments_shows_usage(self):
+ self._call_main()
+ self.assertIn('usage: cloud-init', self.stderr.getvalue())
+
+ @test_helpers.skipIf(not os.path.isfile(BIN_CLOUDINIT), "no bin/cloudinit")
+ def test_no_arguments_exits_2(self):
+ exit_code = self._call_main()
+ if self.sys_exit.call_count:
+ self.assertEqual(mock.call(2), self.sys_exit.call_args)
+ else:
+ self.assertEqual(2, exit_code)
+
+ @test_helpers.skipIf(not os.path.isfile(BIN_CLOUDINIT), "no bin/cloudinit")
+ def test_no_arguments_shows_error_message(self):
+ self._call_main()
+ self.assertIn('cloud-init: error: too few arguments',
+ self.stderr.getvalue())
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 4f24e2dd..c603bfdb 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -494,10 +494,10 @@ c: 4
])
def test_mime_application_octet_stream(self):
- """Mime message of type application/octet-stream is ignored but shows warning."""
+ """Mime type application/octet-stream is ignored but shows warning."""
ci = stages.Init()
message = MIMEBase("application", "octet-stream")
- message.set_payload(b'\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc\xbf')
+ message.set_payload(b'\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc')
encoders.encode_base64(message)
ci.datasource = FakeDataSource(message.as_string().encode())
@@ -511,6 +511,33 @@ c: 4
mockobj.assert_called_once_with(
ci.paths.get_ipath("cloud_config"), "", 0o600)
+ def test_cloud_config_archive(self):
+ non_decodable = b'\x11\xc9\xb4gTH\xee\x12'
+ data = [{'content': '#cloud-config\npassword: gocubs\n'},
+ {'content': '#cloud-config\nlocale: chicago\n'},
+ {'content': non_decodable}]
+ message = b'#cloud-config-archive\n' + util.yaml_dumps(data).encode()
+
+ ci = stages.Init()
+ ci.datasource = FakeDataSource(message)
+
+ fs = {}
+
+ def fsstore(filename, content, mode=0o0644, omode="wb"):
+ fs[filename] = content
+
+ # consuming the user-data provided should write 'cloud_config' file
+ # which will have our yaml in it.
+ with mock.patch('cloudinit.util.write_file') as mockobj:
+ mockobj.side_effect = fsstore
+ ci.fetch()
+ ci.consume_data()
+
+ cfg = util.load_yaml(fs[ci.paths.get_ipath("cloud_config")])
+ self.assertEqual(cfg.get('password'), 'gocubs')
+ self.assertEqual(cfg.get('locale'), 'chicago')
+
+
class TestUDProcess(helpers.ResourceUsingTestCase):
def test_bytes_in_userdata(self):
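
The new test_cloud_config_archive above exercises the #cloud-config-archive
format: a YAML list of parts, each carrying a 'content' key. A minimal
standalone construction of such a payload (plain yaml.dump used here instead
of cloudinit's util.yaml_dumps, and the non-decodable-bytes part is omitted
since PyYAML cannot dump raw bytes on Python 3):

    import yaml

    parts = [
        {'content': '#cloud-config\npassword: gocubs\n'},
        {'content': '#cloud-config\nlocale: chicago\n'},
    ]
    message = b'#cloud-config-archive\n' + yaml.dump(parts).encode()
    print(message.decode())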
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 8112c69b..33b971f6 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -18,7 +18,7 @@ import stat
import yaml
import shutil
import tempfile
-import unittest
+import xml.etree.ElementTree as ET
def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
@@ -116,9 +116,6 @@ class TestAzureDataSource(TestCase):
data['iid_from_shared_cfg'] = path
return 'i-my-azure-id'
- def _apply_hostname_bounce(**kwargs):
- data['apply_hostname_bounce'] = kwargs
-
if data.get('ovfcontent') is not None:
populate_dir(os.path.join(self.paths.seed_dir, "azure"),
{'ovf-env.xml': data['ovfcontent']})
@@ -126,20 +123,61 @@ class TestAzureDataSource(TestCase):
mod = DataSourceAzure
mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+ self.get_metadata_from_fabric = mock.MagicMock(return_value={
+ 'instance-id': 'i-my-azure-id',
+ 'public-keys': [],
+ })
+
self.apply_patches([
(mod, 'list_possible_azure_ds_devs', dsdevs),
(mod, 'invoke_agent', _invoke_agent),
(mod, 'wait_for_files', _wait_for_files),
(mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),
(mod, 'iid_from_shared_config', _iid_from_shared_config),
- (mod, 'apply_hostname_bounce', _apply_hostname_bounce),
- ])
+ (mod, 'perform_hostname_bounce', mock.MagicMock()),
+ (mod, 'get_hostname', mock.MagicMock()),
+ (mod, 'set_hostname', mock.MagicMock()),
+ (mod, 'get_metadata_from_fabric', self.get_metadata_from_fabric),
+ ])
dsrc = mod.DataSourceAzureNet(
data.get('sys_cfg', {}), distro=None, paths=self.paths)
return dsrc
+ def xml_equals(self, oxml, nxml):
+ """Compare two sets of XML to make sure they are equal"""
+
+ def create_tag_index(xml):
+ et = ET.fromstring(xml)
+ ret = {}
+ for x in et.iter():
+ ret[x.tag] = x
+ return ret
+
+ def tags_exists(x, y):
+ for tag in x.keys():
+ self.assertIn(tag, y)
+ for tag in y.keys():
+ self.assertIn(tag, x)
+
+ def tags_equal(x, y):
+ for x_tag, x_val in x.items():
+ y_val = y.get(x_val.tag)
+ self.assertEquals(x_val.text, y_val.text)
+
+ old_cnt = create_tag_index(oxml)
+ new_cnt = create_tag_index(nxml)
+ tags_exists(old_cnt, new_cnt)
+ tags_equal(old_cnt, new_cnt)
+
+ def xml_notequals(self, oxml, nxml):
+ try:
+ self.xml_equals(oxml, nxml)
+ except AssertionError as e:
+ return
+ raise AssertionError("XML is the same")
+
def test_basic_seed_dir(self):
odata = {'HostName': "myhost", 'UserName': "myuser"}
data = {'ovfcontent': construct_valid_ovf_env(data=odata),
@@ -272,47 +310,6 @@ class TestAzureDataSource(TestCase):
for mypk in mypklist:
self.assertIn(mypk, dsrc.cfg['_pubkeys'])
- def test_disabled_bounce(self):
- pass
-
- def test_apply_bounce_call_1(self):
- # hostname needs to get through to apply_hostname_bounce
- odata = {'HostName': 'my-random-hostname'}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- self._get_ds(data).get_data()
- self.assertIn('hostname', data['apply_hostname_bounce'])
- self.assertEqual(data['apply_hostname_bounce']['hostname'],
- odata['HostName'])
-
- def test_apply_bounce_call_configurable(self):
- # hostname_bounce should be configurable in datasource cfg
- cfg = {'hostname_bounce': {'interface': 'eth1', 'policy': 'off',
- 'command': 'my-bounce-command',
- 'hostname_command': 'my-hostname-command'}}
- odata = {'HostName': "xhost",
- 'dscfg': {'text': b64e(yaml.dump(cfg)),
- 'encoding': 'base64'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
- self._get_ds(data).get_data()
-
- for k in cfg['hostname_bounce']:
- self.assertIn(k, data['apply_hostname_bounce'])
-
- for k, v in cfg['hostname_bounce'].items():
- self.assertEqual(data['apply_hostname_bounce'][k], v)
-
- def test_set_hostname_disabled(self):
- # config specifying set_hostname off should not bounce
- cfg = {'set_hostname': False}
- odata = {'HostName': "xhost",
- 'dscfg': {'text': b64e(yaml.dump(cfg)),
- 'encoding': 'base64'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
- self._get_ds(data).get_data()
-
- self.assertEqual(data.get('apply_hostname_bounce', "N/A"), "N/A")
-
def test_default_ephemeral(self):
# make sure the ephemeral device works
odata = {}
@@ -359,6 +356,31 @@ class TestAzureDataSource(TestCase):
self.assertEqual(userdata.encode('us-ascii'), dsrc.userdata_raw)
+ def test_password_redacted_in_ovf(self):
+ odata = {'HostName': "myhost", 'UserName': "myuser",
+ 'UserPassword': "mypass"}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+
+ self.assertTrue(ret)
+ ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml')
+
+ # The XML should not be same since the user password is redacted
+ on_disk_ovf = load_file(ovf_env_path)
+ self.xml_notequals(data['ovfcontent'], on_disk_ovf)
+
+ # Make sure that the redacted password on disk is not used by CI
+ self.assertNotEquals(dsrc.cfg.get('password'),
+ DataSourceAzure.DEF_PASSWD_REDACTION)
+
+ # Make sure that the password was really encrypted
+ et = ET.fromstring(on_disk_ovf)
+ for elem in et.iter():
+ if 'UserPassword' in elem.tag:
+ self.assertEquals(DataSourceAzure.DEF_PASSWD_REDACTION,
+ elem.text)
+
def test_ovf_env_arrives_in_waagent_dir(self):
xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
dsrc = self._get_ds({'ovfcontent': xml})
@@ -368,7 +390,7 @@ class TestAzureDataSource(TestCase):
# we expect that the ovf-env.xml file is copied there.
ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml')
self.assertTrue(os.path.exists(ovf_env_path))
- self.assertEqual(xml, load_file(ovf_env_path))
+ self.xml_equals(xml, load_file(ovf_env_path))
def test_ovf_can_include_unicode(self):
xml = construct_valid_ovf_env(data={})
@@ -417,12 +439,198 @@ class TestAzureDataSource(TestCase):
self.assertEqual(dsrc.userdata_raw, b"NEW_USERDATA")
self.assertTrue(os.path.exists(
os.path.join(self.waagent_d, 'otherfile')))
- self.assertFalse(
- os.path.exists(os.path.join(self.waagent_d, 'SharedConfig.xml')))
- self.assertTrue(
- os.path.exists(os.path.join(self.waagent_d, 'ovf-env.xml')))
- self.assertEqual(new_ovfenv,
- load_file(os.path.join(self.waagent_d, 'ovf-env.xml')))
+ self.assertFalse(os.path.exists(
+ os.path.join(self.waagent_d, 'SharedConfig.xml')))
+ self.assertTrue(os.path.exists(
+ os.path.join(self.waagent_d, 'ovf-env.xml')))
+ new_xml = load_file(os.path.join(self.waagent_d, 'ovf-env.xml'))
+ self.xml_equals(new_ovfenv, new_xml)
+
+ def test_exception_fetching_fabric_data_doesnt_propagate(self):
+ ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ ds.ds_cfg['agent_command'] = '__builtin__'
+ self.get_metadata_from_fabric.side_effect = Exception
+ self.assertFalse(ds.get_data())
+
+ def test_fabric_data_included_in_metadata(self):
+ ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ ds.ds_cfg['agent_command'] = '__builtin__'
+ self.get_metadata_from_fabric.return_value = {'test': 'value'}
+ ret = ds.get_data()
+ self.assertTrue(ret)
+ self.assertEqual('value', ds.metadata['test'])
+
+
+class TestAzureBounce(TestCase):
+
+ def mock_out_azure_moving_parts(self):
+ self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'invoke_agent'))
+ self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'wait_for_files'))
+ self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'iid_from_shared_config',
+ mock.MagicMock(return_value='i-my-azure-id')))
+ self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs',
+ mock.MagicMock(return_value=[])))
+ self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'find_ephemeral_disk',
+ mock.MagicMock(return_value=None)))
+ self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'find_ephemeral_part',
+ mock.MagicMock(return_value=None)))
+ self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric',
+ mock.MagicMock(return_value={})))
+
+ def setUp(self):
+ super(TestAzureBounce, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
+ self.paths = helpers.Paths({'cloud_dir': self.tmp})
+ self.addCleanup(shutil.rmtree, self.tmp)
+ DataSourceAzure.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+ self.patches = ExitStack()
+ self.mock_out_azure_moving_parts()
+ self.get_hostname = self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'get_hostname'))
+ self.set_hostname = self.patches.enter_context(
+ mock.patch.object(DataSourceAzure, 'set_hostname'))
+ self.subp = self.patches.enter_context(
+ mock.patch('cloudinit.sources.DataSourceAzure.util.subp'))
+
+ def tearDown(self):
+ self.patches.close()
+
+ def _get_ds(self, ovfcontent=None):
+ if ovfcontent is not None:
+ populate_dir(os.path.join(self.paths.seed_dir, "azure"),
+ {'ovf-env.xml': ovfcontent})
+ return DataSourceAzure.DataSourceAzureNet(
+ {}, distro=None, paths=self.paths)
+
+ def get_ovf_env_with_dscfg(self, hostname, cfg):
+ odata = {
+ 'HostName': hostname,
+ 'dscfg': {
+ 'text': b64e(yaml.dump(cfg)),
+ 'encoding': 'base64'
+ }
+ }
+ return construct_valid_ovf_env(data=odata)
+
+ def test_disabled_bounce_does_not_change_hostname(self):
+ cfg = {'hostname_bounce': {'policy': 'off'}}
+ self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data()
+ self.assertEqual(0, self.set_hostname.call_count)
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+ def test_disabled_bounce_does_not_perform_bounce(
+ self, perform_hostname_bounce):
+ cfg = {'hostname_bounce': {'policy': 'off'}}
+ self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data()
+ self.assertEqual(0, perform_hostname_bounce.call_count)
+
+ def test_same_hostname_does_not_change_hostname(self):
+ host_name = 'unchanged-host-name'
+ self.get_hostname.return_value = host_name
+ cfg = {'hostname_bounce': {'policy': 'yes'}}
+ self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data()
+ self.assertEqual(0, self.set_hostname.call_count)
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+ def test_unchanged_hostname_does_not_perform_bounce(
+ self, perform_hostname_bounce):
+ host_name = 'unchanged-host-name'
+ self.get_hostname.return_value = host_name
+ cfg = {'hostname_bounce': {'policy': 'yes'}}
+ self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data()
+ self.assertEqual(0, perform_hostname_bounce.call_count)
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+ def test_force_performs_bounce_regardless(self, perform_hostname_bounce):
+ host_name = 'unchanged-host-name'
+ self.get_hostname.return_value = host_name
+ cfg = {'hostname_bounce': {'policy': 'force'}}
+ self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data()
+ self.assertEqual(1, perform_hostname_bounce.call_count)
+
+ def test_different_hostnames_sets_hostname(self):
+ expected_hostname = 'azure-expected-host-name'
+ self.get_hostname.return_value = 'default-host-name'
+ self._get_ds(
+ self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data()
+ self.assertEqual(expected_hostname,
+ self.set_hostname.call_args_list[0][0][0])
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+ def test_different_hostnames_performs_bounce(
+ self, perform_hostname_bounce):
+ expected_hostname = 'azure-expected-host-name'
+ self.get_hostname.return_value = 'default-host-name'
+ self._get_ds(
+ self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data()
+ self.assertEqual(1, perform_hostname_bounce.call_count)
+
+ def test_different_hostnames_sets_hostname_back(self):
+ initial_host_name = 'default-host-name'
+ self.get_hostname.return_value = initial_host_name
+ self._get_ds(
+ self.get_ovf_env_with_dscfg('some-host-name', {})).get_data()
+ self.assertEqual(initial_host_name,
+ self.set_hostname.call_args_list[-1][0][0])
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+ def test_failure_in_bounce_still_resets_host_name(
+ self, perform_hostname_bounce):
+ perform_hostname_bounce.side_effect = Exception
+ initial_host_name = 'default-host-name'
+ self.get_hostname.return_value = initial_host_name
+ self._get_ds(
+ self.get_ovf_env_with_dscfg('some-host-name', {})).get_data()
+ self.assertEqual(initial_host_name,
+ self.set_hostname.call_args_list[-1][0][0])
+
+ def test_environment_correct_for_bounce_command(self):
+ interface = 'int0'
+ hostname = 'my-new-host'
+ old_hostname = 'my-old-host'
+ self.get_hostname.return_value = old_hostname
+ cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}}
+ data = self.get_ovf_env_with_dscfg(hostname, cfg)
+ self._get_ds(data).get_data()
+ self.assertEqual(1, self.subp.call_count)
+ bounce_env = self.subp.call_args[1]['env']
+ self.assertEqual(interface, bounce_env['interface'])
+ self.assertEqual(hostname, bounce_env['hostname'])
+ self.assertEqual(old_hostname, bounce_env['old_hostname'])
+
+ def test_default_bounce_command_used_by_default(self):
+ cmd = 'default-bounce-command'
+ DataSourceAzure.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd
+ cfg = {'hostname_bounce': {'policy': 'force'}}
+ data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
+ self._get_ds(data).get_data()
+ self.assertEqual(1, self.subp.call_count)
+ bounce_args = self.subp.call_args[1]['args']
+ self.assertEqual(cmd, bounce_args)
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+ def test_set_hostname_option_can_disable_bounce(
+ self, perform_hostname_bounce):
+ cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
+ data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
+ self._get_ds(data).get_data()
+
+ self.assertEqual(0, perform_hostname_bounce.call_count)
+
+ def test_set_hostname_option_can_disable_hostname_set(self):
+ cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
+ data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
+ self._get_ds(data).get_data()
+
+ self.assertEqual(0, self.set_hostname.call_count)
class TestReadAzureOvf(TestCase):
@@ -438,17 +646,3 @@ class TestReadAzureOvf(TestCase):
(_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content)
for mypk in mypklist:
self.assertIn(mypk, cfg['_pubkeys'])
-
-
-class TestReadAzureSharedConfig(unittest.TestCase):
- def test_valid_content(self):
- xml = """<?xml version="1.0" encoding="utf-8"?>
- <SharedConfig>
- <Deployment name="MY_INSTANCE_ID">
- <Service name="myservice"/>
- <ServiceInstance name="INSTANCE_ID.0" guid="{abcd-uuid}" />
- </Deployment>
- <Incarnation number="1"/>
- </SharedConfig>"""
- ret = DataSourceAzure.iid_from_shared_config_content(xml)
- self.assertEqual("MY_INSTANCE_ID", ret)
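Reviewer note: the hostname tests above fix the bounce contract — policy 'off' or set_hostname False suppresses the bounce entirely, 'yes' bounces only when the hostname actually changes, 'force' bounces regardless, and the bounce command runs with interface, hostname and old_hostname in its environment. A minimal sketch of that decision logic (illustrative only; should_bounce is a hypothetical name, not the DataSourceAzure code):

    # Hypothetical distillation of the policy the tests encode.
    def should_bounce(policy, set_hostname, old_hostname, new_hostname):
        if not set_hostname or policy == 'off':
            return False
        if policy == 'force':
            return True
        # policy 'yes': bounce only on an actual hostname change
        return old_hostname != new_hostname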
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
new file mode 100644
index 00000000..a5228870
--- /dev/null
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -0,0 +1,444 @@
+import os
+import struct
+import unittest
+
+from cloudinit.sources.helpers import azure as azure_helper
+from ..helpers import TestCase
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+try:
+ from contextlib import ExitStack
+except ImportError:
+ from contextlib2 import ExitStack
+
+
+GOAL_STATE_TEMPLATE = """\
+<?xml version="1.0" encoding="utf-8"?>
+<GoalState xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:noNamespaceSchemaLocation="goalstate10.xsd">
+ <Version>2012-11-30</Version>
+ <Incarnation>{incarnation}</Incarnation>
+ <Machine>
+ <ExpectedState>Started</ExpectedState>
+ <StopRolesDeadlineHint>300000</StopRolesDeadlineHint>
+ <LBProbePorts>
+ <Port>16001</Port>
+ </LBProbePorts>
+ <ExpectHealthReport>FALSE</ExpectHealthReport>
+ </Machine>
+ <Container>
+ <ContainerId>{container_id}</ContainerId>
+ <RoleInstanceList>
+ <RoleInstance>
+ <InstanceId>{instance_id}</InstanceId>
+ <State>Started</State>
+ <Configuration>
+ <HostingEnvironmentConfig>
+ http://100.86.192.70:80/...hostingEnvironmentConfig...
+ </HostingEnvironmentConfig>
+ <SharedConfig>{shared_config_url}</SharedConfig>
+ <ExtensionsConfig>
+ http://100.86.192.70:80/...extensionsConfig...
+ </ExtensionsConfig>
+ <FullConfig>http://100.86.192.70:80/...fullConfig...</FullConfig>
+ <Certificates>{certificates_url}</Certificates>
+ <ConfigName>68ce47.0.68ce47.0.utl-trusty--292258.1.xml</ConfigName>
+ </Configuration>
+ </RoleInstance>
+ </RoleInstanceList>
+ </Container>
+</GoalState>
+"""
+
+
+class TestReadAzureSharedConfig(unittest.TestCase):
+
+ def test_valid_content(self):
+ xml = """<?xml version="1.0" encoding="utf-8"?>
+ <SharedConfig>
+ <Deployment name="MY_INSTANCE_ID">
+ <Service name="myservice"/>
+ <ServiceInstance name="INSTANCE_ID.0" guid="{abcd-uuid}" />
+ </Deployment>
+ <Incarnation number="1"/>
+ </SharedConfig>"""
+ ret = azure_helper.iid_from_shared_config_content(xml)
+ self.assertEqual("MY_INSTANCE_ID", ret)
+
+
+class TestFindEndpoint(TestCase):
+
+ def setUp(self):
+ super(TestFindEndpoint, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.load_file = patches.enter_context(
+ mock.patch.object(azure_helper.util, 'load_file'))
+
+ def test_missing_file(self):
+ self.load_file.side_effect = IOError
+ self.assertRaises(IOError,
+ azure_helper.WALinuxAgentShim.find_endpoint)
+
+ def test_missing_special_azure_line(self):
+ self.load_file.return_value = ''
+ self.assertRaises(Exception,
+ azure_helper.WALinuxAgentShim.find_endpoint)
+
+ def _build_lease_content(self, ip_address, use_hex=True):
+ ip_address_repr = ':'.join(
+ [hex(int(part)).replace('0x', '')
+ for part in ip_address.split('.')])
+ if not use_hex:
+ ip_address_repr = struct.pack(
+ '>L', int(ip_address_repr.replace(':', ''), 16))
+ ip_address_repr = '"{0}"'.format(ip_address_repr.decode('utf-8'))
+ return '\n'.join([
+ 'lease {',
+ ' interface "eth0";',
+ ' option unknown-245 {0};'.format(ip_address_repr),
+ '}'])
+
+ def test_hex_string(self):
+ ip_address = '98.76.54.32'
+ file_content = self._build_lease_content(ip_address)
+ self.load_file.return_value = file_content
+ self.assertEqual(ip_address,
+ azure_helper.WALinuxAgentShim.find_endpoint())
+
+ def test_hex_string_with_single_character_part(self):
+ ip_address = '4.3.2.1'
+ file_content = self._build_lease_content(ip_address)
+ self.load_file.return_value = file_content
+ self.assertEqual(ip_address,
+ azure_helper.WALinuxAgentShim.find_endpoint())
+
+ def test_packed_string(self):
+ ip_address = '98.76.54.32'
+ file_content = self._build_lease_content(ip_address, use_hex=False)
+ self.load_file.return_value = file_content
+ self.assertEqual(ip_address,
+ azure_helper.WALinuxAgentShim.find_endpoint())
+
+ def test_latest_lease_used(self):
+ ip_addresses = ['4.3.2.1', '98.76.54.32']
+ file_content = '\n'.join([self._build_lease_content(ip_address)
+ for ip_address in ip_addresses])
+ self.load_file.return_value = file_content
+ self.assertEqual(ip_addresses[-1],
+ azure_helper.WALinuxAgentShim.find_endpoint())
+
+
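Reviewer note: _build_lease_content above mirrors the two ways dhclient records Azure's option unknown-245 — as colon-separated hex octets, or as the four address bytes packed big-endian into a quoted string. Decoding the hex form back to a dotted quad is just base-16 per octet; a sketch (endpoint_from_hex_option is a hypothetical name, not the helper's API):

    # Assumed example: decode '62:4c:36:20' -> '98.76.54.32'.
    def endpoint_from_hex_option(value):
        return '.'.join(str(int(octet, 16)) for octet in value.split(':'))

    assert endpoint_from_hex_option('62:4c:36:20') == '98.76.54.32'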
+class TestGoalStateParsing(TestCase):
+
+ default_parameters = {
+ 'incarnation': 1,
+ 'container_id': 'MyContainerId',
+ 'instance_id': 'MyInstanceId',
+ 'shared_config_url': 'MySharedConfigUrl',
+ 'certificates_url': 'MyCertificatesUrl',
+ }
+
+ def _get_goal_state(self, http_client=None, **kwargs):
+ if http_client is None:
+ http_client = mock.MagicMock()
+ parameters = self.default_parameters.copy()
+ parameters.update(kwargs)
+ xml = GOAL_STATE_TEMPLATE.format(**parameters)
+ if parameters['certificates_url'] is None:
+ new_xml_lines = []
+ for line in xml.splitlines():
+ if 'Certificates' in line:
+ continue
+ new_xml_lines.append(line)
+ xml = '\n'.join(new_xml_lines)
+ return azure_helper.GoalState(xml, http_client)
+
+ def test_incarnation_parsed_correctly(self):
+ incarnation = '123'
+ goal_state = self._get_goal_state(incarnation=incarnation)
+ self.assertEqual(incarnation, goal_state.incarnation)
+
+ def test_container_id_parsed_correctly(self):
+ container_id = 'TestContainerId'
+ goal_state = self._get_goal_state(container_id=container_id)
+ self.assertEqual(container_id, goal_state.container_id)
+
+ def test_instance_id_parsed_correctly(self):
+ instance_id = 'TestInstanceId'
+ goal_state = self._get_goal_state(instance_id=instance_id)
+ self.assertEqual(instance_id, goal_state.instance_id)
+
+ def test_shared_config_xml_parsed_and_fetched_correctly(self):
+ http_client = mock.MagicMock()
+ shared_config_url = 'TestSharedConfigUrl'
+ goal_state = self._get_goal_state(
+ http_client=http_client, shared_config_url=shared_config_url)
+ shared_config_xml = goal_state.shared_config_xml
+ self.assertEqual(1, http_client.get.call_count)
+ self.assertEqual(shared_config_url, http_client.get.call_args[0][0])
+ self.assertEqual(http_client.get.return_value.contents,
+ shared_config_xml)
+
+ def test_certificates_xml_parsed_and_fetched_correctly(self):
+ http_client = mock.MagicMock()
+ certificates_url = 'TestCertificatesUrl'
+ goal_state = self._get_goal_state(
+ http_client=http_client, certificates_url=certificates_url)
+ certificates_xml = goal_state.certificates_xml
+ self.assertEqual(1, http_client.get.call_count)
+ self.assertEqual(certificates_url, http_client.get.call_args[0][0])
+ self.assertTrue(http_client.get.call_args[1].get('secure', False))
+ self.assertEqual(http_client.get.return_value.contents,
+ certificates_xml)
+
+ def test_missing_certificates_skips_http_get(self):
+ http_client = mock.MagicMock()
+ goal_state = self._get_goal_state(
+ http_client=http_client, certificates_url=None)
+ certificates_xml = goal_state.certificates_xml
+ self.assertEqual(0, http_client.get.call_count)
+ self.assertIsNone(certificates_xml)
+
+
+class TestAzureEndpointHttpClient(TestCase):
+
+ regular_headers = {
+ 'x-ms-agent-name': 'WALinuxAgent',
+ 'x-ms-version': '2012-11-30',
+ }
+
+ def setUp(self):
+ super(TestAzureEndpointHttpClient, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.read_file_or_url = patches.enter_context(
+ mock.patch.object(azure_helper.util, 'read_file_or_url'))
+
+ def test_non_secure_get(self):
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ url = 'MyTestUrl'
+ response = client.get(url, secure=False)
+ self.assertEqual(1, self.read_file_or_url.call_count)
+ self.assertEqual(self.read_file_or_url.return_value, response)
+ self.assertEqual(mock.call(url, headers=self.regular_headers),
+ self.read_file_or_url.call_args)
+
+ def test_secure_get(self):
+ url = 'MyTestUrl'
+ certificate = mock.MagicMock()
+ expected_headers = self.regular_headers.copy()
+ expected_headers.update({
+ "x-ms-cipher-name": "DES_EDE3_CBC",
+ "x-ms-guest-agent-public-x509-cert": certificate,
+ })
+ client = azure_helper.AzureEndpointHttpClient(certificate)
+ response = client.get(url, secure=True)
+ self.assertEqual(1, self.read_file_or_url.call_count)
+ self.assertEqual(self.read_file_or_url.return_value, response)
+ self.assertEqual(mock.call(url, headers=expected_headers),
+ self.read_file_or_url.call_args)
+
+ def test_post(self):
+ data = mock.MagicMock()
+ url = 'MyTestUrl'
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ response = client.post(url, data=data)
+ self.assertEqual(1, self.read_file_or_url.call_count)
+ self.assertEqual(self.read_file_or_url.return_value, response)
+ self.assertEqual(
+ mock.call(url, data=data, headers=self.regular_headers),
+ self.read_file_or_url.call_args)
+
+ def test_post_with_extra_headers(self):
+ url = 'MyTestUrl'
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ extra_headers = {'test': 'header'}
+ client.post(url, extra_headers=extra_headers)
+ self.assertEqual(1, self.read_file_or_url.call_count)
+ expected_headers = self.regular_headers.copy()
+ expected_headers.update(extra_headers)
+ self.assertEqual(
+ mock.call(mock.ANY, data=mock.ANY, headers=expected_headers),
+ self.read_file_or_url.call_args)
+
+
+class TestOpenSSLManager(TestCase):
+
+ def setUp(self):
+ super(TestOpenSSLManager, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.subp = patches.enter_context(
+ mock.patch.object(azure_helper.util, 'subp'))
+ try:
+ self.open = patches.enter_context(
+ mock.patch('__builtin__.open'))
+ except ImportError:
+ self.open = patches.enter_context(
+ mock.patch('builtins.open'))
+
+ @mock.patch.object(azure_helper, 'cd', mock.MagicMock())
+ @mock.patch.object(azure_helper.tempfile, 'mkdtemp')
+ def test_openssl_manager_creates_a_tmpdir(self, mkdtemp):
+ manager = azure_helper.OpenSSLManager()
+ self.assertEqual(mkdtemp.return_value, manager.tmpdir)
+
+ def test_generate_certificate_uses_tmpdir(self):
+ subp_directory = {}
+
+ def capture_directory(*args, **kwargs):
+ subp_directory['path'] = os.getcwd()
+
+ self.subp.side_effect = capture_directory
+ manager = azure_helper.OpenSSLManager()
+ self.assertEqual(manager.tmpdir, subp_directory['path'])
+
+ @mock.patch.object(azure_helper, 'cd', mock.MagicMock())
+ @mock.patch.object(azure_helper.tempfile, 'mkdtemp', mock.MagicMock())
+ @mock.patch.object(azure_helper.util, 'del_dir')
+ def test_clean_up(self, del_dir):
+ manager = azure_helper.OpenSSLManager()
+ manager.clean_up()
+ self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list)
+
+
+class TestWALinuxAgentShim(TestCase):
+
+ def setUp(self):
+ super(TestWALinuxAgentShim, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.AzureEndpointHttpClient = patches.enter_context(
+ mock.patch.object(azure_helper, 'AzureEndpointHttpClient'))
+ self.find_endpoint = patches.enter_context(
+ mock.patch.object(
+ azure_helper.WALinuxAgentShim, 'find_endpoint'))
+ self.GoalState = patches.enter_context(
+ mock.patch.object(azure_helper, 'GoalState'))
+ self.iid_from_shared_config_content = patches.enter_context(
+ mock.patch.object(azure_helper, 'iid_from_shared_config_content'))
+ self.OpenSSLManager = patches.enter_context(
+ mock.patch.object(azure_helper, 'OpenSSLManager'))
+ patches.enter_context(
+ mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+
+ def test_http_client_uses_certificate(self):
+ shim = azure_helper.WALinuxAgentShim()
+ shim.register_with_azure_and_fetch_data()
+ self.assertEqual(
+ [mock.call(self.OpenSSLManager.return_value.certificate)],
+ self.AzureEndpointHttpClient.call_args_list)
+
+ def test_correct_url_used_for_goalstate(self):
+ self.find_endpoint.return_value = 'test_endpoint'
+ shim = azure_helper.WALinuxAgentShim()
+ shim.register_with_azure_and_fetch_data()
+ get = self.AzureEndpointHttpClient.return_value.get
+ self.assertEqual(
+ [mock.call('http://test_endpoint/machine/?comp=goalstate')],
+ get.call_args_list)
+ self.assertEqual(
+ [mock.call(get.return_value.contents,
+ self.AzureEndpointHttpClient.return_value)],
+ self.GoalState.call_args_list)
+
+ def test_certificates_used_to_determine_public_keys(self):
+ shim = azure_helper.WALinuxAgentShim()
+ data = shim.register_with_azure_and_fetch_data()
+ self.assertEqual(
+ [mock.call(self.GoalState.return_value.certificates_xml)],
+ self.OpenSSLManager.return_value.parse_certificates.call_args_list)
+ self.assertEqual(
+ self.OpenSSLManager.return_value.parse_certificates.return_value,
+ data['public-keys'])
+
+ def test_absent_certificates_produces_empty_public_keys(self):
+ self.GoalState.return_value.certificates_xml = None
+ shim = azure_helper.WALinuxAgentShim()
+ data = shim.register_with_azure_and_fetch_data()
+ self.assertEqual([], data['public-keys'])
+
+ def test_instance_id_returned_in_data(self):
+ shim = azure_helper.WALinuxAgentShim()
+ data = shim.register_with_azure_and_fetch_data()
+ self.assertEqual(
+ [mock.call(self.GoalState.return_value.shared_config_xml)],
+ self.iid_from_shared_config_content.call_args_list)
+ self.assertEqual(self.iid_from_shared_config_content.return_value,
+ data['instance-id'])
+
+ def test_correct_url_used_for_report_ready(self):
+ self.find_endpoint.return_value = 'test_endpoint'
+ shim = azure_helper.WALinuxAgentShim()
+ shim.register_with_azure_and_fetch_data()
+ expected_url = 'http://test_endpoint/machine?comp=health'
+ self.assertEqual(
+ [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
+ self.AzureEndpointHttpClient.return_value.post.call_args_list)
+
+ def test_goal_state_values_used_for_report_ready(self):
+ self.GoalState.return_value.incarnation = 'TestIncarnation'
+ self.GoalState.return_value.container_id = 'TestContainerId'
+ self.GoalState.return_value.instance_id = 'TestInstanceId'
+ shim = azure_helper.WALinuxAgentShim()
+ shim.register_with_azure_and_fetch_data()
+ posted_document = (
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]['data']
+ )
+ self.assertIn('TestIncarnation', posted_document)
+ self.assertIn('TestContainerId', posted_document)
+ self.assertIn('TestInstanceId', posted_document)
+
+ def test_clean_up_can_be_called_at_any_time(self):
+ shim = azure_helper.WALinuxAgentShim()
+ shim.clean_up()
+
+ def test_clean_up_will_clean_up_openssl_manager_if_instantiated(self):
+ shim = azure_helper.WALinuxAgentShim()
+ shim.register_with_azure_and_fetch_data()
+ shim.clean_up()
+ self.assertEqual(
+ 1, self.OpenSSLManager.return_value.clean_up.call_count)
+
+ def test_failure_to_fetch_goalstate_bubbles_up(self):
+ class SentinelException(Exception):
+ pass
+ self.AzureEndpointHttpClient.return_value.get.side_effect = (
+ SentinelException)
+ shim = azure_helper.WALinuxAgentShim()
+ self.assertRaises(SentinelException,
+ shim.register_with_azure_and_fetch_data)
+
+
+class TestGetMetadataFromFabric(TestCase):
+
+ @mock.patch.object(azure_helper, 'WALinuxAgentShim')
+ def test_data_from_shim_returned(self, shim):
+ ret = azure_helper.get_metadata_from_fabric()
+ self.assertEqual(
+ shim.return_value.register_with_azure_and_fetch_data.return_value,
+ ret)
+
+ @mock.patch.object(azure_helper, 'WALinuxAgentShim')
+ def test_success_calls_clean_up(self, shim):
+ azure_helper.get_metadata_from_fabric()
+ self.assertEqual(1, shim.return_value.clean_up.call_count)
+
+ @mock.patch.object(azure_helper, 'WALinuxAgentShim')
+ def test_failure_in_registration_calls_clean_up(self, shim):
+ class SentinelException(Exception):
+ pass
+ shim.return_value.register_with_azure_and_fetch_data.side_effect = (
+ SentinelException)
+ self.assertRaises(SentinelException,
+ azure_helper.get_metadata_from_fabric)
+ self.assertEqual(1, shim.return_value.clean_up.call_count)
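Reviewer note: every setUp in this new file uses the same idiom — stack mock patches on a contextlib ExitStack and register a single addCleanup(patches.close) so they all unwind together; contextlib2 supplies ExitStack on Python 2. A self-contained sketch of the pattern:

    import unittest

    try:
        from unittest import mock          # Python 3
    except ImportError:
        import mock                        # Python 2

    try:
        from contextlib import ExitStack   # Python 3.3+
    except ImportError:
        from contextlib2 import ExitStack  # Python 2 backport

    class ExampleTest(unittest.TestCase):
        def setUp(self):
            super(ExampleTest, self).setUp()
            patches = ExitStack()
            self.addCleanup(patches.close)  # one cleanup undoes every patch
            self.sleep = patches.enter_context(mock.patch('time.sleep'))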
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 4280abc4..1fb100f7 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -113,10 +113,6 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
self.assertEqual(GCE_META.get('instance/attributes/user-data'),
self.ds.get_userdata_raw())
- # we expect a list of public ssh keys with user names stripped
- self.assertEqual(['ssh-rsa AA2..+aRD0fyVw== root@server'],
- self.ds.get_public_ssh_keys())
-
# test partial metadata (missing user-data in particular)
@httpretty.activate
def test_metadata_partial(self):
@@ -141,3 +137,48 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
decoded = b64decode(
GCE_META_ENCODING.get('instance/attributes/user-data'))
self.assertEqual(decoded, self.ds.get_userdata_raw())
+
+ @httpretty.activate
+ def test_missing_required_keys_return_false(self):
+ for required_key in ['instance/id', 'instance/zone',
+ 'instance/hostname']:
+ meta = GCE_META_PARTIAL.copy()
+ del meta[required_key]
+ httpretty.register_uri(httpretty.GET, MD_URL_RE,
+ body=_new_request_callback(meta))
+ self.assertEqual(False, self.ds.get_data())
+ httpretty.reset()
+
+ @httpretty.activate
+ def test_project_level_ssh_keys_are_used(self):
+ httpretty.register_uri(httpretty.GET, MD_URL_RE,
+ body=_new_request_callback())
+ self.ds.get_data()
+
+ # we expect a list of public ssh keys with user names stripped
+ self.assertEqual(['ssh-rsa AA2..+aRD0fyVw== root@server'],
+ self.ds.get_public_ssh_keys())
+
+ @httpretty.activate
+ def test_instance_level_ssh_keys_are_used(self):
+ key_content = 'ssh-rsa JustAUser root@server'
+ meta = GCE_META.copy()
+ meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content)
+
+ httpretty.register_uri(httpretty.GET, MD_URL_RE,
+ body=_new_request_callback(meta))
+ self.ds.get_data()
+
+ self.assertIn(key_content, self.ds.get_public_ssh_keys())
+
+ @httpretty.activate
+ def test_instance_level_keys_replace_project_level_keys(self):
+ key_content = 'ssh-rsa JustAUser root@server'
+ meta = GCE_META.copy()
+ meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content)
+
+ httpretty.register_uri(httpretty.GET, MD_URL_RE,
+ body=_new_request_callback(meta))
+ self.ds.get_data()
+
+ self.assertEqual([key_content], self.ds.get_public_ssh_keys())
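Reviewer note: the last two GCE tests pin the precedence rule — instance-level sshKeys, when present, replace the project-level list outright, and the 'user:' prefix GCE prepends is stripped before keys are returned. A hedged sketch of that selection (select_ssh_keys is hypothetical, not the datasource's method):

    # Assumed illustration of the behaviour under test.
    def select_ssh_keys(project_keys, instance_keys):
        raw = instance_keys if instance_keys else project_keys
        # entries look like 'user:ssh-rsa AAAA... comment'; drop the user
        return [entry.split(':', 1)[1] for entry in raw]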
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 28b41eaf..adee9019 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -36,8 +36,6 @@ from binascii import crc32
import serial
import six
-import six
-
from cloudinit import helpers as c_helpers
from cloudinit.sources import DataSourceSmartOS
from cloudinit.util import b64e
diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py
index 02cad8b2..1ed185ca 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure.py
@@ -7,7 +7,10 @@ import os
import re
import shutil
import tempfile
-import unittest
+
+
+def load_tfile_or_url(*args, **kwargs):
+ return util.decode_binary(util.read_file_or_url(*args, **kwargs).contents)
class TestAptProxyConfig(TestCase):
@@ -30,7 +33,7 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
- contents = util.load_tfile_or_url(self.pfile)
+ contents = load_tfile_or_url(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_http_proxy_written(self):
@@ -40,7 +43,7 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
- contents = util.load_tfile_or_url(self.pfile)
+ contents = load_tfile_or_url(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_all_proxy_written(self):
@@ -58,7 +61,7 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
- contents = util.load_tfile_or_url(self.pfile)
+ contents = load_tfile_or_url(self.pfile)
for ptype, pval in values.items():
self.assertTrue(self._search_apt_config(contents, ptype, pval))
@@ -74,7 +77,7 @@ class TestAptProxyConfig(TestCase):
cc_apt_configure.apply_apt_config({'apt_proxy': "foo"},
self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
- contents = util.load_tfile_or_url(self.pfile)
+ contents = load_tfile_or_url(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "foo"))
def test_config_written(self):
@@ -86,14 +89,14 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(os.path.isfile(self.cfile))
self.assertFalse(os.path.isfile(self.pfile))
- self.assertEqual(util.load_tfile_or_url(self.cfile), payload)
+ self.assertEqual(load_tfile_or_url(self.cfile), payload)
def test_config_replaced(self):
util.write_file(self.pfile, "content doesnt matter")
cc_apt_configure.apply_apt_config({'apt_config': "foo"},
self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.cfile))
- self.assertEqual(util.load_tfile_or_url(self.cfile), "foo")
+ self.assertEqual(load_tfile_or_url(self.cfile), "foo")
def test_config_deleted(self):
# if no 'apt_config' is provided, delete any previously written file
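Reviewer note: the module-local load_tfile_or_url added above replaces the dropped util.load_tfile_or_url — it reads via util.read_file_or_url and decodes the bytes .contents to text, so the assertions compare str with str on Python 3. In effect each call reduces to this one-liner (assuming cloudinit.util is imported as util):

    contents = util.decode_binary(util.read_file_or_url(path).contents)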
diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py
new file mode 100644
index 00000000..ddef8d48
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_disk_setup.py
@@ -0,0 +1,30 @@
+from cloudinit.config import cc_disk_setup
+from ..helpers import ExitStack, mock, TestCase
+
+
+class TestIsDiskUsed(TestCase):
+
+ def setUp(self):
+ super(TestIsDiskUsed, self).setUp()
+ self.patches = ExitStack()
+ mod_name = 'cloudinit.config.cc_disk_setup'
+ self.enumerate_disk = self.patches.enter_context(
+ mock.patch('{0}.enumerate_disk'.format(mod_name)))
+ self.check_fs = self.patches.enter_context(
+ mock.patch('{0}.check_fs'.format(mod_name)))
+
+ def test_multiple_child_nodes_returns_true(self):
+ self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(2))
+ self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
+ self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
+
+ def test_valid_filesystem_returns_true(self):
+ self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
+ self.check_fs.return_value = (
+ mock.MagicMock(), 'ext4', mock.MagicMock())
+ self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
+
+ def test_one_child_node_and_no_fs_returns_false(self):
+ self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
+ self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
+ self.assertFalse(cc_disk_setup.is_disk_used(mock.MagicMock()))
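Reviewer note: taken together, the three tests define is_disk_used's contract — a disk is "used" when enumerate_disk yields more than one node (partitions beyond the bare device) or when check_fs reports a filesystem type; a single node with no filesystem is free. Distilled into a standalone sketch (hypothetical signature, not cc_disk_setup's):

    # Assumed illustration: node count and detected fs type as inputs.
    def disk_is_used(node_count, fs_type):
        return node_count > 1 or fs_type is not None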
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
index f3109bac..eceb14d9 100644
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -38,7 +38,6 @@ class TestInstallPackages(t_help.TestCase):
if 'args' not in kwargs:
kwargs['args'] = args[0]
self.subp_called.append(kwargs)
- snap_cmds = []
args = kwargs['args']
# here we basically parse the snappy command invoked
# and append to snapcmds a list of (mode, pkg, config)
@@ -117,9 +116,6 @@ class TestInstallPackages(t_help.TestCase):
def test_package_ops_common_filename(self):
# fish package name from filename
# package names likely look like: pkgname.namespace_version_arch.snap
- fname = "xkcd-webserver.canonical_0.3.4_all.snap"
- name = "xkcd-webserver.canonical"
- shortname = "xkcd-webserver"
# find filenames
self.populate_tmp(
@@ -165,7 +161,6 @@ class TestInstallPackages(t_help.TestCase):
'ubuntu-core': {'c1': 'c2'},
'notinstalled.smoser': {'s1': 's2'},
}
- cfg = {'config-example-k1': 'config-example-k2'}
ret = get_package_ops(
packages=['config-example.canonical'], configs=cfgs,
installed=['config-example.smoser', 'pkg1.canonical',
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index cf7c03b0..0c19a2c2 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -18,10 +18,6 @@
from __future__ import print_function
-import sys
-import six
-import unittest
-
from . import helpers as test_helpers
import textwrap
@@ -30,6 +26,7 @@ from cloudinit import templater
try:
import Cheetah
HAS_CHEETAH = True
+ Cheetah # make pyflakes happy, as Cheetah is not used here
except ImportError:
HAS_CHEETAH = False
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 1619b5d2..95990165 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -459,4 +459,21 @@ class TestMessageFromString(helpers.TestCase):
roundtripped = util.message_from_string(u'\n').as_string()
self.assertNotIn('\x00', roundtripped)
+
+class TestReadSeeded(helpers.TestCase):
+ def setUp(self):
+ super(TestReadSeeded, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def test_unicode_not_messed_up(self):
+ ud = b"userdatablob"
+ helpers.populate_dir(
+ self.tmp, {'meta-data': "key1: val1", 'user-data': ud})
+ sdir = self.tmp + os.path.sep
+ (found_md, found_ud) = util.read_seeded(sdir)
+
+ self.assertEqual(found_md, {'key1': 'val1'})
+ self.assertEqual(found_ud, ud)
+
# vi: ts=4 expandtab
diff --git a/tools/hacking.py b/tools/hacking.py
index e7797564..3175df38 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -128,7 +128,7 @@ def cloud_docstring_multiline_end(physical_line):
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
if (pos != -1 and len(physical_line) == pos):
- print physical_line
+ print(physical_line)
if (physical_line[pos + 3] == ' '):
return (pos, "N403: multi line docstring end on new line")
diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py
index eda59cb8..6e164590 100755
--- a/tools/validate-yaml.py
+++ b/tools/validate-yaml.py
@@ -4,7 +4,6 @@
"""
import sys
-
import yaml
@@ -17,7 +16,7 @@ if __name__ == "__main__":
yaml.safe_load(fh.read())
fh.close()
sys.stdout.write(" - ok\n")
- except Exception, e:
+ except Exception as e:
sys.stdout.write(" - bad (%s)\n" % (e))
bads += 1
if bads > 0:
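Reviewer note: the validate-yaml.py hunk is the same Python 3 modernisation as the hacking.py print fix — "except Exception, e" is Python 2-only syntax, while the "as" form parses on 2.6+ and 3.x alike. A minimal round-trip showing the fixed form:

    import sys
    import yaml

    try:
        yaml.safe_load('{bad: [')   # deliberately malformed YAML
    except Exception as e:          # 'as' form works on Python 2.6+ and 3.x
        sys.stdout.write(' - bad (%s)\n' % (e,))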