summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--ChangeLog10
-rwxr-xr-xcloud-init.py2
-rw-r--r--cloudinit/CloudConfig/cc_apt_pipelining.py53
-rw-r--r--cloudinit/CloudConfig/cc_resizefs.py4
-rw-r--r--cloudinit/CloudConfig/cc_salt_minion.py56
-rw-r--r--cloudinit/CloudConfig/cc_update_etc_hosts.py2
-rw-r--r--cloudinit/DataSource.py3
-rw-r--r--cloudinit/DataSourceConfigDrive.py231
-rw-r--r--cloudinit/DataSourceEc2.py86
-rw-r--r--cloudinit/DataSourceMaaS.py344
-rw-r--r--cloudinit/DataSourceNoCloud.py70
-rw-r--r--cloudinit/DataSourceOVF.py2
-rw-r--r--cloudinit/UserDataHandler.py2
-rw-r--r--cloudinit/__init__.py19
-rw-r--r--cloudinit/netinfo.py11
-rw-r--r--cloudinit/util.py258
-rw-r--r--config/cloud.cfg4
-rw-r--r--doc/configdrive/README118
-rw-r--r--doc/examples/cloud-config-chef.txt2
-rw-r--r--doc/examples/cloud-config-datasources.txt11
-rw-r--r--doc/examples/cloud-config-salt-minion.txt53
-rw-r--r--doc/examples/cloud-config.txt9
-rw-r--r--doc/nocloud/README55
-rw-r--r--tests/unittests/test__init__.py195
-rw-r--r--tests/unittests/test_datasource/test_maas.py151
-rw-r--r--tests/unittests/test_handler/test_handler_ca_certs.py (renamed from tests/unittests/test_handler_ca_certs.py)0
-rw-r--r--tests/unittests/test_util.py2
-rwxr-xr-xtools/run-pylint4
28 files changed, 1627 insertions, 130 deletions
diff --git a/ChangeLog b/ChangeLog
index 07b46ded..dd884961 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -20,6 +20,16 @@
- add test case framework [Mike Milner] (LP: #890851)
- fix pylint warnings [Juerg Haefliger] (LP: #914739)
- add support for adding and deleting CA Certificates [Mike Milner] (LP: #915232)
+ - in ci-info lines, use '.' to indicate empty field for easier machine reading
+ - support empty lines in "#include" files (LP: #923043)
+ - support configuration of salt minions (Jeff Bauer) (LP: #927795)
+ - DataSourceOVF: only search for OVF data on ISO9660 filesystems (LP: #898373)
+ - DataSourceConfigDrive: support getting data from openstack config drive (LP: #857378)
+ - DataSourceNoCloud: support seed from external disk of ISO or vfat (LP: #857378)
+ - DataSourceNoCloud: support inserting /etc/network/interfaces
+ - DataSourceMaaS: add data source for Ubuntu Machines as a Service (MaaS) (LP: #942061)
+ - add option 'apt_pipelining' to address issue with S3 mirrors
+ (LP: #948461) [Ben Howard]
0.6.2:
- fix bug where update was not done unless update was explicitly set.
It would not be run if 'upgrade' or packages were set to be installed
diff --git a/cloud-init.py b/cloud-init.py
index 9d559020..9e0a0405 100755
--- a/cloud-init.py
+++ b/cloud-init.py
@@ -136,7 +136,7 @@ def main():
cloud.get_data_source()
except cloudinit.DataSourceNotFoundException as e:
sys.stderr.write("no instance data found in %s\n" % cmd)
- sys.exit(1)
+ sys.exit(0)
# set this as the current instance
cloud.set_cur_instance()
diff --git a/cloudinit/CloudConfig/cc_apt_pipelining.py b/cloudinit/CloudConfig/cc_apt_pipelining.py
new file mode 100644
index 00000000..0286a9ae
--- /dev/null
+++ b/cloudinit/CloudConfig/cc_apt_pipelining.py
@@ -0,0 +1,53 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import cloudinit.util as util
+from cloudinit.CloudConfig import per_instance
+
+frequency = per_instance
+default_file = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
+
+
+def handle(_name, cfg, _cloud, log, _args):
+
+ apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
+ apt_pipe_value = str(apt_pipe_value).lower()
+
+ if apt_pipe_value == "false":
+ write_apt_snippet("0", log)
+
+ elif apt_pipe_value in ("none", "unchanged", "os"):
+ return
+
+ elif apt_pipe_value in str(range(0, 6)):
+ write_apt_snippet(apt_pipe_value, log)
+
+ else:
+ log.warn("Invalid option for apt_pipeling: %s" % apt_pipe_value)
+
+
+def write_apt_snippet(setting, log, f_name=default_file):
+ """ Writes f_name with apt pipeline depth 'setting' """
+
+ acquire_pipeline_depth = 'Acquire::http::Pipeline-Depth "%s";\n'
+ file_contents = ("//Written by cloud-init per 'apt_pipelining'\n"
+ + (acquire_pipeline_depth % setting))
+
+ util.write_file(f_name, file_contents)
+
+ log.debug("Wrote %s with APT pipeline setting" % f_name)
diff --git a/cloudinit/CloudConfig/cc_resizefs.py b/cloudinit/CloudConfig/cc_resizefs.py
index 0186d4d2..c76cc664 100644
--- a/cloudinit/CloudConfig/cc_resizefs.py
+++ b/cloudinit/CloudConfig/cc_resizefs.py
@@ -49,8 +49,8 @@ def handle(_name, cfg, _cloud, log, args):
dev = os.makedev(os.major(st_dev), os.minor(st_dev))
os.mknod(devpth, 0400 | stat.S_IFBLK, dev)
except:
- if util.islxc():
- log.debug("inside lxc, ignoring mknod failure in resizefs")
+ if util.is_container():
+ log.debug("inside container, ignoring mknod failure in resizefs")
return
log.warn("Failed to make device node to resize /")
raise
diff --git a/cloudinit/CloudConfig/cc_salt_minion.py b/cloudinit/CloudConfig/cc_salt_minion.py
new file mode 100644
index 00000000..1a3b5039
--- /dev/null
+++ b/cloudinit/CloudConfig/cc_salt_minion.py
@@ -0,0 +1,56 @@
+# vi: ts=4 expandtab
+#
+# Author: Jeff Bauer <jbauer@rubic.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import os.path
+import subprocess
+import cloudinit.CloudConfig as cc
+import yaml
+
+
+def handle(_name, cfg, _cloud, _log, _args):
+ # If there isn't a salt key in the configuration don't do anything
+ if 'salt_minion' not in cfg:
+ return
+ salt_cfg = cfg['salt_minion']
+ # Start by installing the salt package ...
+ cc.install_packages(("salt",))
+ config_dir = '/etc/salt'
+ if not os.path.isdir(config_dir):
+ os.makedirs(config_dir)
+ # ... and then update the salt configuration
+ if 'conf' in salt_cfg:
+ # Add all sections from the conf object to /etc/salt/minion
+ minion_config = os.path.join(config_dir, 'minion')
+ yaml.dump(salt_cfg['conf'],
+ file(minion_config, 'w'),
+ default_flow_style=False)
+ # ... copy the key pair if specified
+ if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
+ pki_dir = '/etc/salt/pki'
+ cumask = os.umask(077)
+ if not os.path.isdir(pki_dir):
+ os.makedirs(pki_dir)
+ pub_name = os.path.join(pki_dir, 'minion.pub')
+ pem_name = os.path.join(pki_dir, 'minion.pem')
+ with open(pub_name, 'w') as f:
+ f.write(salt_cfg['public_key'])
+ with open(pem_name, 'w') as f:
+ f.write(salt_cfg['private_key'])
+ os.umask(cumask)
+
+ # Start salt-minion
+ subprocess.check_call(['service', 'salt-minion', 'start'])
diff --git a/cloudinit/CloudConfig/cc_update_etc_hosts.py b/cloudinit/CloudConfig/cc_update_etc_hosts.py
index 572e6750..6ad2fca8 100644
--- a/cloudinit/CloudConfig/cc_update_etc_hosts.py
+++ b/cloudinit/CloudConfig/cc_update_etc_hosts.py
@@ -28,7 +28,7 @@ frequency = per_always
def handle(_name, cfg, cloud, log, _args):
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- manage_hosts = util.get_cfg_option_bool(cfg, "manage_etc_hosts", False)
+ manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
if manage_hosts in ("True", "true", True, "template"):
# render from template file
try:
diff --git a/cloudinit/DataSource.py b/cloudinit/DataSource.py
index f38e3b20..1bcb9c10 100644
--- a/cloudinit/DataSource.py
+++ b/cloudinit/DataSource.py
@@ -72,6 +72,9 @@ class DataSource:
if isinstance(self.metadata['public-keys'], str):
return([self.metadata['public-keys'], ])
+ if isinstance(self.metadata['public-keys'], list):
+ return(self.metadata['public-keys'])
+
for _keyname, klist in self.metadata['public-keys'].items():
# lp:506332 uec metadata service responds with
# data that makes boto populate a string for 'klist' rather
diff --git a/cloudinit/DataSourceConfigDrive.py b/cloudinit/DataSourceConfigDrive.py
new file mode 100644
index 00000000..2db4a76a
--- /dev/null
+++ b/cloudinit/DataSourceConfigDrive.py
@@ -0,0 +1,231 @@
+# Copyright (C) 2012 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import cloudinit.DataSource as DataSource
+
+from cloudinit import seeddir as base_seeddir
+from cloudinit import log
+import cloudinit.util as util
+import os.path
+import os
+import json
+import subprocess
+
+DEFAULT_IID = "iid-dsconfigdrive"
+
+
+class DataSourceConfigDrive(DataSource.DataSource):
+ seed = None
+ seeddir = base_seeddir + '/config_drive'
+ cfg = {}
+ userdata_raw = None
+ metadata = None
+ dsmode = "local"
+
+ def __str__(self):
+ mstr = "DataSourceConfigDrive[%s]" % self.dsmode
+ mstr = mstr + " [seed=%s]" % self.seed
+ return(mstr)
+
+ def get_data(self):
+ found = None
+ md = {}
+ ud = ""
+
+ defaults = {"instance-id": DEFAULT_IID, "dsmode": "pass"}
+
+ if os.path.isdir(self.seeddir):
+ try:
+ (md, ud) = read_config_drive_dir(self.seeddir)
+ found = self.seeddir
+ except nonConfigDriveDir:
+ pass
+
+ if not found:
+ dev = cfg_drive_device()
+ if dev:
+ try:
+ (md, ud) = util.mount_callback_umount(dev,
+ read_config_drive_dir)
+ found = dev
+ except (nonConfigDriveDir, util.mountFailedError):
+ pass
+
+ if not found:
+ return False
+
+ if 'dsconfig' in md:
+ self.cfg = md['dscfg']
+
+ md = util.mergedict(md, defaults)
+
+ # update interfaces and ifup only on the local datasource
+ # this way the DataSourceConfigDriveNet doesn't do it also.
+ if 'network-interfaces' in md and self.dsmode == "local":
+ if md['dsmode'] == "pass":
+ log.info("updating network interfaces from configdrive")
+ else:
+ log.debug("updating network interfaces from configdrive")
+
+ util.write_file("/etc/network/interfaces",
+ md['network-interfaces'])
+ try:
+ (out, err) = util.subp(['ifup', '--all'])
+ if len(out) or len(err):
+ log.warn("ifup --all had stderr: %s" % err)
+
+ except subprocess.CalledProcessError as exc:
+ log.warn("ifup --all failed: %s" % (exc.output[1]))
+
+ self.seed = found
+ self.metadata = md
+ self.userdata_raw = ud
+
+ if md['dsmode'] == self.dsmode:
+ return True
+
+ log.debug("%s: not claiming datasource, dsmode=%s" %
+ (self, md['dsmode']))
+ return False
+
+ def get_public_ssh_keys(self):
+ if not 'public-keys' in self.metadata:
+ return([])
+ return(self.metadata['public-keys'])
+
+ # the data sources' config_obj is a cloud-config formatted
+ # object that came to it from ways other than cloud-config
+ # because cloud-config content would be handled elsewhere
+ def get_config_obj(self):
+ return(self.cfg)
+
+
+class DataSourceConfigDriveNet(DataSourceConfigDrive):
+ dsmode = "net"
+
+
+class nonConfigDriveDir(Exception):
+ pass
+
+
+def cfg_drive_device():
+ """ get the config drive device. return a string like '/dev/vdb'
+ or None (if there is no non-root device attached). This does not
+ check the contents, only reports that if there *were* a config_drive
+ attached, it would be this device.
+ per config_drive documentation, this is
+ "associated as the last available disk on the instance"
+ """
+
+ if 'CLOUD_INIT_CONFIG_DRIVE_DEVICE' in os.environ:
+ return(os.environ['CLOUD_INIT_CONFIG_DRIVE_DEVICE'])
+
+ # we are looking for a raw block device (sda, not sda1) with a vfat
+ # filesystem on it.
+
+ letters = "abcdefghijklmnopqrstuvwxyz"
+ devs = util.find_devs_with("TYPE=vfat")
+
+ # filter out anything not ending in a letter (ignore partitions)
+ devs = [f for f in devs if f[-1] in letters]
+
+ # sort them in reverse so "last" device is first
+ devs.sort(reverse=True)
+
+ if len(devs):
+ return(devs[0])
+
+ return(None)
+
+
+def read_config_drive_dir(source_dir):
+ """
+ read_config_drive_dir(source_dir):
+ read source_dir, and return a tuple with metadata dict and user-data
+ string populated. If not a valid dir, raise a nonConfigDriveDir
+ """
+ md = {}
+ ud = ""
+
+ flist = ("etc/network/interfaces", "root/.ssh/authorized_keys", "meta.js")
+ found = [f for f in flist if os.path.isfile("%s/%s" % (source_dir, f))]
+ keydata = ""
+
+ if len(found) == 0:
+ raise nonConfigDriveDir("%s: %s" % (source_dir, "no files found"))
+
+ if "etc/network/interfaces" in found:
+ with open("%s/%s" % (source_dir, "/etc/network/interfaces")) as fp:
+ md['network-interfaces'] = fp.read()
+
+ if "root/.ssh/authorized_keys" in found:
+ with open("%s/%s" % (source_dir, "root/.ssh/authorized_keys")) as fp:
+ keydata = fp.read()
+
+ meta_js = {}
+
+ if "meta.js" in found:
+ content = ''
+ with open("%s/%s" % (source_dir, "meta.js")) as fp:
+ content = fp.read()
+ md['meta_js'] = content
+ try:
+ meta_js = json.loads(content)
+ except ValueError:
+ raise nonConfigDriveDir("%s: %s" %
+ (source_dir, "invalid json in meta.js"))
+
+ keydata = meta_js.get('public-keys', keydata)
+
+ if keydata:
+ lines = keydata.splitlines()
+ md['public-keys'] = [l for l in lines
+ if len(l) and not l.startswith("#")]
+
+ for copy in ('dsmode', 'instance-id', 'dscfg'):
+ if copy in meta_js:
+ md[copy] = meta_js[copy]
+
+ if 'user-data' in meta_js:
+ ud = meta_js['user-data']
+
+ return(md, ud)
+
+datasources = (
+ (DataSourceConfigDrive, (DataSource.DEP_FILESYSTEM, )),
+ (DataSourceConfigDriveNet,
+ (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
+)
+
+
+# return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return(DataSource.list_from_depends(depends, datasources))
+
+if __name__ == "__main__":
+ def main():
+ import sys
+ import pprint
+ print cfg_drive_device()
+ (md, ud) = read_config_drive_dir(sys.argv[1])
+ print "=== md ==="
+ pprint.pprint(md)
+ print "=== ud ==="
+ print(ud)
+
+ main()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/DataSourceEc2.py b/cloudinit/DataSourceEc2.py
index 06635746..7051ecda 100644
--- a/cloudinit/DataSourceEc2.py
+++ b/cloudinit/DataSourceEc2.py
@@ -24,7 +24,6 @@ from cloudinit import seeddir as base_seeddir
from cloudinit import log
import cloudinit.util as util
import socket
-import urllib2
import time
import boto.utils as boto_utils
import os.path
@@ -134,8 +133,8 @@ class DataSourceEc2(DataSource.DataSource):
url2base[cur] = url
starttime = time.time()
- url = wait_for_metadata_service(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=log.warn)
+ url = util.wait_for_url(urls=urls, max_wait=max_wait,
+ timeout=timeout, status_cb=log.warn)
if url:
log.debug("Using metadata source: '%s'" % url2base[url])
@@ -208,87 +207,6 @@ class DataSourceEc2(DataSource.DataSource):
return False
-def wait_for_metadata_service(urls, max_wait=None, timeout=None,
- status_cb=None):
- """
- urls: a list of urls to try
- max_wait: roughly the maximum time to wait before giving up
- The max time is *actually* len(urls)*timeout as each url will
- be tried once and given the timeout provided.
- timeout: the timeout provided to urllib2.urlopen
- status_cb: call method with string message when a url is not available
-
- the idea of this routine is to wait for the EC2 metdata service to
- come up. On both Eucalyptus and EC2 we have seen the case where
- the instance hit the MD before the MD service was up. EC2 seems
- to have permenantely fixed this, though.
-
- In openstack, the metadata service might be painfully slow, and
- unable to avoid hitting a timeout of even up to 10 seconds or more
- (LP: #894279) for a simple GET.
-
- Offset those needs with the need to not hang forever (and block boot)
- on a system where cloud-init is configured to look for EC2 Metadata
- service but is not going to find one. It is possible that the instance
- data host (169.254.169.254) may be firewalled off Entirely for a sytem,
- meaning that the connection will block forever unless a timeout is set.
- """
- starttime = time.time()
-
- sleeptime = 1
-
- def nullstatus_cb(msg):
- return
-
- if status_cb == None:
- status_cb = nullstatus_cb
-
- def timeup(max_wait, starttime):
- return((max_wait <= 0 or max_wait == None) or
- (time.time() - starttime > max_wait))
-
- loop_n = 0
- while True:
- sleeptime = int(loop_n / 5) + 1
- for url in urls:
- now = time.time()
- if loop_n != 0:
- if timeup(max_wait, starttime):
- break
- if timeout and (now + timeout > (starttime + max_wait)):
- # shorten timeout to not run way over max_time
- timeout = int((starttime + max_wait) - now)
-
- reason = ""
- try:
- req = urllib2.Request(url)
- resp = urllib2.urlopen(req, timeout=timeout)
- if resp.read() != "":
- return url
- reason = "empty data [%s]" % resp.getcode()
- except urllib2.HTTPError as e:
- reason = "http error [%s]" % e.code
- except urllib2.URLError as e:
- reason = "url error [%s]" % e.reason
- except socket.timeout as e:
- reason = "socket timeout [%s]" % e
- except Exception as e:
- reason = "unexpected error [%s]" % e
-
- if log:
- status_cb("'%s' failed [%s/%ss]: %s" %
- (url, int(time.time() - starttime), max_wait,
- reason))
-
- if timeup(max_wait, starttime):
- break
-
- loop_n = loop_n + 1
- time.sleep(sleeptime)
-
- return False
-
-
datasources = [
(DataSourceEc2, (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
]
diff --git a/cloudinit/DataSourceMaaS.py b/cloudinit/DataSourceMaaS.py
new file mode 100644
index 00000000..fd9d6316
--- /dev/null
+++ b/cloudinit/DataSourceMaaS.py
@@ -0,0 +1,344 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Hafliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import cloudinit.DataSource as DataSource
+
+from cloudinit import seeddir as base_seeddir
+from cloudinit import log
+import cloudinit.util as util
+import errno
+import oauth.oauth as oauth
+import os.path
+import urllib2
+import time
+
+
+MD_VERSION = "2012-03-01"
+
+
+class DataSourceMaaS(DataSource.DataSource):
+ """
+ DataSourceMaaS reads instance information from MaaS.
+ Given a config metadata_url, and oauth tokens, it expects to find
+ files under the root named:
+ instance-id
+ user-data
+ hostname
+ """
+ seeddir = base_seeddir + '/maas'
+ baseurl = None
+
+ def __str__(self):
+ return("DataSourceMaaS[%s]" % self.baseurl)
+
+ def get_data(self):
+ mcfg = self.ds_cfg
+
+ try:
+ (userdata, metadata) = read_maas_seed_dir(self.seeddir)
+ self.userdata_raw = userdata
+ self.metadata = metadata
+ self.baseurl = self.seeddir
+ return True
+ except MaasSeedDirNone:
+ pass
+ except MaasSeedDirMalformed as exc:
+ log.warn("%s was malformed: %s\n" % (self.seeddir, exc))
+ raise
+
+ try:
+ # if there is no metadata_url, then we're not configured
+ url = mcfg.get('metadata_url', None)
+ if url == None:
+ return False
+
+ if not self.wait_for_metadata_service(url):
+ return False
+
+ self.baseurl = url
+
+ (userdata, metadata) = read_maas_seed_url(self.baseurl,
+ self.md_headers)
+ self.userdata_raw = userdata
+ self.metadata = metadata
+ return True
+ except Exception:
+ util.logexc(log)
+ return False
+
+ def md_headers(self, url):
+ mcfg = self.ds_cfg
+
+ # if we are missing token_key, token_secret or consumer_key
+ # then just do non-authed requests
+ for required in ('token_key', 'token_secret', 'consumer_key'):
+ if required not in mcfg:
+ return({})
+
+ consumer_secret = mcfg.get('consumer_secret', "")
+
+ return(oauth_headers(url=url, consumer_key=mcfg['consumer_key'],
+ token_key=mcfg['token_key'], token_secret=mcfg['token_secret'],
+ consumer_secret=consumer_secret))
+
+ def wait_for_metadata_service(self, url):
+ mcfg = self.ds_cfg
+
+ max_wait = 120
+ try:
+ max_wait = int(mcfg.get("max_wait", max_wait))
+ except Exception:
+ util.logexc(log)
+ log.warn("Failed to get max wait. using %s" % max_wait)
+
+ if max_wait == 0:
+ return False
+
+ timeout = 50
+ try:
+ timeout = int(mcfg.get("timeout", timeout))
+ except Exception:
+ util.logexc(log)
+ log.warn("Failed to get timeout, using %s" % timeout)
+
+ starttime = time.time()
+ check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
+ url = util.wait_for_url(urls=[check_url], max_wait=max_wait,
+ timeout=timeout, status_cb=log.warn,
+ headers_cb=self.md_headers)
+
+ if url:
+ log.debug("Using metadata source: '%s'" % url)
+ else:
+ log.critical("giving up on md after %i seconds\n" %
+ int(time.time() - starttime))
+
+ return (bool(url))
+
+
+def read_maas_seed_dir(seed_d):
+ """
+ Return user-data and metadata for a maas seed dir in seed_d.
+ Expected contents of seed_d are the following files:
+ * instance-id
+ * local-hostname
+ * user-data
+ """
+ files = ('local-hostname', 'instance-id', 'user-data')
+ md = {}
+
+ if not os.path.isdir(seed_d):
+ raise MaasSeedDirNone("%s: not a directory")
+
+ for fname in files:
+ try:
+ with open(os.path.join(seed_d, fname)) as fp:
+ md[fname] = fp.read()
+ fp.close()
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ return(check_seed_contents(md, seed_d))
+
+
+def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
+ version=MD_VERSION):
+ """
+ Read the maas datasource at seed_url.
+ header_cb is a method that should return a headers dictionary that will
+ be given to urllib2.Request()
+
+ Expected format of seed_url is the following files:
+ * <seed_url>/<version>/instance-id
+ * <seed_url>/<version>/local-hostname
+ * <seed_url>/<version>/user-data
+ """
+ files = ('meta-data/local-hostname', 'meta-data/instance-id', 'user-data')
+
+ base_url = "%s/%s" % (seed_url, version)
+ md = {}
+ for fname in files:
+ url = "%s/%s" % (base_url, fname)
+ if header_cb:
+ headers = header_cb(url)
+ else:
+ headers = {}
+
+ try:
+ req = urllib2.Request(url, data=None, headers=headers)
+ resp = urllib2.urlopen(req, timeout=timeout)
+ md[os.path.basename(fname)] = resp.read()
+ except urllib2.HTTPError as e:
+ if e.code != 404:
+ raise
+
+ return(check_seed_contents(md, seed_url))
+
+
+def check_seed_contents(content, seed):
+ """Validate that content is a dict that is valid as a
+ return for a datasource.
+ Either return a (userdata, metadata) tuple or
+ Raise MaasSeedDirMalformed or MaasSeedDirNone
+ """
+ md_required = ('instance-id', 'local-hostname')
+ found = content.keys()
+
+ if len(content) == 0:
+ raise MaasSeedDirNone("%s: no data files found" % seed)
+
+ missing = [k for k in md_required if k not in found]
+ if len(missing):
+ raise MaasSeedDirMalformed("%s: missing files %s" % (seed, missing))
+
+ userdata = content.get('user-data', "")
+ md = {}
+ for (key, val) in content.iteritems():
+ if key == 'user-data':
+ continue
+ md[key] = val
+
+ return(userdata, md)
+
+
+def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret):
+ consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
+ token = oauth.OAuthToken(token_key, token_secret)
+ params = {
+ 'oauth_version': "1.0",
+ 'oauth_nonce': oauth.generate_nonce(),
+ 'oauth_timestamp': int(time.time()),
+ 'oauth_token': token.key,
+ 'oauth_consumer_key': consumer.key,
+ }
+ req = oauth.OAuthRequest(http_url=url, parameters=params)
+ req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(),
+ consumer, token)
+ return(req.to_header())
+
+
+class MaasSeedDirNone(Exception):
+ pass
+
+
+class MaasSeedDirMalformed(Exception):
+ pass
+
+
+datasources = [
+ (DataSourceMaaS, (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
+]
+
+
+# return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return(DataSource.list_from_depends(depends, datasources))
+
+
+if __name__ == "__main__":
+ def main():
+ """
+ Call with single argument of directory or http or https url.
+ If url is given additional arguments are allowed, which will be
+ interpreted as consumer_key, token_key, token_secret, consumer_secret
+ """
+ import argparse
+ import pprint
+
+ parser = argparse.ArgumentParser(description='Interact with Maas DS')
+ parser.add_argument("--config", metavar="file",
+ help="specify DS config file", default=None)
+ parser.add_argument("--ckey", metavar="key",
+ help="the consumer key to auth with", default=None)
+ parser.add_argument("--tkey", metavar="key",
+ help="the token key to auth with", default=None)
+ parser.add_argument("--csec", metavar="secret",
+ help="the consumer secret (likely '')", default="")
+ parser.add_argument("--tsec", metavar="secret",
+ help="the token secret to auth with", default=None)
+ parser.add_argument("--apiver", metavar="version",
+ help="the apiver to use ("" can be used)", default=MD_VERSION)
+
+ subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
+ subcmds.add_parser('crawl', help="crawl the datasource")
+ subcmds.add_parser('get', help="do a single GET of provided url")
+ subcmds.add_parser('check-seed', help="read andn verify seed at url")
+
+ parser.add_argument("url", help="the data source to query")
+
+ args = parser.parse_args()
+
+ creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
+ 'token_secret': args.tsec, 'consumer_secret': args.csec}
+
+ if args.config:
+ import yaml
+ with open(args.config) as fp:
+ cfg = yaml.load(fp)
+ if 'datasource' in cfg:
+ cfg = cfg['datasource']['MaaS']
+ for key in creds.keys():
+ if key in cfg and creds[key] == None:
+ creds[key] = cfg[key]
+
+ def geturl(url, headers_cb):
+ req = urllib2.Request(url, data=None, headers=headers_cb(url))
+ return(urllib2.urlopen(req).read())
+
+ def printurl(url, headers_cb):
+ print "== %s ==\n%s\n" % (url, geturl(url, headers_cb))
+
+ def crawl(url, headers_cb=None):
+ if url.endswith("/"):
+ for line in geturl(url, headers_cb).splitlines():
+ if line.endswith("/"):
+ crawl("%s%s" % (url, line), headers_cb)
+ else:
+ printurl("%s%s" % (url, line), headers_cb)
+ else:
+ printurl(url, headers_cb)
+
+ def my_headers(url):
+ headers = {}
+ if creds.get('consumer_key', None) != None:
+ headers = oauth_headers(url, **creds)
+ return headers
+
+ if args.subcmd == "check-seed":
+ if args.url.startswith("http"):
+ (userdata, metadata) = read_maas_seed_url(args.url,
+ header_cb=my_headers, version=args.apiver)
+ else:
+ (userdata, metadata) = read_maas_seed_url(args.url)
+ print "=== userdata ==="
+ print userdata
+ print "=== metadata ==="
+ pprint.pprint(metadata)
+
+ elif args.subcmd == "get":
+ printurl(args.url, my_headers)
+
+ elif args.subcmd == "crawl":
+ if not args.url.endswith("/"):
+ args.url = "%s/" % args.url
+ crawl(args.url, my_headers)
+
+ main()
diff --git a/cloudinit/DataSourceNoCloud.py b/cloudinit/DataSourceNoCloud.py
index fa64f2e5..62ecc088 100644
--- a/cloudinit/DataSourceNoCloud.py
+++ b/cloudinit/DataSourceNoCloud.py
@@ -23,6 +23,8 @@ import cloudinit.DataSource as DataSource
from cloudinit import seeddir as base_seeddir
from cloudinit import log
import cloudinit.util as util
+import errno
+import subprocess
class DataSourceNoCloud(DataSource.DataSource):
@@ -30,6 +32,7 @@ class DataSourceNoCloud(DataSource.DataSource):
userdata = None
userdata_raw = None
supported_seed_starts = ("/", "file://")
+ dsmode = "local"
seed = None
cmdline_id = "ds=nocloud"
seeddir = base_seeddir + '/nocloud'
@@ -41,7 +44,7 @@ class DataSourceNoCloud(DataSource.DataSource):
def get_data(self):
defaults = {
- "instance-id": "nocloud"
+ "instance-id": "nocloud", "dsmode": self.dsmode
}
found = []
@@ -64,13 +67,47 @@ class DataSourceNoCloud(DataSource.DataSource):
found.append(self.seeddir)
log.debug("using seeded cache data in %s" % self.seeddir)
+ fslist = util.find_devs_with("TYPE=vfat")
+ fslist.extend(util.find_devs_with("TYPE=iso9660"))
+
+ label_list = util.find_devs_with("LABEL=cidata")
+ devlist = list(set(fslist) & set(label_list))
+ devlist.sort(reverse=True)
+
+ for dev in devlist:
+ try:
+ (newmd, newud) = util.mount_callback_umount(dev,
+ util.read_seeded)
+ md = util.mergedict(newmd, md)
+ ud = newud
+
+ # for seed from a device, the default mode is 'net'.
+ # that is more likely to be what is desired.
+ # If they want dsmode of local, then they must
+ # specify that.
+ if 'dsmode' not in md:
+ md['dsmode'] = "net"
+
+ log.debug("using data from %s" % dev)
+ found.append(dev)
+ break
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ except util.mountFailedError:
+ log.warn("Failed to mount %s when looking for seed" % dev)
+
# there was no indication on kernel cmdline or data
# in the seeddir suggesting this handler should be used.
if len(found) == 0:
return False
+ seeded_interfaces = None
+
# the special argument "seedfrom" indicates we should
# attempt to seed the userdata / metadata from its value
+ # its primary value is in allowing the user to type less
+ # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
if "seedfrom" in md:
seedfrom = md["seedfrom"]
seedfound = False
@@ -83,6 +120,9 @@ class DataSourceNoCloud(DataSource.DataSource):
(seedfrom, self.__class__))
return False
+ if 'network-interfaces' in md:
+ seeded_interfaces = self.dsmode
+
# this could throw errors, but the user told us to do it
# so if errors are raised, let them raise
(md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
@@ -93,10 +133,35 @@ class DataSourceNoCloud(DataSource.DataSource):
found.append(seedfrom)
md = util.mergedict(md, defaults)
+
+ # update the network-interfaces if metadata had 'network-interfaces'
+ # entry and this is the local datasource, or 'seedfrom' was used
+ # and the source of the seed was self.dsmode
+ # ('local' for NoCloud, 'net' for NoCloudNet')
+ if ('network-interfaces' in md and
+ (self.dsmode in ("local", seeded_interfaces))):
+ log.info("updating network interfaces from nocloud")
+
+ util.write_file("/etc/network/interfaces",
+ md['network-interfaces'])
+ try:
+ (out, err) = util.subp(['ifup', '--all'])
+ if len(out) or len(err):
+ log.warn("ifup --all had stderr: %s" % err)
+
+ except subprocess.CalledProcessError as exc:
+ log.warn("ifup --all failed: %s" % (exc.output[1]))
+
self.seed = ",".join(found)
self.metadata = md
self.userdata_raw = ud
- return True
+
+ if md['dsmode'] == self.dsmode:
+ return True
+
+ log.debug("%s: not claiming datasource, dsmode=%s" %
+ (self, md['dsmode']))
+ return False
# returns true or false indicating if cmdline indicated
@@ -145,6 +210,7 @@ class DataSourceNoCloudNet(DataSourceNoCloud):
cmdline_id = "ds=nocloud-net"
supported_seed_starts = ("http://", "https://", "ftp://")
seeddir = base_seeddir + '/nocloud-net'
+ dsmode = "net"
datasources = (
diff --git a/cloudinit/DataSourceOVF.py b/cloudinit/DataSourceOVF.py
index 1f2b622e..a0b1b518 100644
--- a/cloudinit/DataSourceOVF.py
+++ b/cloudinit/DataSourceOVF.py
@@ -162,7 +162,7 @@ def get_ovf_env(dirname):
# transport functions take no input and return
# a 3 tuple of content, path, filename
-def transport_iso9660(require_iso=False):
+def transport_iso9660(require_iso=True):
# default_regex matches values in
# /lib/udev/rules.d/60-cdrom_id.rules
diff --git a/cloudinit/UserDataHandler.py b/cloudinit/UserDataHandler.py
index 93d1d36a..98729056 100644
--- a/cloudinit/UserDataHandler.py
+++ b/cloudinit/UserDataHandler.py
@@ -71,6 +71,8 @@ def do_include(content, appendmsg):
line = line[len("#include"):].lstrip()
if line.startswith("#"):
continue
+ if line.strip() == "":
+ continue
# urls cannot not have leading or trailing white space
msum = hashlib.md5() # pylint: disable=E1101
diff --git a/cloudinit/__init__.py b/cloudinit/__init__.py
index 7a34e053..9f188766 100644
--- a/cloudinit/__init__.py
+++ b/cloudinit/__init__.py
@@ -29,7 +29,7 @@ cfg_env_name = "CLOUD_CFG"
cfg_builtin = """
log_cfgs: []
-datasource_list: ["NoCloud", "OVF", "Ec2"]
+datasource_list: ["NoCloud", "ConfigDrive", "OVF", "MaaS", "Ec2" ]
def_log_file: /var/log/cloud-init.log
syslog_fix_perms: syslog:adm
"""
@@ -60,7 +60,6 @@ import cPickle
import sys
import os.path
import errno
-import pwd
import subprocess
import yaml
import logging
@@ -572,10 +571,14 @@ def handler_handle_part(mod, data, ctype, filename, payload, frequency):
if not (modfreq == per_always or
(frequency == per_instance and modfreq == per_instance)):
return
- if mod.handler_version == 1:
- mod.handle_part(data, ctype, filename, payload)
- else:
- mod.handle_part(data, ctype, filename, payload, frequency)
+ try:
+ if mod.handler_version == 1:
+ mod.handle_part(data, ctype, filename, payload)
+ else:
+ mod.handle_part(data, ctype, filename, payload, frequency)
+ except:
+ util.logexc(log)
+ traceback.print_exc(file=sys.stderr)
def partwalker_handle_handler(pdata, _ctype, _filename, payload):
@@ -586,15 +589,13 @@ def partwalker_handle_handler(pdata, _ctype, _filename, payload):
modfname = modname + ".py"
util.write_file("%s/%s" % (pdata['handlerdir'], modfname), payload, 0600)
- pdata['handlercount'] = curcount + 1
-
try:
mod = __import__(modname)
handler_register(mod, pdata['handlers'], pdata['data'], frequency)
+ pdata['handlercount'] = curcount + 1
except:
util.logexc(log)
traceback.print_exc(file=sys.stderr)
- return
def partwalker_callback(pdata, ctype, filename, payload):
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index be7ed3a9..7e07812e 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -22,7 +22,7 @@
import subprocess
-def netdev_info():
+def netdev_info(empty=""):
fields = ("hwaddr", "addr", "bcast", "mask")
ifcfg_out = str(subprocess.check_output(["ifconfig", "-a"]))
devs = {}
@@ -59,6 +59,13 @@ def netdev_info():
pass
elif toks[i].startswith("%s:" % field):
devs[curdev][target] = toks[i][len(field) + 1:]
+
+ if empty != "":
+ for (_devname, dev) in devs.iteritems():
+ for field in dev:
+ if dev[field] == "":
+ dev[field] = empty
+
return(devs)
@@ -85,7 +92,7 @@ def getgateway():
def debug_info(pre="ci-info: "):
lines = []
try:
- netdev = netdev_info()
+ netdev = netdev_info(empty=".")
except Exception:
lines.append("netdev_info failed!")
netdev = {}
diff --git a/cloudinit/util.py b/cloudinit/util.py
index e6489648..9133426c 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -32,6 +32,7 @@ import re
import socket
import sys
import time
+import tempfile
import traceback
import urlparse
@@ -208,16 +209,18 @@ def runparts(dirp, skip_no_exist=True):
if skip_no_exist and not os.path.isdir(dirp):
return
- # per bug 857926, Fedora's run-parts will exit failure on empty dir
- if os.path.isdir(dirp) and os.listdir(dirp) == []:
- return
-
- cmd = ['run-parts', '--regex', '.*', dirp]
- sp = subprocess.Popen(cmd)
- sp.communicate()
- if sp.returncode is not 0:
- raise subprocess.CalledProcessError(sp.returncode, cmd)
- return
+ failed = 0
+ for exe_name in sorted(os.listdir(dirp)):
+ exe_path = os.path.join(dirp, exe_name)
+ if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
+ popen = subprocess.Popen([exe_path])
+ popen.communicate()
+ if popen.returncode is not 0:
+ failed += 1
+ sys.stderr.write("failed: %s [%i]\n" %
+ (exe_path, popen.returncode))
+ if failed:
+ raise RuntimeError('runparts: %i failures' % failed)
def subp(args, input_=None):
@@ -515,30 +518,70 @@ def dos2unix(string):
return(string.replace('\r\n', '\n'))
-def islxc():
- # is this host running lxc?
+def is_container():
+ # is this code running in a container of some sort
+
+ for helper in ('running-in-container', 'lxc-is-container'):
+ try:
+ # try to run a helper program. if it returns true
+ # then we're inside a container. otherwise, no
+ sp = subprocess.Popen(helper, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ sp.communicate(None)
+ return(sp.returncode == 0)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ # this code is largely from the logic in
+ # ubuntu's /etc/init/container-detect.conf
try:
- with open("/proc/1/cgroup") as f:
- if f.read() == "/":
- return True
+        # Detect old-style libvirt (LIBVIRT_LXC_UUID) and other container
+        # managers that set 'container' in pid 1's environment
+ pid1env = get_proc_env(1)
+ if "container" in pid1env:
+ return True
+
+ if "LIBVIRT_LXC_UUID" in pid1env:
+ return True
+
except IOError as e:
if e.errno != errno.ENOENT:
- raise
+ pass
+
+ # Detect OpenVZ containers
+ if os.path.isdir("/proc/vz") and not os.path.isdir("/proc/bc"):
+ return True
try:
- # try to run a program named 'lxc-is-container'. if it returns true,
- # then we're inside a container. otherwise, no
- sp = subprocess.Popen(['lxc-is-container'], stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- sp.communicate(None)
- return(sp.returncode == 0)
- except OSError as e:
+ # Detect Vserver containers
+ with open("/proc/self/status") as fp:
+ lines = fp.read().splitlines()
+ for line in lines:
+ if line.startswith("VxID:"):
+ (_key, val) = line.strip().split(":", 1)
+ if val != "0":
+ return True
+ except IOError as e:
if e.errno != errno.ENOENT:
- raise
+ pass
return False
+def get_proc_env(pid):
+ # return the environment in a dict that a given process id was started with
+ env = {}
+ with open("/proc/%s/environ" % pid) as fp:
+ toks = fp.read().split("\0")
+ for tok in toks:
+ if tok == "":
+ continue
+ (name, val) = tok.split("=", 1)
+ env[name] = val
+ return env
+
+
def get_hostname_fqdn(cfg, cloud):
# return the hostname and fqdn from 'cfg'. If not found in cfg,
# then fall back to data from cloud
@@ -630,3 +673,170 @@ def close_stdin():
return
with open(os.devnull) as fp:
os.dup2(fp.fileno(), sys.stdin.fileno())
+
+
+def find_devs_with(criteria):
+ """
+ find devices matching given criteria (via blkid)
+ criteria can be *one* of:
+ TYPE=<filesystem>
+ LABEL=<label>
+ UUID=<uuid>
+ """
+ try:
+ (out, _err) = subp(['blkid', '-t%s' % criteria, '-odevice'])
+ except subprocess.CalledProcessError:
+ return([])
+ return(str(out).splitlines())
+
+
+class mountFailedError(Exception):
+ pass
+
+
+def mount_callback_umount(device, callback, data=None):
+ """
+ mount the device, call method 'callback' passing the directory
+ in which it was mounted, then unmount. Return whatever 'callback'
+ returned. If data != None, also pass data to callback.
+ """
+
+ def _cleanup(umount, tmpd):
+ if umount:
+ try:
+ subp(["umount", '-l', umount])
+ except subprocess.CalledProcessError:
+ raise
+ if tmpd:
+ os.rmdir(tmpd)
+
+ # go through mounts to see if it was already mounted
+ fp = open("/proc/mounts")
+ mounts = fp.readlines()
+ fp.close()
+
+ tmpd = None
+
+ mounted = {}
+ for mpline in mounts:
+ (dev, mp, fstype, _opts, _freq, _passno) = mpline.split()
+ mp = mp.replace("\\040", " ")
+ mounted[dev] = (dev, fstype, mp, False)
+
+ umount = False
+ if device in mounted:
+ mountpoint = "%s/" % mounted[device][2]
+ else:
+ tmpd = tempfile.mkdtemp()
+
+ mountcmd = ["mount", "-o", "ro", device, tmpd]
+
+ try:
+ (_out, _err) = subp(mountcmd)
+ umount = tmpd
+ except subprocess.CalledProcessError as exc:
+ _cleanup(umount, tmpd)
+ raise mountFailedError(exc.output[1])
+
+ mountpoint = "%s/" % tmpd
+
+ try:
+ if data == None:
+ ret = callback(mountpoint)
+ else:
+ ret = callback(mountpoint, data)
+
+ except Exception as exc:
+ _cleanup(umount, tmpd)
+ raise exc
+
+ _cleanup(umount, tmpd)
+
+ return(ret)
+
+
+def wait_for_url(urls, max_wait=None, timeout=None,
+ status_cb=None, headers_cb=None):
+ """
+ urls: a list of urls to try
+ max_wait: roughly the maximum time to wait before giving up
+ The max time is *actually* len(urls)*timeout as each url will
+ be tried once and given the timeout provided.
+ timeout: the timeout provided to urllib2.urlopen
+ status_cb: call method with string message when a url is not available
+ headers_cb: call method with single argument of url to get headers
+ for request.
+
+    the idea of this routine is to wait for the EC2 metadata service to
+ come up. On both Eucalyptus and EC2 we have seen the case where
+ the instance hit the MD before the MD service was up. EC2 seems
+    to have permanently fixed this, though.
+
+ In openstack, the metadata service might be painfully slow, and
+ unable to avoid hitting a timeout of even up to 10 seconds or more
+ (LP: #894279) for a simple GET.
+
+ Offset those needs with the need to not hang forever (and block boot)
+ on a system where cloud-init is configured to look for EC2 Metadata
+ service but is not going to find one. It is possible that the instance
+    data host (169.254.169.254) may be firewalled off entirely for a system,
+ meaning that the connection will block forever unless a timeout is set.
+ """
+ starttime = time.time()
+
+ sleeptime = 1
+
+ def nullstatus_cb(msg):
+ return
+
+ if status_cb == None:
+ status_cb = nullstatus_cb
+
+ def timeup(max_wait, starttime):
+ return((max_wait <= 0 or max_wait == None) or
+ (time.time() - starttime > max_wait))
+
+ loop_n = 0
+ while True:
+ sleeptime = int(loop_n / 5) + 1
+ for url in urls:
+ now = time.time()
+ if loop_n != 0:
+ if timeup(max_wait, starttime):
+ break
+ if timeout and (now + timeout > (starttime + max_wait)):
+ # shorten timeout to not run way over max_time
+ timeout = int((starttime + max_wait) - now)
+
+ reason = ""
+ try:
+ if headers_cb != None:
+ headers = headers_cb(url)
+ else:
+ headers = {}
+
+ req = urllib2.Request(url, data=None, headers=headers)
+ resp = urllib2.urlopen(req, timeout=timeout)
+ if resp.read() != "":
+ return url
+ reason = "empty data [%s]" % resp.getcode()
+ except urllib2.HTTPError as e:
+ reason = "http error [%s]" % e.code
+ except urllib2.URLError as e:
+ reason = "url error [%s]" % e.reason
+ except socket.timeout as e:
+ reason = "socket timeout [%s]" % e
+ except Exception as e:
+ reason = "unexpected error [%s]" % e
+
+ status_cb("'%s' failed [%s/%ss]: %s" %
+ (url, int(time.time() - starttime), max_wait,
+ reason))
+
+ if timeup(max_wait, starttime):
+ break
+
+ loop_n = loop_n + 1
+ time.sleep(sleeptime)
+
+ return False
diff --git a/config/cloud.cfg b/config/cloud.cfg
index 25d02cee..e9e3bb6c 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -1,7 +1,7 @@
user: ubuntu
disable_root: 1
preserve_hostname: False
-# datasource_list: [ "NoCloud", "OVF", "Ec2" ]
+# datasource_list: ["NoCloud", "ConfigDrive", "OVF", "MaaS", "Ec2" ]
cloud_init_modules:
- bootcmd
@@ -19,11 +19,13 @@ cloud_config_modules:
- locale
- set-passwords
- grub-dpkg
+ - apt-pipelining
- apt-update-upgrade
- landscape
- timezone
- puppet
- chef
+ - salt-minion
- mcollective
- disable-ec2-metadata
- runcmd
diff --git a/doc/configdrive/README b/doc/configdrive/README
new file mode 100644
index 00000000..ed9033c9
--- /dev/null
+++ b/doc/configdrive/README
@@ -0,0 +1,118 @@
+The 'ConfigDrive' DataSource supports the OpenStack configdrive disk.
+See doc/source/api_ext/ext_config_drive.rst in the nova source code for
+more information on config drive.
+
+The following criteria are required to be identified by
+DataSourceConfigDrive as a config drive:
+ * must be formatted with a vfat filesystem
+ * must be an un-partitioned block device (/dev/vdb, not /dev/vdb1)
+ * must contain one of the following files:
+ * etc/network/interfaces
+ * root/.ssh/authorized_keys
+ * meta.js
+
+By default, cloud-init does not consider this source to be a full-fledged
+datasource. Instead, the default behavior is to assume it is really only
+present to provide networking information. Cloud-init will copy off the
+network information, apply it to the system, and then continue on. The
+"full" datasource would then be found in the EC2 metadata service.
+
+== Content of config-drive ==
+ * etc/network/interfaces
+ This file is laid down by nova in order to pass static networking
+ information to the guest. Cloud-init will copy it off of the config-drive
+ and into /etc/network/interfaces as soon as it can, and then attempt to
+ bring up all network interfaces.
+
+ * root/.ssh/authorized_keys
+ This file is laid down by nova, and contains the keys that were
+ provided to it on instance creation (nova-boot --key ....)
+
+ Cloud-init will copy those keys and put them into the configured user
+ ('ubuntu') .ssh/authorized_keys.
+
+ * meta.js
+ meta.js is populated on the config-drive in response to the user passing
+ "meta flags" (nova boot --meta key=value ...). It is expected to be json
+   formatted.
+
+== Configuration ==
+Cloud-init's behavior can be modified by keys found in the meta.js file in
+the following ways:
+ * dsmode:
+ values: local, net, pass
+ default: pass
+
+ This is what indicates if configdrive is a final data source or not.
+ By default it is 'pass', meaning this datasource should not be read.
+ Set it to 'local' or 'net' to stop cloud-init from continuing on to
+ search for other data sources after network config.
+
+ The difference between 'local' and 'net' is that local will not require
+ networking to be up before user-data actions (or boothooks) are run.
+
+ * instance-id:
+ default: iid-dsconfigdrive
+ This is utilized as the metadata's instance-id. It should generally
+ be unique, as it is what is used to determine "is this a new instance".
+
+ * public-keys:
+ default: None
+ if present, these keys will be used as the public keys for the
+ instance. This value overrides the content in authorized_keys.
+ Note: it is likely preferable to provide keys via user-data
+
+ * user-data:
+ default: None
+ This provides cloud-init user-data. See other documentation for what
+ all can be present here.
+
+== Example ==
+Here is an example using the nova client (python-novaclient)
+
+Assuming the following variables are set up:
+ * img_id : set to the nova image id (uuid from image-list)
+ * flav_id : set to numeric flavor_id (nova flavor-list)
+ * keyname : set to name of key for this instance (nova keypair-list)
+
+$ cat my-user-data
+#!/bin/sh
+echo ==== USER_DATA FROM EC2 MD ==== | tee /ud.log
+
+$ ud_value=$(sed 's,EC2 MD,META KEY,' my-user-data)
+
+## Now, 'ud_value' has the same content as the my-user-data file, but
+## with the string "USER_DATA FROM META KEY"
+
+## launch an instance with dsmode=pass
+## This will really not use the configdrive for anything as the mode
+## for the datasource is 'pass', meaning it will still expect some
+## other data source (DataSourceEc2).
+
+$ nova boot --image=$img_id --config-drive=1 --flavor=$flav_id \
+ --key_name=$keyname \
+ --user_data=my-user-data \
+ "--meta=instance-id=iid-001 \
+ "--meta=user-data=${ud_keyval}" \
+ "--meta=dsmode=pass" cfgdrive-dsmode-pass
+
+$ euca-get-console-output i-0000001 | grep USER_DATA
+echo ==== USER_DATA FROM EC2 MD ==== | tee /ud.log
+
+## Now, launch an instance with dsmode=local
+## This time, the only metadata and userdata available to cloud-init
+## are on the config-drive
+$ nova boot --image=$img_id --config-drive=1 --flavor=$flav_id \
+ --key_name=$keyname \
+ --user_data=my-user-data \
+ "--meta=instance-id=iid-001 \
+ "--meta=user-data=${ud_keyval}" \
+ "--meta=dsmode=local" cfgdrive-dsmode-local
+
+$ euca-get-console-output i-0000002 | grep USER_DATA
+echo ==== USER_DATA FROM META KEY ==== | tee /ud.log
+
+--
+[1] https://github.com/openstack/nova/blob/master/doc/source/api_ext/ext_config_drive.rst for more info
+
+
diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt
index cbaa3467..e9372144 100644
--- a/doc/examples/cloud-config-chef.txt
+++ b/doc/examples/cloud-config-chef.txt
@@ -1,6 +1,6 @@
#cloud-config
#
-# This is an example file to automatically setup and run puppetd
+# This is an example file to automatically setup chef and run a list of recipes
# when the instance boots for the first time.
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index b86c5ba6..b3a26114 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -13,3 +13,14 @@ datasource:
metadata_urls:
- http://169.254.169.254:80
- http://instance-data:8773
+
+ MaaS:
+ timeout : 50
+ max_wait : 120
+
+ # there are no default values for metadata_url or oauth credentials
+ # If no credentials are present, non-authed attempts will be made.
+   metadata_url: http://maas-host.localdomain/source
+ consumer_key: Xh234sdkljf
+ token_key: kjfhgb3n
+ token_secret: 24uysdfx1w4
diff --git a/doc/examples/cloud-config-salt-minion.txt b/doc/examples/cloud-config-salt-minion.txt
new file mode 100644
index 00000000..939fdc8b
--- /dev/null
+++ b/doc/examples/cloud-config-salt-minion.txt
@@ -0,0 +1,53 @@
+#cloud-config
+#
+# This is an example file to automatically setup and run a salt
+# minion when the instance boots for the first time.
+# Make sure that this file is valid yaml before starting instances.
+# It should be passed as user-data when starting the instance.
+
+salt_minion:
+ # conf contains all the directives to be assigned in /etc/salt/minion.
+
+ conf:
+ # Set the location of the salt master server, if the master server cannot be
+ # resolved, then the minion will fail to start.
+
+ master: salt.example.com
+
+ # Salt keys are manually generated by: salt-key --gen-keys=GEN_KEYS,
+ # where GEN_KEYS is the name of the keypair, e.g. 'minion'. The keypair
+ # will be copied to /etc/salt/pki on the minion instance.
+
+ public_key: |
+ -----BEGIN PUBLIC KEY-----
+ MIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEAwI4yqk1Y12zVmu9Ejlua
+ h2FD6kjrt+N9XfGqZUUVNeRb7CA0Sj5Q6NtgoaiXuIrSea2sLda6ivqAGmtxMMrP
+ zpf3FwsYWxBUNF7D4YeLmYjvcTbfr3bCOIRnPNXZ+4isuvvEiM02u2cO0okZSgeb
+ dofNa1NbTLYAQr9jZZb7GPKrTO4CKy0xzBih/A+sl6dL9PNDmqXQEjyJS6PXG1Vj
+ PvD5jpSrxuIl5Ms/+2Ro3ALgvC8dgoY/3m3csnd06afumGKv5YOGtf+bnWLhc0bf
+ 6Sk8Q6i5t0Bl+HAULSPr+B9x/I0rN76ZnPvTj1+hJ0zTof4d0hOLx/K5OQyt7AKo
+ 4wIBAQ==
+ -----END PUBLIC KEY-----
+
+ private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ Proc-Type: 4,ENCRYPTED
+ DEK-Info: AES-128-CBC,ECE30DBBA56E2DF06B7BC415F8870994
+
+ YQOE5HIsghqjRsxPQqiWMH/VHmyFH6xIpBcmzxzispEHwBojlvLXviwvR66YhgNw
+ 7smwE10Ik4/cwwiHTZqCk++jPATPygBiqQkUijCWzcT9kfaxmqdP4PL+hu9g7kGC
+ KrD2Bm8/oO08s957aThuHC1sABRcJ1V3FRzJT6Za4fwweyvHVYRnmgaDA6zH0qV8
+ NqBSB2hnNXKEdh6UFz9QGcrQxnRjfdIaW64zoEX7jT7gYYL7FkGXBa3XdMOA4fnl
+ adRwLFMs0jfilisZv8oUbPdZ6J6x3o8p8LVecCF8tdZt1zkcLSIXKnoDFpHSISGs
+ BD9aqD+E4ejynM/tPaVFq4IHzT8viN6h6WcH8fbpClFZ66Iyy9XL3/CjAY7Jzhh9
+ fnbc4Iq28cdbmO/vkR7JyVOgEMWe1BcSqtro70XoUNRY8uDJUPqohrhm/9AigFRA
+ Pwyf3LqojxRnwXjHsZtGltUtEAPZzgh3fKJnx9MyRR7DPXBRig7TAHU7n2BFRhHA
+ TYThy29bK6NkIc/cKc2kEQVo98Cr04PO8jVxZM332FlhiVlP0kpAp+tFj7aMzPTG
+ sJumb9kPbMsgpEuTCONm3yyoufGEBFMrIJ+Po48M2RlYOh50VkO09pI+Eu7FPtVB
+ H4gKzoJIpZZ/7vYXQ3djM8s9hc5gD5CVExTZV4drbsXt6ITiwHuxZ6CNHRBPL5AY
+ wmF8QZz4oivv1afdSe6E6OGC3uVmX3Psn5CVq2pE8VlRDKFy1WqfU2enRAijSS2B
+ rtJs263fOJ8ZntDzMVMPgiAlzzfA285KUletpAeUmz+peR1gNzkE0eKSG6THOCi0
+ rfmR8SeEzyNvin0wQ3qgYiiHjHbbFhJIMAQxoX+0hDSooM7Wo5wkLREULpGuesTg
+ A6Fe3CiOivMDraNGA7H6Yg==
+ -----END RSA PRIVATE KEY-----
+
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index 4f621274..171802cc 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -45,6 +45,15 @@ apt_mirror_search:
# apt_proxy (configure Acquire::HTTP::Proxy)
apt_proxy: http://my.apt.proxy:3128
+# apt_pipelining (configure Acquire::http::Pipeline-Depth)
+# Default: disables HTTP pipelining. Certain web servers, such
+# as S3 do not pipeline properly (LP: #948461).
+# Valid options:
+# False/default: Disables pipelining for APT
+# None/Unchanged: Use OS default
+# Number: Set pipelining to some number (not recommended)
+apt_pipelining: False
+
# Preserve existing /etc/apt/sources.list
# Default: overwrite sources_list with mirror. If this is true
# then apt_mirror above will have no effect
diff --git a/doc/nocloud/README b/doc/nocloud/README
new file mode 100644
index 00000000..c94b206a
--- /dev/null
+++ b/doc/nocloud/README
@@ -0,0 +1,55 @@
+The data source 'NoCloud' and 'NoCloudNet' allow the user to provide user-data
+and meta-data to the instance without running a network service (or even without
+having a network at all)
+
+You can provide meta-data and user-data to a local vm boot via files on a vfat
+or iso9660 filesystem. These user-data and meta-data files are expected to be
+in the format described in doc/example/seed/README . Basically, user-data is
+simply user-data and meta-data is a yaml formatted file representing what you'd
+find in the EC2 metadata service.
+
+Given a 12.04 cloud disk image in 'disk.img', you can create a sufficient disk
+by following the example below.
+
+## create user-data and meta-data files that will be used
+## to modify image on first boot
+$ { echo instance-id: iid-local01; echo local-hostname: cloudimg; } > meta-data
+
+$ printf "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\n" > user-data
+
+## create a disk to attach with some user-data and meta-data
+$ genisoimage -output seed.iso -volid cidata -joliet -rock user-data meta-data
+
+## alternatively, create a vfat filesystem with same files
+## $ truncate --size 2M seed.img
+## $ mkfs.vfat -n cidata seed.img
+## $ mcopy -oi seed.img user-data meta-data ::
+
+## create a new qcow image to boot, backed by your original image
+$ qemu-img create -f qcow2 -b disk.img boot-disk.img
+
+## boot the image and login as 'ubuntu' with password 'passw0rd'
+## note, passw0rd was set as password through the user-data above,
+## there is no password set on these images.
+$ kvm -m 256 \
+ -net nic -net user,hostfwd=tcp::2222-:22 \
+ -drive file=boot-disk.img,if=virtio \
+ -drive file=seed.iso,if=virtio
+
+Note, that the instance-id provided ('iid-local01' above) is what is used to
+determine if this is "first boot". So if you are making updates to user-data
+you will also have to change that, or start the disk fresh.
+
+
+Also, you can inject an /etc/network/interfaces file by providing the content
+for that file in the 'network-interfaces' field of metadata. Example metadata:
+ instance-id: iid-abcdefg
+ network-interfaces: |
+ iface eth0 inet static
+ address 192.168.1.10
+ network 192.168.1.0
+ netmask 255.255.255.0
+ broadcast 192.168.1.255
+ gateway 192.168.1.254
+ hostname: myhost
+
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
new file mode 100644
index 00000000..e157fa77
--- /dev/null
+++ b/tests/unittests/test__init__.py
@@ -0,0 +1,195 @@
+from mocker import MockerTestCase, ANY, ARGS, KWARGS
+import os
+
+from cloudinit import (partwalker_handle_handler, handler_handle_part,
+ handler_register)
+from cloudinit.util import write_file, logexc
+
+
+class TestPartwalkerHandleHandler(MockerTestCase):
+ def setUp(self):
+ self.data = {
+ "handlercount": 0,
+ "frequency": "?",
+ "handlerdir": "?",
+ "handlers": [],
+ "data": None}
+
+ self.expected_module_name = "part-handler-%03d" % (
+ self.data["handlercount"],)
+ expected_file_name = "%s.py" % self.expected_module_name
+ expected_file_fullname = os.path.join(self.data["handlerdir"],
+ expected_file_name)
+ self.module_fake = "fake module handle"
+ self.ctype = None
+ self.filename = None
+ self.payload = "dummy payload"
+
+ # Mock the write_file function
+ write_file_mock = self.mocker.replace(write_file, passthrough=False)
+ write_file_mock(expected_file_fullname, self.payload, 0600)
+
+ def test_no_errors(self):
+ """Payload gets written to file and added to C{pdata}."""
+ # Mock the __import__ builtin
+ import_mock = self.mocker.replace("__builtin__.__import__")
+ import_mock(self.expected_module_name)
+ self.mocker.result(self.module_fake)
+ # Mock the handle_register function
+ handle_reg_mock = self.mocker.replace(handler_register,
+ passthrough=False)
+ handle_reg_mock(self.module_fake, self.data["handlers"],
+ self.data["data"], self.data["frequency"])
+ # Activate mocks
+ self.mocker.replay()
+
+ partwalker_handle_handler(self.data, self.ctype, self.filename,
+ self.payload)
+
+ self.assertEqual(1, self.data["handlercount"])
+
+ def test_import_error(self):
+ """Module import errors are logged. No handler added to C{pdata}"""
+ # Mock the __import__ builtin
+ import_mock = self.mocker.replace("__builtin__.__import__")
+ import_mock(self.expected_module_name)
+ self.mocker.throw(ImportError())
+ # Mock log function
+ logexc_mock = self.mocker.replace(logexc, passthrough=False)
+ logexc_mock(ANY)
+ # Mock the print_exc function
+ print_exc_mock = self.mocker.replace("traceback.print_exc",
+ passthrough=False)
+ print_exc_mock(ARGS, KWARGS)
+ # Activate mocks
+ self.mocker.replay()
+
+ partwalker_handle_handler(self.data, self.ctype, self.filename,
+ self.payload)
+
+ self.assertEqual(0, self.data["handlercount"])
+
+ def test_attribute_error(self):
+ """Attribute errors are logged. No handler added to C{pdata}"""
+ # Mock the __import__ builtin
+ import_mock = self.mocker.replace("__builtin__.__import__")
+ import_mock(self.expected_module_name)
+ self.mocker.result(self.module_fake)
+ # Mock the handle_register function
+ handle_reg_mock = self.mocker.replace(handler_register,
+ passthrough=False)
+ handle_reg_mock(self.module_fake, self.data["handlers"],
+ self.data["data"], self.data["frequency"])
+ self.mocker.throw(AttributeError())
+ # Mock log function
+ logexc_mock = self.mocker.replace(logexc, passthrough=False)
+ logexc_mock(ANY)
+ # Mock the print_exc function
+ print_exc_mock = self.mocker.replace("traceback.print_exc",
+ passthrough=False)
+ print_exc_mock(ARGS, KWARGS)
+ # Activate mocks
+ self.mocker.replay()
+
+ partwalker_handle_handler(self.data, self.ctype, self.filename,
+ self.payload)
+
+ self.assertEqual(0, self.data["handlercount"])
+
+
+class TestHandlerHandlePart(MockerTestCase):
+ def setUp(self):
+ self.data = "fake data"
+ self.ctype = "fake ctype"
+ self.filename = "fake filename"
+ self.payload = "fake payload"
+ self.frequency = "once-per-instance"
+
+ def test_normal_version_1(self):
+ """
+ C{handle_part} is called without C{frequency} for
+ C{handler_version} == 1.
+ """
+ # Build a mock part-handler module
+ mod_mock = self.mocker.mock()
+ getattr(mod_mock, "frequency")
+ self.mocker.result("once-per-instance")
+ getattr(mod_mock, "handler_version")
+ self.mocker.result(1)
+ mod_mock.handle_part(self.data, self.ctype, self.filename,
+ self.payload)
+ self.mocker.replay()
+
+ handler_handle_part(mod_mock, self.data, self.ctype, self.filename,
+ self.payload, self.frequency)
+
+ def test_normal_version_2(self):
+ """
+ C{handle_part} is called with C{frequency} for
+ C{handler_version} == 2.
+ """
+ # Build a mock part-handler module
+ mod_mock = self.mocker.mock()
+ getattr(mod_mock, "frequency")
+ self.mocker.result("once-per-instance")
+ getattr(mod_mock, "handler_version")
+ self.mocker.result(2)
+ mod_mock.handle_part(self.data, self.ctype, self.filename,
+ self.payload, self.frequency)
+ self.mocker.replay()
+
+ handler_handle_part(mod_mock, self.data, self.ctype, self.filename,
+ self.payload, self.frequency)
+
+ def test_modfreq_per_always(self):
+ """
+ C{handle_part} is called regardless of frequency if nofreq is always.
+ """
+ self.frequency = "once"
+ # Build a mock part-handler module
+ mod_mock = self.mocker.mock()
+ getattr(mod_mock, "frequency")
+ self.mocker.result("always")
+ getattr(mod_mock, "handler_version")
+ self.mocker.result(1)
+ mod_mock.handle_part(self.data, self.ctype, self.filename,
+ self.payload)
+ self.mocker.replay()
+
+ handler_handle_part(mod_mock, self.data, self.ctype, self.filename,
+ self.payload, self.frequency)
+
+ def test_no_handle_when_modfreq_once(self):
+ """C{handle_part} is not called if frequency is once"""
+ self.frequency = "once"
+ # Build a mock part-handler module
+ mod_mock = self.mocker.mock()
+ getattr(mod_mock, "frequency")
+ self.mocker.result("once-per-instance")
+ self.mocker.replay()
+
+ handler_handle_part(mod_mock, self.data, self.ctype, self.filename,
+ self.payload, self.frequency)
+
+ def test_exception_is_caught(self):
+ """Exceptions within C{handle_part} are caught and logged."""
+ # Build a mock part-handler module
+ mod_mock = self.mocker.mock()
+ getattr(mod_mock, "frequency")
+ self.mocker.result("once-per-instance")
+ getattr(mod_mock, "handler_version")
+ self.mocker.result(1)
+ mod_mock.handle_part(self.data, self.ctype, self.filename,
+ self.payload)
+ self.mocker.throw(Exception())
+ # Mock log function
+ logexc_mock = self.mocker.replace(logexc, passthrough=False)
+ logexc_mock(ANY)
+ # Mock the print_exc function
+ print_exc_mock = self.mocker.replace("traceback.print_exc",
+ passthrough=False)
+ print_exc_mock(ARGS, KWARGS)
+ self.mocker.replay()
+
+ handler_handle_part(mod_mock, self.data, self.ctype, self.filename,
+ self.payload, self.frequency)
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
new file mode 100644
index 00000000..d0e121d6
--- /dev/null
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -0,0 +1,151 @@
+from tempfile import mkdtemp
+from shutil import rmtree
+import os
+from StringIO import StringIO
+from copy import copy
+from cloudinit.DataSourceMaaS import (
+ MaasSeedDirNone,
+ MaasSeedDirMalformed,
+ read_maas_seed_dir,
+ read_maas_seed_url,
+)
+from mocker import MockerTestCase
+
+
+class TestMaasDataSource(MockerTestCase):
+
+    def setUp(self):
+        super(TestMaasDataSource, self).setUp()
+        # Make a temp directory for tests to use.
+        self.tmp = mkdtemp(prefix="unittest_")
+
+    def tearDown(self):
+        super(TestMaasDataSource, self).tearDown()
+        # Clean up the temp directory created in setUp.
+        rmtree(self.tmp)
+
+    def test_seed_dir_valid(self):
+        """Verify a well-formed seed dir yields its user-data and metadata."""
+
+        data = {'instance-id': 'i-valid01',
+                'local-hostname': 'valid01-hostname',
+                'user-data': 'valid01-userdata'}
+
+        my_d = os.path.join(self.tmp, "valid")
+        populate_dir(my_d, data)
+
+        (userdata, metadata) = read_maas_seed_dir(my_d)
+
+        self.assertEqual(userdata, data['user-data'])
+        for key in ('instance-id', 'local-hostname'):
+            self.assertEqual(data[key], metadata[key])
+
+        # verify that 'user-data' is not returned as part of the metadata
+        self.assertFalse(('user-data' in metadata))
+
+    def test_seed_dir_valid_extra(self):
+        """Verify extra files do not affect seed_dir validity."""
+
+        data = {'instance-id': 'i-valid-extra',
+                'local-hostname': 'valid-extra-hostname',
+                'user-data': 'valid-extra-userdata', 'foo': 'bar'}
+
+        my_d = os.path.join(self.tmp, "valid_extra")
+        populate_dir(my_d, data)
+
+        (userdata, metadata) = read_maas_seed_dir(my_d)
+
+        self.assertEqual(userdata, data['user-data'])
+        for key in ('instance-id', 'local-hostname'):
+            self.assertEqual(data[key], metadata[key])
+
+        # additional files should not just appear as keys in metadata atm
+        self.assertFalse(('foo' in metadata))
+
+    def test_seed_dir_invalid(self):
+        """Verify that invalid seed_dir raises MaasSeedDirMalformed"""
+
+        valid = {'instance-id': 'i-instanceid',
+                 'local-hostname': 'test-hostname', 'user-data': ''}
+
+        my_based = os.path.join(self.tmp, "valid_extra")
+
+        # missing 'local-hostname' entry
+        my_d = "%s-01" % my_based
+        invalid_data = copy(valid)
+        del invalid_data['local-hostname']
+        populate_dir(my_d, invalid_data)
+        self.assertRaises(MaasSeedDirMalformed, read_maas_seed_dir, my_d)
+
+        # missing 'instance-id' entry
+        my_d = "%s-02" % my_based
+        invalid_data = copy(valid)
+        del invalid_data['instance-id']
+        populate_dir(my_d, invalid_data)
+        self.assertRaises(MaasSeedDirMalformed, read_maas_seed_dir, my_d)
+
+    def test_seed_dir_none(self):
+        """Verify an existing but empty seed_dir raises MaasSeedDirNone"""
+        my_d = os.path.join(self.tmp, "valid_empty")
+        os.mkdir(my_d)  # create the dir so the *empty* case is actually hit
+        self.assertRaises(MaasSeedDirNone, read_maas_seed_dir, my_d)
+
+    def test_seed_dir_missing(self):
+        """Verify that a nonexistent seed_dir raises MaasSeedDirNone"""
+        self.assertRaises(MaasSeedDirNone, read_maas_seed_dir,
+                          os.path.join(self.tmp, "nonexistantdirectory"))
+
+    def test_seed_url_valid(self):
+        """Verify metadata and user-data are fetched from a valid seed URL."""
+        valid = {'meta-data/instance-id': 'i-instanceid',
+                 'meta-data/local-hostname': 'test-hostname',
+                 'user-data': 'foodata'}
+
+        my_seed = "http://example.com/xmeta"
+        my_ver = "1999-99-99"
+        my_headers = {'header1': 'value1', 'header2': 'value2'}
+
+        def my_headers_cb(url):
+            return(my_headers)
+
+        mock_request = self.mocker.replace("urllib2.Request",
+                                           passthrough=False)
+        mock_urlopen = self.mocker.replace("urllib2.urlopen",
+                                           passthrough=False)
+
+        for (key, val) in valid.iteritems():
+            mock_request("%s/%s/%s" % (my_seed, my_ver, key),
+                         data=None, headers=my_headers)
+            self.mocker.nospec()
+            self.mocker.result("fake-request-%s" % key)
+            mock_urlopen("fake-request-%s" % key, timeout=None)
+            self.mocker.result(StringIO(val))
+
+        self.mocker.replay()
+
+        (userdata, metadata) = read_maas_seed_url(my_seed,
+            header_cb=my_headers_cb, version=my_ver)
+
+        self.assertEqual("foodata", userdata)
+        self.assertEqual(metadata['instance-id'],
+                         valid['meta-data/instance-id'])
+        self.assertEqual(metadata['local-hostname'],
+                         valid['meta-data/local-hostname'])
+
+    def test_seed_url_invalid(self):
+        """Verify invalid seed_url raises MaasSeedDirMalformed (TODO: stub)"""
+        pass
+
+    def test_seed_url_missing(self):
+        """Verify seed_url with no entries raises MaasSeedDirNone (TODO: stub)"""
+        pass
+
+
+def populate_dir(seed_dir, files):
+    """Create seed_dir and write each name -> content mapping as a file."""
+    os.mkdir(seed_dir)
+    for (name, content) in files.iteritems():
+        with open(os.path.join(seed_dir, name), "w") as fp:
+            fp.write(content)
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
index 21d2442f..21d2442f 100644
--- a/tests/unittests/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler/test_handler_ca_certs.py
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index d8da8bc9..ca96bc60 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -28,7 +28,7 @@ class TestMergeDict(TestCase):
def test_merge_does_not_override(self):
"""Test that candidate doesn't override source."""
source = {"key1": "value1", "key2": "value2"}
- candidate = {"key2": "value2", "key2": "NEW VALUE"}
+ candidate = {"key1": "value2", "key2": "NEW VALUE"}
result = mergedict(source, candidate)
self.assertEqual(source, result)
diff --git a/tools/run-pylint b/tools/run-pylint
index e271c3d5..46748ffb 100755
--- a/tools/run-pylint
+++ b/tools/run-pylint
@@ -1,6 +1,8 @@
#!/bin/bash
-def_files='cloud*.py cloudinit/*.py cloudinit/CloudConfig/*.py'
+ci_files='cloud*.py cloudinit/*.py cloudinit/CloudConfig/*.py'
+test_files=$(find tests -name "*.py")
+def_files="$ci_files $test_files"
if [ $# -eq 0 ]; then
files=( )