Diffstat (limited to 'cloudinit')
-rw-r--r-- | cloudinit/CloudConfig/cc_apt_pipelining.py   |  53
-rw-r--r-- | cloudinit/CloudConfig/cc_resizefs.py         |   4
-rw-r--r-- | cloudinit/CloudConfig/cc_salt_minion.py      |  56
-rw-r--r-- | cloudinit/CloudConfig/cc_update_etc_hosts.py |   2
-rw-r--r-- | cloudinit/DataSource.py                      |   3
-rw-r--r-- | cloudinit/DataSourceConfigDrive.py           | 231
-rw-r--r-- | cloudinit/DataSourceEc2.py                   |  86
-rw-r--r-- | cloudinit/DataSourceMaaS.py                  | 344
-rw-r--r-- | cloudinit/DataSourceNoCloud.py               |  70
-rw-r--r-- | cloudinit/DataSourceOVF.py                   |   2
-rw-r--r-- | cloudinit/UserDataHandler.py                 |   2
-rw-r--r-- | cloudinit/__init__.py                        |  19
-rw-r--r-- | cloudinit/netinfo.py                         |  11
-rw-r--r-- | cloudinit/util.py                            | 258
14 files changed, 1016 insertions, 125 deletions
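In substance: two new cloud-config modules (cc_apt_pipelining, cc_salt_minion), two new datasources (ConfigDrive, MaaS), device-seeding and dsmode support in NoCloud, and the relocation of EC2's URL-polling loop into util.wait_for_url. For orientation only — the values below are illustrative, not taken from this commit — a cloud-config fragment driving the two new modules might look like:

#cloud-config
apt_pipelining: false          # "false" writes depth 0; "os"/"none"/"unchanged" leave APT alone; 0-5 sets that depth
salt_minion:
  conf:                        # dumped verbatim as YAML into /etc/salt/minion
    master: salt.example.com   # hypothetical master hostname
  public_key: |
    -----BEGIN PUBLIC KEY-----
    ...
  private_key: |
    -----BEGIN PRIVATE KEY-----
    ...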
diff --git a/cloudinit/CloudConfig/cc_apt_pipelining.py b/cloudinit/CloudConfig/cc_apt_pipelining.py
new file mode 100644
index 00000000..0286a9ae
--- /dev/null
+++ b/cloudinit/CloudConfig/cc_apt_pipelining.py
@@ -0,0 +1,53 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2011 Canonical Ltd.
+#
+#    Author: Ben Howard <ben.howard@canonical.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import cloudinit.util as util
+from cloudinit.CloudConfig import per_instance
+
+frequency = per_instance
+default_file = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
+
+
+def handle(_name, cfg, _cloud, log, _args):
+
+    apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
+    apt_pipe_value = str(apt_pipe_value).lower()
+
+    if apt_pipe_value == "false":
+        write_apt_snippet("0", log)
+
+    elif apt_pipe_value in ("none", "unchanged", "os"):
+        return
+
+    elif apt_pipe_value in str(range(0, 6)):
+        write_apt_snippet(apt_pipe_value, log)
+
+    else:
+        log.warn("Invalid option for apt_pipeling: %s" % apt_pipe_value)
+
+
+def write_apt_snippet(setting, log, f_name=default_file):
+    """ Writes f_name with apt pipeline depth 'setting' """
+
+    acquire_pipeline_depth = 'Acquire::http::Pipeline-Depth "%s";\n'
+    file_contents = ("//Written by cloud-init per 'apt_pipelining'\n" +
+                     (acquire_pipeline_depth % setting))
+
+    util.write_file(f_name, file_contents)
+
+    log.debug("Wrote %s with APT pipeline setting" % f_name)
diff --git a/cloudinit/CloudConfig/cc_resizefs.py b/cloudinit/CloudConfig/cc_resizefs.py
index 0186d4d2..c76cc664 100644
--- a/cloudinit/CloudConfig/cc_resizefs.py
+++ b/cloudinit/CloudConfig/cc_resizefs.py
@@ -49,8 +49,8 @@ def handle(_name, cfg, _cloud, log, args):
         dev = os.makedev(os.major(st_dev), os.minor(st_dev))
         os.mknod(devpth, 0400 | stat.S_IFBLK, dev)
     except:
-        if util.islxc():
-            log.debug("inside lxc, ignoring mknod failure in resizefs")
+        if util.is_container():
+            log.debug("inside container, ignoring mknod failure in resizefs")
             return
         log.warn("Failed to make device node to resize /")
         raise
diff --git a/cloudinit/CloudConfig/cc_salt_minion.py b/cloudinit/CloudConfig/cc_salt_minion.py
new file mode 100644
index 00000000..1a3b5039
--- /dev/null
+++ b/cloudinit/CloudConfig/cc_salt_minion.py
@@ -0,0 +1,56 @@
+# vi: ts=4 expandtab
+#
+#    Author: Jeff Bauer <jbauer@rubic.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import os.path
+import subprocess
+import cloudinit.CloudConfig as cc
+import yaml
+
+
+def handle(_name, cfg, _cloud, _log, _args):
+    # If there isn't a salt key in the configuration don't do anything
+    if 'salt_minion' not in cfg:
+        return
+    salt_cfg = cfg['salt_minion']
+    # Start by installing the salt package ...
+    cc.install_packages(("salt",))
+    config_dir = '/etc/salt'
+    if not os.path.isdir(config_dir):
+        os.makedirs(config_dir)
+    # ... and then update the salt configuration
+    if 'conf' in salt_cfg:
+        # Add all sections from the conf object to /etc/salt/minion
+        minion_config = os.path.join(config_dir, 'minion')
+        yaml.dump(salt_cfg['conf'],
+                  file(minion_config, 'w'),
+                  default_flow_style=False)
+    # ... copy the key pair if specified
+    if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
+        pki_dir = '/etc/salt/pki'
+        cumask = os.umask(077)
+        if not os.path.isdir(pki_dir):
+            os.makedirs(pki_dir)
+        pub_name = os.path.join(pki_dir, 'minion.pub')
+        pem_name = os.path.join(pki_dir, 'minion.pem')
+        with open(pub_name, 'w') as f:
+            f.write(salt_cfg['public_key'])
+        with open(pem_name, 'w') as f:
+            f.write(salt_cfg['private_key'])
+        os.umask(cumask)
+
+    # Start salt-minion
+    subprocess.check_call(['service', 'salt-minion', 'start'])
diff --git a/cloudinit/CloudConfig/cc_update_etc_hosts.py b/cloudinit/CloudConfig/cc_update_etc_hosts.py
index 572e6750..6ad2fca8 100644
--- a/cloudinit/CloudConfig/cc_update_etc_hosts.py
+++ b/cloudinit/CloudConfig/cc_update_etc_hosts.py
@@ -28,7 +28,7 @@ frequency = per_always
 
 def handle(_name, cfg, cloud, log, _args):
     (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
-    manage_hosts = util.get_cfg_option_bool(cfg, "manage_etc_hosts", False)
+    manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
     if manage_hosts in ("True", "true", True, "template"):
         # render from template file
         try:
diff --git a/cloudinit/DataSource.py b/cloudinit/DataSource.py
index f38e3b20..1bcb9c10 100644
--- a/cloudinit/DataSource.py
+++ b/cloudinit/DataSource.py
@@ -72,6 +72,9 @@ class DataSource:
         if isinstance(self.metadata['public-keys'], str):
             return([self.metadata['public-keys'], ])
 
+        if isinstance(self.metadata['public-keys'], list):
+            return(self.metadata['public-keys'])
+
         for _keyname, klist in self.metadata['public-keys'].items():
             # lp:506332 uec metadata service responds with
             # data that makes boto populate a string for 'klist' rather
diff --git a/cloudinit/DataSourceConfigDrive.py b/cloudinit/DataSourceConfigDrive.py
new file mode 100644
index 00000000..2db4a76a
--- /dev/null
+++ b/cloudinit/DataSourceConfigDrive.py
@@ -0,0 +1,231 @@
+# Copyright (C) 2012 Canonical Ltd.
+#
+#    Author: Scott Moser <scott.moser@canonical.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import cloudinit.DataSource as DataSource
+
+from cloudinit import seeddir as base_seeddir
+from cloudinit import log
+import cloudinit.util as util
+import os.path
+import os
+import json
+import subprocess
+
+DEFAULT_IID = "iid-dsconfigdrive"
+
+
+class DataSourceConfigDrive(DataSource.DataSource):
+    seed = None
+    seeddir = base_seeddir + '/config_drive'
+    cfg = {}
+    userdata_raw = None
+    metadata = None
+    dsmode = "local"
+
+    def __str__(self):
+        mstr = "DataSourceConfigDrive[%s]" % self.dsmode
+        mstr = mstr + " [seed=%s]" % self.seed
+        return(mstr)
+
+    def get_data(self):
+        found = None
+        md = {}
+        ud = ""
+
+        defaults = {"instance-id": DEFAULT_IID, "dsmode": "pass"}
+
+        if os.path.isdir(self.seeddir):
+            try:
+                (md, ud) = read_config_drive_dir(self.seeddir)
+                found = self.seeddir
+            except nonConfigDriveDir:
+                pass
+
+        if not found:
+            dev = cfg_drive_device()
+            if dev:
+                try:
+                    (md, ud) = util.mount_callback_umount(dev,
+                        read_config_drive_dir)
+                    found = dev
+                except (nonConfigDriveDir, util.mountFailedError):
+                    pass
+
+        if not found:
+            return False
+
+        if 'dsconfig' in md:
+            self.cfg = md['dscfg']
+
+        md = util.mergedict(md, defaults)
+
+        # update interfaces and ifup only on the local datasource
+        # this way the DataSourceConfigDriveNet doesn't do it also.
+        if 'network-interfaces' in md and self.dsmode == "local":
+            if md['dsmode'] == "pass":
+                log.info("updating network interfaces from configdrive")
+            else:
+                log.debug("updating network interfaces from configdrive")
+
+            util.write_file("/etc/network/interfaces",
+                md['network-interfaces'])
+            try:
+                (out, err) = util.subp(['ifup', '--all'])
+                if len(out) or len(err):
+                    log.warn("ifup --all had stderr: %s" % err)
+
+            except subprocess.CalledProcessError as exc:
+                log.warn("ifup --all failed: %s" % (exc.output[1]))
+
+        self.seed = found
+        self.metadata = md
+        self.userdata_raw = ud
+
+        if md['dsmode'] == self.dsmode:
+            return True
+
+        log.debug("%s: not claiming datasource, dsmode=%s" %
+            (self, md['dsmode']))
+        return False
+
+    def get_public_ssh_keys(self):
+        if not 'public-keys' in self.metadata:
+            return([])
+        return(self.metadata['public-keys'])
+
+    # the data sources' config_obj is a cloud-config formated
+    # object that came to it from ways other than cloud-config
+    # because cloud-config content would be handled elsewhere
+    def get_config_obj(self):
+        return(self.cfg)
+
+
+class DataSourceConfigDriveNet(DataSourceConfigDrive):
+    dsmode = "net"
+
+
+class nonConfigDriveDir(Exception):
+    pass
+
+
+def cfg_drive_device():
+    """ get the config drive device.  return a string like '/dev/vdb'
+        or None (if there is no non-root device attached). This does not
+        check the contents, only reports that if there *were* a config_drive
+        attached, it would be this device.
+        per config_drive documentation, this is
+          "associated as the last available disk on the instance"
+    """
+
+    if 'CLOUD_INIT_CONFIG_DRIVE_DEVICE' in os.environ:
+        return(os.environ['CLOUD_INIT_CONFIG_DRIVE_DEVICE'])
+
+    # we are looking for a raw block device (sda, not sda1) with a vfat
+    # filesystem on it.
+
+    letters = "abcdefghijklmnopqrstuvwxyz"
+    devs = util.find_devs_with("TYPE=vfat")
+
+    # filter out anything not ending in a letter (ignore partitions)
+    devs = [f for f in devs if f[-1] in letters]
+
+    # sort them in reverse so "last" device is first
+    devs.sort(reverse=True)
+
+    if len(devs):
+        return(devs[0])
+
+    return(None)
+
+
+def read_config_drive_dir(source_dir):
+    """
+    read_config_drive_dir(source_dir):
+       read source_dir, and return a tuple with metadata dict and user-data
+       string populated.  If not a valid dir, raise a nonConfigDriveDir
+    """
+    md = {}
+    ud = ""
+
+    flist = ("etc/network/interfaces", "root/.ssh/authorized_keys", "meta.js")
+    found = [f for f in flist if os.path.isfile("%s/%s" % (source_dir, f))]
+    keydata = ""
+
+    if len(found) == 0:
+        raise nonConfigDriveDir("%s: %s" % (source_dir, "no files found"))
+
+    if "etc/network/interfaces" in found:
+        with open("%s/%s" % (source_dir, "/etc/network/interfaces")) as fp:
+            md['network-interfaces'] = fp.read()
+
+    if "root/.ssh/authorized_keys" in found:
+        with open("%s/%s" % (source_dir, "root/.ssh/authorized_keys")) as fp:
+            keydata = fp.read()
+
+    meta_js = {}
+
+    if "meta.js" in found:
+        content = ''
+        with open("%s/%s" % (source_dir, "meta.js")) as fp:
+            content = fp.read()
+            md['meta_js'] = content
+        try:
+            meta_js = json.loads(content)
+        except ValueError:
+            raise nonConfigDriveDir("%s: %s" %
+                (source_dir, "invalid json in meta.js"))
+
+    keydata = meta_js.get('public-keys', keydata)
+
+    if keydata:
+        lines = keydata.splitlines()
+        md['public-keys'] = [l for l in lines
+            if len(l) and not l.startswith("#")]
+
+    for copy in ('dsmode', 'instance-id', 'dscfg'):
+        if copy in meta_js:
+            md[copy] = meta_js[copy]
+
+    if 'user-data' in meta_js:
+        ud = meta_js['user-data']
+
+    return(md, ud)
+
+datasources = (
+  (DataSourceConfigDrive, (DataSource.DEP_FILESYSTEM, )),
+  (DataSourceConfigDriveNet,
+    (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
+)
+
+
+# return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+    return(DataSource.list_from_depends(depends, datasources))
+
+if __name__ == "__main__":
+    def main():
+        import sys
+        import pprint
+        print cfg_drive_device()
+        (md, ud) = read_config_drive_dir(sys.argv[1])
+        print "=== md ==="
+        pprint.pprint(md)
+        print "=== ud ==="
+        print(ud)
+
+    main()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/DataSourceEc2.py b/cloudinit/DataSourceEc2.py
index 06635746..7051ecda 100644
--- a/cloudinit/DataSourceEc2.py
+++ b/cloudinit/DataSourceEc2.py
@@ -24,7 +24,6 @@ from cloudinit import seeddir as base_seeddir
 from cloudinit import log
 import cloudinit.util as util
 import socket
-import urllib2
 import time
 import boto.utils as boto_utils
 import os.path
@@ -134,8 +133,8 @@ class DataSourceEc2(DataSource.DataSource):
                 url2base[cur] = url
 
         starttime = time.time()
-        url = wait_for_metadata_service(urls=urls, max_wait=max_wait,
-                                        timeout=timeout, status_cb=log.warn)
+        url = util.wait_for_url(urls=urls, max_wait=max_wait,
+                                timeout=timeout, status_cb=log.warn)
 
         if url:
             log.debug("Using metadata source: '%s'" % url2base[url])
@@ -208,87 +207,6 @@ class DataSourceEc2(DataSource.DataSource):
         return False
 
 
-def wait_for_metadata_service(urls, max_wait=None, timeout=None,
-                              status_cb=None):
-    """
-    urls:      a list of urls to try
-    max_wait:  roughly the maximum time to wait before giving up
-               The max time is *actually* len(urls)*timeout as each url will
-               be tried once and given the timeout provided.
-    timeout:   the timeout provided to urllib2.urlopen
-    status_cb: call method with string message when a url is not available
-
-    the idea of this routine is to wait for the EC2 metdata service to
-    come up.  On both Eucalyptus and EC2 we have seen the case where
-    the instance hit the MD before the MD service was up.  EC2 seems
-    to have permenantely fixed this, though.
-
-    In openstack, the metadata service might be painfully slow, and
-    unable to avoid hitting a timeout of even up to 10 seconds or more
-    (LP: #894279) for a simple GET.
-
-    Offset those needs with the need to not hang forever (and block boot)
-    on a system where cloud-init is configured to look for EC2 Metadata
-    service but is not going to find one.  It is possible that the instance
-    data host (169.254.169.254) may be firewalled off Entirely for a sytem,
-    meaning that the connection will block forever unless a timeout is set.
-    """
-    starttime = time.time()
-
-    sleeptime = 1
-
-    def nullstatus_cb(msg):
-        return
-
-    if status_cb == None:
-        status_cb = nullstatus_cb
-
-    def timeup(max_wait, starttime):
-        return((max_wait <= 0 or max_wait == None) or
-               (time.time() - starttime > max_wait))
-
-    loop_n = 0
-    while True:
-        sleeptime = int(loop_n / 5) + 1
-        for url in urls:
-            now = time.time()
-            if loop_n != 0:
-                if timeup(max_wait, starttime):
-                    break
-                if timeout and (now + timeout > (starttime + max_wait)):
-                    # shorten timeout to not run way over max_time
-                    timeout = int((starttime + max_wait) - now)
-
-            reason = ""
-            try:
-                req = urllib2.Request(url)
-                resp = urllib2.urlopen(req, timeout=timeout)
-                if resp.read() != "":
-                    return url
-                reason = "empty data [%s]" % resp.getcode()
-            except urllib2.HTTPError as e:
-                reason = "http error [%s]" % e.code
-            except urllib2.URLError as e:
-                reason = "url error [%s]" % e.reason
-            except socket.timeout as e:
-                reason = "socket timeout [%s]" % e
-            except Exception as e:
-                reason = "unexpected error [%s]" % e
-
-            if log:
-                status_cb("'%s' failed [%s/%ss]: %s" %
-                          (url, int(time.time() - starttime), max_wait,
-                           reason))
-
-            if timeup(max_wait, starttime):
-                break
-
-        loop_n = loop_n + 1
-        time.sleep(sleeptime)
-
-    return False
-
-
 datasources = [
   (DataSourceEc2, (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
 ]
diff --git a/cloudinit/DataSourceMaaS.py b/cloudinit/DataSourceMaaS.py
new file mode 100644
index 00000000..fd9d6316
--- /dev/null
+++ b/cloudinit/DataSourceMaaS.py
@@ -0,0 +1,344 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2009-2010 Canonical Ltd.
+#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+#    Author: Scott Moser <scott.moser@canonical.com>
+#    Author: Juerg Hafliger <juerg.haefliger@hp.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import cloudinit.DataSource as DataSource
+
+from cloudinit import seeddir as base_seeddir
+from cloudinit import log
+import cloudinit.util as util
+import errno
+import oauth.oauth as oauth
+import os.path
+import urllib2
+import time
+
+
+MD_VERSION = "2012-03-01"
+
+
+class DataSourceMaaS(DataSource.DataSource):
+    """
+    DataSourceMaaS reads instance information from MaaS.
+    Given a config metadata_url, and oauth tokens, it expects to find
+    files under the root named:
+      instance-id
+      user-data
+      hostname
+    """
+    seeddir = base_seeddir + '/maas'
+    baseurl = None
+
+    def __str__(self):
+        return("DataSourceMaaS[%s]" % self.baseurl)
+
+    def get_data(self):
+        mcfg = self.ds_cfg
+
+        try:
+            (userdata, metadata) = read_maas_seed_dir(self.seeddir)
+            self.userdata_raw = userdata
+            self.metadata = metadata
+            self.baseurl = self.seeddir
+            return True
+        except MaasSeedDirNone:
+            pass
+        except MaasSeedDirMalformed as exc:
+            log.warn("%s was malformed: %s\n" % (self.seeddir, exc))
+            raise
+
+        try:
+            # if there is no metadata_url, then we're not configured
+            url = mcfg.get('metadata_url', None)
+            if url == None:
+                return False
+
+            if not self.wait_for_metadata_service(url):
+                return False
+
+            self.baseurl = url
+
+            (userdata, metadata) = read_maas_seed_url(self.baseurl,
+                self.md_headers)
+            self.userdata_raw = userdata
+            self.metadata = metadata
+            return True
+        except Exception:
+            util.logexc(log)
+            return False
+
+    def md_headers(self, url):
+        mcfg = self.ds_cfg
+
+        # if we are missing token_key, token_secret or consumer_key
+        # then just do non-authed requests
+        for required in ('token_key', 'token_secret', 'consumer_key'):
+            if required not in mcfg:
+                return({})
+
+        consumer_secret = mcfg.get('consumer_secret', "")
+
+        return(oauth_headers(url=url, consumer_key=mcfg['consumer_key'],
+            token_key=mcfg['token_key'], token_secret=mcfg['token_secret'],
+            consumer_secret=consumer_secret))
+
+    def wait_for_metadata_service(self, url):
+        mcfg = self.ds_cfg
+
+        max_wait = 120
+        try:
+            max_wait = int(mcfg.get("max_wait", max_wait))
+        except Exception:
+            util.logexc(log)
+            log.warn("Failed to get max wait. using %s" % max_wait)
+
+        if max_wait == 0:
+            return False
+
+        timeout = 50
+        try:
+            timeout = int(mcfg.get("timeout", timeout))
+        except Exception:
+            util.logexc(log)
+            log.warn("Failed to get timeout, using %s" % timeout)
+
+        starttime = time.time()
+        check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
+        url = util.wait_for_url(urls=[check_url], max_wait=max_wait,
+            timeout=timeout, status_cb=log.warn,
+            headers_cb=self.md_headers)
+
+        if url:
+            log.debug("Using metadata source: '%s'" % url)
+        else:
+            log.critical("giving up on md after %i seconds\n" %
+                int(time.time() - starttime))
+
+        return (bool(url))
+
+
+def read_maas_seed_dir(seed_d):
+    """
+    Return user-data and metadata for a maas seed dir in seed_d.
+    Expected format of seed_d are the following files:
+      * instance-id
+      * local-hostname
+      * user-data
+    """
+    files = ('local-hostname', 'instance-id', 'user-data')
+    md = {}
+
+    if not os.path.isdir(seed_d):
+        raise MaasSeedDirNone("%s: not a directory")
+
+    for fname in files:
+        try:
+            with open(os.path.join(seed_d, fname)) as fp:
+                md[fname] = fp.read()
+                fp.close()
+        except IOError as e:
+            if e.errno != errno.ENOENT:
+                raise
+
+    return(check_seed_contents(md, seed_d))
+
+
+def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
+                       version=MD_VERSION):
+    """
+    Read the maas datasource at seed_url.
+    header_cb is a method that should return a headers dictionary that will
+    be given to urllib2.Request()
+
+    Expected format of seed_url is are the following files:
+      * <seed_url>/<version>/instance-id
+      * <seed_url>/<version>/local-hostname
+      * <seed_url>/<version>/user-data
+    """
+    files = ('meta-data/local-hostname', 'meta-data/instance-id', 'user-data')
+
+    base_url = "%s/%s" % (seed_url, version)
+    md = {}
+    for fname in files:
+        url = "%s/%s" % (base_url, fname)
+        if header_cb:
+            headers = header_cb(url)
+        else:
+            headers = {}
+
+        try:
+            req = urllib2.Request(url, data=None, headers=headers)
+            resp = urllib2.urlopen(req, timeout=timeout)
+            md[os.path.basename(fname)] = resp.read()
+        except urllib2.HTTPError as e:
+            if e.code != 404:
+                raise
+
+    return(check_seed_contents(md, seed_url))
+
+
+def check_seed_contents(content, seed):
+    """Validate if content is Is the content a dict that is valid as a
+       return for a datasource.
+       Either return a (userdata, metadata) tuple or
+       Raise MaasSeedDirMalformed or MaasSeedDirNone
+    """
+    md_required = ('instance-id', 'local-hostname')
+    found = content.keys()
+
+    if len(content) == 0:
+        raise MaasSeedDirNone("%s: no data files found" % seed)
+
+    missing = [k for k in md_required if k not in found]
+    if len(missing):
+        raise MaasSeedDirMalformed("%s: missing files %s" % (seed, missing))
+
+    userdata = content.get('user-data', "")
+    md = {}
+    for (key, val) in content.iteritems():
+        if key == 'user-data':
+            continue
+        md[key] = val
+
+    return(userdata, md)
+
+
+def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret):
+    consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
+    token = oauth.OAuthToken(token_key, token_secret)
+    params = {
+        'oauth_version': "1.0",
+        'oauth_nonce': oauth.generate_nonce(),
+        'oauth_timestamp': int(time.time()),
+        'oauth_token': token.key,
+        'oauth_consumer_key': consumer.key,
+    }
+    req = oauth.OAuthRequest(http_url=url, parameters=params)
+    req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(),
+        consumer, token)
+    return(req.to_header())
+
+
+class MaasSeedDirNone(Exception):
+    pass
+
+
+class MaasSeedDirMalformed(Exception):
+    pass
+
+
+datasources = [
+  (DataSourceMaaS, (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
+]
+
+
+# return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+    return(DataSource.list_from_depends(depends, datasources))
+
+
+if __name__ == "__main__":
+    def main():
+        """
+        Call with single argument of directory or http or https url.
+        If url is given additional arguments are allowed, which will be
+        interpreted as consumer_key, token_key, token_secret, consumer_secret
+        """
+        import argparse
+        import pprint
+
+        parser = argparse.ArgumentParser(description='Interact with Maas DS')
+        parser.add_argument("--config", metavar="file",
+            help="specify DS config file", default=None)
+        parser.add_argument("--ckey", metavar="key",
+            help="the consumer key to auth with", default=None)
+        parser.add_argument("--tkey", metavar="key",
+            help="the token key to auth with", default=None)
+        parser.add_argument("--csec", metavar="secret",
+            help="the consumer secret (likely '')", default="")
+        parser.add_argument("--tsec", metavar="secret",
+            help="the token secret to auth with", default=None)
+        parser.add_argument("--apiver", metavar="version",
+            help="the apiver to use ("" can be used)", default=MD_VERSION)
+
+        subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
+        subcmds.add_parser('crawl', help="crawl the datasource")
+        subcmds.add_parser('get', help="do a single GET of provided url")
+        subcmds.add_parser('check-seed', help="read andn verify seed at url")
+
+        parser.add_argument("url", help="the data source to query")
+
+        args = parser.parse_args()
+
+        creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
+            'token_secret': args.tsec, 'consumer_secret': args.csec}
+
+        if args.config:
+            import yaml
+            with open(args.config) as fp:
+                cfg = yaml.load(fp)
+            if 'datasource' in cfg:
+                cfg = cfg['datasource']['MaaS']
+            for key in creds.keys():
+                if key in cfg and creds[key] == None:
+                    creds[key] = cfg[key]
+
+        def geturl(url, headers_cb):
+            req = urllib2.Request(url, data=None, headers=headers_cb(url))
+            return(urllib2.urlopen(req).read())
+
+        def printurl(url, headers_cb):
+            print "== %s ==\n%s\n" % (url, geturl(url, headers_cb))
+
+        def crawl(url, headers_cb=None):
+            if url.endswith("/"):
+                for line in geturl(url, headers_cb).splitlines():
+                    if line.endswith("/"):
+                        crawl("%s%s" % (url, line), headers_cb)
+                    else:
+                        printurl("%s%s" % (url, line), headers_cb)
+            else:
+                printurl(url, headers_cb)
+
+        def my_headers(url):
+            headers = {}
+            if creds.get('consumer_key', None) != None:
+                headers = oauth_headers(url, **creds)
+            return headers
+
+        if args.subcmd == "check-seed":
+            if args.url.startswith("http"):
+                (userdata, metadata) = read_maas_seed_url(args.url,
+                    header_cb=my_headers, version=args.apiver)
+            else:
+                (userdata, metadata) = read_maas_seed_url(args.url)
+            print "=== userdata ==="
+            print userdata
+            print "=== metadata ==="
+            pprint.pprint(metadata)
+
+        elif args.subcmd == "get":
+            printurl(args.url, my_headers)
+
+        elif args.subcmd == "crawl":
+            if not args.url.endswith("/"):
+                args.url = "%s/" % args.url
+            crawl(args.url, my_headers)
+
+    main()
diff --git a/cloudinit/DataSourceNoCloud.py b/cloudinit/DataSourceNoCloud.py
index fa64f2e5..62ecc088 100644
--- a/cloudinit/DataSourceNoCloud.py
+++ b/cloudinit/DataSourceNoCloud.py
@@ -23,6 +23,8 @@ import cloudinit.DataSource as DataSource
 from cloudinit import seeddir as base_seeddir
 from cloudinit import log
 import cloudinit.util as util
+import errno
+import subprocess
 
 
 class DataSourceNoCloud(DataSource.DataSource):
@@ -30,6 +32,7 @@ class DataSourceNoCloud(DataSource.DataSource):
     userdata = None
     userdata_raw = None
     supported_seed_starts = ("/", "file://")
+    dsmode = "local"
     seed = None
     cmdline_id = "ds=nocloud"
     seeddir = base_seeddir + '/nocloud'
@@ -41,7 +44,7 @@ class DataSourceNoCloud(DataSource.DataSource):
 
     def get_data(self):
         defaults = {
-            "instance-id": "nocloud"
+            "instance-id": "nocloud", "dsmode": self.dsmode
         }
 
         found = []
@@ -64,13 +67,47 @@ class DataSourceNoCloud(DataSource.DataSource):
             found.append(self.seeddir)
             log.debug("using seeded cache data in %s" % self.seeddir)
 
+        fslist = util.find_devs_with("TYPE=vfat")
+        fslist.extend(util.find_devs_with("TYPE=iso9660"))
+
+        label_list = util.find_devs_with("LABEL=cidata")
+        devlist = list(set(fslist) & set(label_list))
+        devlist.sort(reverse=True)
+
+        for dev in devlist:
+            try:
+                (newmd, newud) = util.mount_callback_umount(dev,
+                    util.read_seeded)
+                md = util.mergedict(newmd, md)
+                ud = newud
+
+                # for seed from a device, the default mode is 'net'.
+                # that is more likely to be what is desired.
+                # If they want dsmode of local, then they must
+                # specify that.
+                if 'dsmode' not in md:
+                    md['dsmode'] = "net"
+
+                log.debug("using data from %s" % dev)
+                found.append(dev)
+                break
+            except OSError, e:
+                if e.errno != errno.ENOENT:
+                    raise
+            except util.mountFailedError:
+                log.warn("Failed to mount %s when looking for seed" % dev)
+
         # there was no indication on kernel cmdline or data
         # in the seeddir suggesting this handler should be used.
         if len(found) == 0:
             return False
 
+        seeded_interfaces = None
+
         # the special argument "seedfrom" indicates we should
         # attempt to seed the userdata / metadata from its value
+        # its primarily value is in allowing the user to type less
+        # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
         if "seedfrom" in md:
             seedfrom = md["seedfrom"]
             seedfound = False
@@ -83,6 +120,9 @@ class DataSourceNoCloud(DataSource.DataSource):
                     (seedfrom, self.__class__))
                 return False
 
+            if 'network-interfaces' in md:
+                seeded_interfaces = self.dsmode
+
             # this could throw errors, but the user told us to do it
             # so if errors are raised, let them raise
             (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
@@ -93,10 +133,35 @@ class DataSourceNoCloud(DataSource.DataSource):
             found.append(seedfrom)
 
         md = util.mergedict(md, defaults)
+
+        # update the network-interfaces if metadata had 'network-interfaces'
+        # entry and this is the local datasource, or 'seedfrom' was used
+        # and the source of the seed was self.dsmode
+        # ('local' for NoCloud, 'net' for NoCloudNet')
+        if ('network-interfaces' in md and
+            (self.dsmode in ("local", seeded_interfaces))):
+            log.info("updating network interfaces from nocloud")
+
+            util.write_file("/etc/network/interfaces",
+                md['network-interfaces'])
+            try:
+                (out, err) = util.subp(['ifup', '--all'])
+                if len(out) or len(err):
+                    log.warn("ifup --all had stderr: %s" % err)
+
+            except subprocess.CalledProcessError as exc:
+                log.warn("ifup --all failed: %s" % (exc.output[1]))
+
         self.seed = ",".join(found)
         self.metadata = md
         self.userdata_raw = ud
-        return True
+
+        if md['dsmode'] == self.dsmode:
+            return True
+
+        log.debug("%s: not claiming datasource, dsmode=%s" %
+            (self, md['dsmode']))
+        return False
 
     # returns true or false indicating if cmdline indicated
@@ -145,6 +210,7 @@ class DataSourceNoCloudNet(DataSourceNoCloud):
     cmdline_id = "ds=nocloud-net"
     supported_seed_starts = ("http://", "https://", "ftp://")
     seeddir = base_seeddir + '/nocloud-net'
+    dsmode = "net"
 
 
 datasources = (
diff --git a/cloudinit/DataSourceOVF.py b/cloudinit/DataSourceOVF.py
index 1f2b622e..a0b1b518 100644
--- a/cloudinit/DataSourceOVF.py
+++ b/cloudinit/DataSourceOVF.py
@@ -162,7 +162,7 @@ def get_ovf_env(dirname):
 
 # transport functions take no input and return
 # a 3 tuple of content, path, filename
-def transport_iso9660(require_iso=False):
+def transport_iso9660(require_iso=True):
 
     # default_regex matches values in
     # /lib/udev/rules.d/60-cdrom_id.rules
diff --git a/cloudinit/UserDataHandler.py b/cloudinit/UserDataHandler.py
index 93d1d36a..98729056 100644
--- a/cloudinit/UserDataHandler.py
+++ b/cloudinit/UserDataHandler.py
@@ -71,6 +71,8 @@ def do_include(content, appendmsg):
             line = line[len("#include"):].lstrip()
         if line.startswith("#"):
             continue
+        if line.strip() == "":
+            continue
 
         # urls cannot not have leading or trailing white space
         msum = hashlib.md5()  # pylint: disable=E1101
diff --git a/cloudinit/__init__.py b/cloudinit/__init__.py
index 7a34e053..9f188766 100644
--- a/cloudinit/__init__.py
+++ b/cloudinit/__init__.py
@@ -29,7 +29,7 @@ cfg_env_name = "CLOUD_CFG"
 
 cfg_builtin = """
 log_cfgs: []
-datasource_list: ["NoCloud", "OVF", "Ec2"]
+datasource_list: ["NoCloud", "ConfigDrive", "OVF", "MaaS", "Ec2" ]
 def_log_file: /var/log/cloud-init.log
 syslog_fix_perms: syslog:adm
 """
@@ -60,7 +60,6 @@ import cPickle
 import sys
 import os.path
 import errno
-import pwd
 import subprocess
 import yaml
 import logging
@@ -572,10 +571,14 @@ def handler_handle_part(mod, data, ctype, filename, payload, frequency):
     if not (modfreq == per_always or
             (frequency == per_instance and modfreq == per_instance)):
         return
-    if mod.handler_version == 1:
-        mod.handle_part(data, ctype, filename, payload)
-    else:
-        mod.handle_part(data, ctype, filename, payload, frequency)
+    try:
+        if mod.handler_version == 1:
+            mod.handle_part(data, ctype, filename, payload)
+        else:
+            mod.handle_part(data, ctype, filename, payload, frequency)
+    except:
+        util.logexc(log)
+        traceback.print_exc(file=sys.stderr)
 
 
 def partwalker_handle_handler(pdata, _ctype, _filename, payload):
@@ -586,15 +589,13 @@ def partwalker_handle_handler(pdata, _ctype, _filename, payload):
     modfname = modname + ".py"
     util.write_file("%s/%s" % (pdata['handlerdir'], modfname), payload, 0600)
 
-    pdata['handlercount'] = curcount + 1
-
     try:
         mod = __import__(modname)
         handler_register(mod, pdata['handlers'], pdata['data'], frequency)
+        pdata['handlercount'] = curcount + 1
     except:
        util.logexc(log)
        traceback.print_exc(file=sys.stderr)
-        return
 
 
 def partwalker_callback(pdata, ctype, filename, payload):
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index be7ed3a9..7e07812e 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -22,7 +22,7 @@
 import subprocess
 
 
-def netdev_info():
+def netdev_info(empty=""):
     fields = ("hwaddr", "addr", "bcast", "mask")
     ifcfg_out = str(subprocess.check_output(["ifconfig", "-a"]))
     devs = {}
@@ -59,6 +59,13 @@ def netdev_info():
                 pass
             elif toks[i].startswith("%s:" % field):
                 devs[curdev][target] = toks[i][len(field) + 1:]
+
+    if empty != "":
+        for (_devname, dev) in devs.iteritems():
+            for field in dev:
+                if dev[field] == "":
+                    dev[field] = empty
+
     return(devs)
 
 
@@ -85,7 +92,7 @@ def getgateway():
 def debug_info(pre="ci-info: "):
     lines = []
     try:
-        netdev = netdev_info()
+        netdev = netdev_info(empty=".")
     except Exception:
         lines.append("netdev_info failed!")
         netdev = {}
diff --git a/cloudinit/util.py b/cloudinit/util.py
index e6489648..9133426c 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -32,6 +32,7 @@ import re
 import socket
 import sys
 import time
+import tempfile
 import traceback
 import urlparse
 
@@ -208,16 +209,18 @@ def runparts(dirp, skip_no_exist=True):
     if skip_no_exist and not os.path.isdir(dirp):
         return
 
-    # per bug 857926, Fedora's run-parts will exit failure on empty dir
-    if os.path.isdir(dirp) and os.listdir(dirp) == []:
-        return
-
-    cmd = ['run-parts', '--regex', '.*', dirp]
-    sp = subprocess.Popen(cmd)
-    sp.communicate()
-    if sp.returncode is not 0:
-        raise subprocess.CalledProcessError(sp.returncode, cmd)
-    return
+    failed = 0
+    for exe_name in sorted(os.listdir(dirp)):
+        exe_path = os.path.join(dirp, exe_name)
+        if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
+            popen = subprocess.Popen([exe_path])
+            popen.communicate()
+            if popen.returncode is not 0:
+                failed += 1
+                sys.stderr.write("failed: %s [%i]\n" %
+                                 (exe_path, popen.returncode))
+    if failed:
+        raise RuntimeError('runparts: %i failures' % failed)
 
 
 def subp(args, input_=None):
@@ -515,30 +518,70 @@ def dos2unix(string):
     return(string.replace('\r\n', '\n'))
 
 
-def islxc():
-    # is this host running lxc?
+def is_container():
+    # is this code running in a container of some sort
+
+    for helper in ('running-in-container', 'lxc-is-container'):
+        try:
+            # try to run a helper program. if it returns true
+            # then we're inside a container. otherwise, no
+            sp = subprocess.Popen(helper, stdout=subprocess.PIPE,
+                                  stderr=subprocess.PIPE)
+            sp.communicate(None)
+            return(sp.returncode == 0)
+        except OSError as e:
+            if e.errno != errno.ENOENT:
+                raise
+
+    # this code is largely from the logic in
+    # ubuntu's /etc/init/container-detect.conf
     try:
-        with open("/proc/1/cgroup") as f:
-            if f.read() == "/":
-                return True
+        # Detect old-style libvirt
+        # Detect OpenVZ containers
+        pid1env = get_proc_env(1)
+        if "container" in pid1env:
+            return True
+
+        if "LIBVIRT_LXC_UUID" in pid1env:
+            return True
+
     except IOError as e:
         if e.errno != errno.ENOENT:
-            raise
+            pass
+
+    # Detect OpenVZ containers
+    if os.path.isdir("/proc/vz") and not os.path.isdir("/proc/bc"):
+        return True
 
     try:
-        # try to run a program named 'lxc-is-container'. if it returns true,
-        # then we're inside a container. otherwise, no
-        sp = subprocess.Popen(['lxc-is-container'], stdout=subprocess.PIPE,
-                              stderr=subprocess.PIPE)
-        sp.communicate(None)
-        return(sp.returncode == 0)
-    except OSError as e:
+        # Detect Vserver containers
+        with open("/proc/self/status") as fp:
+            lines = fp.read().splitlines()
+            for line in lines:
+                if line.startswith("VxID:"):
+                    (_key, val) = line.strip().split(":", 1)
+                    if val != "0":
+                        return True
+    except IOError as e:
         if e.errno != errno.ENOENT:
-            raise
+            pass
 
     return False
 
 
+def get_proc_env(pid):
+    # return the environment in a dict that a given process id was started with
+    env = {}
+    with open("/proc/%s/environ" % pid) as fp:
+        toks = fp.read().split("\0")
+        for tok in toks:
+            if tok == "":
+                continue
+            (name, val) = tok.split("=", 1)
+            env[name] = val
+    return env
+
+
 def get_hostname_fqdn(cfg, cloud):
     # return the hostname and fqdn from 'cfg'.  If not found in cfg,
     # then fall back to data from cloud
@@ -630,3 +673,170 @@ def close_stdin():
         return
     with open(os.devnull) as fp:
         os.dup2(fp.fileno(), sys.stdin.fileno())
+
+
+def find_devs_with(criteria):
+    """
+    find devices matching given criteria (via blkid)
+    criteria can be *one* of:
+      TYPE=<filesystem>
+      LABEL=<label>
+      UUID=<uuid>
+    """
+    try:
+        (out, _err) = subp(['blkid', '-t%s' % criteria, '-odevice'])
+    except subprocess.CalledProcessError:
+        return([])
+    return(str(out).splitlines())
+
+
+class mountFailedError(Exception):
+    pass
+
+
+def mount_callback_umount(device, callback, data=None):
+    """
+    mount the device, call method 'callback' passing the directory
+    in which it was mounted, then unmount.  Return whatever 'callback'
+    returned.  If data != None, also pass data to callback.
+    """
+
+    def _cleanup(umount, tmpd):
+        if umount:
+            try:
+                subp(["umount", '-l', umount])
+            except subprocess.CalledProcessError:
+                raise
+        if tmpd:
+            os.rmdir(tmpd)
+
+    # go through mounts to see if it was already mounted
+    fp = open("/proc/mounts")
+    mounts = fp.readlines()
+    fp.close()
+
+    tmpd = None
+
+    mounted = {}
+    for mpline in mounts:
+        (dev, mp, fstype, _opts, _freq, _passno) = mpline.split()
+        mp = mp.replace("\\040", " ")
+        mounted[dev] = (dev, fstype, mp, False)
+
+    umount = False
+    if device in mounted:
+        mountpoint = "%s/" % mounted[device][2]
+    else:
+        tmpd = tempfile.mkdtemp()
+
+        mountcmd = ["mount", "-o", "ro", device, tmpd]
+
+        try:
+            (_out, _err) = subp(mountcmd)
+            umount = tmpd
+        except subprocess.CalledProcessError as exc:
+            _cleanup(umount, tmpd)
+            raise mountFailedError(exc.output[1])
+
+        mountpoint = "%s/" % tmpd
+
+    try:
+        if data == None:
+            ret = callback(mountpoint)
+        else:
+            ret = callback(mountpoint, data)
+
+    except Exception as exc:
+        _cleanup(umount, tmpd)
+        raise exc
+
+    _cleanup(umount, tmpd)
+
+    return(ret)
+
+
+def wait_for_url(urls, max_wait=None, timeout=None,
+                 status_cb=None, headers_cb=None):
+    """
+    urls:      a list of urls to try
+    max_wait:  roughly the maximum time to wait before giving up
+               The max time is *actually* len(urls)*timeout as each url will
+               be tried once and given the timeout provided.
+    timeout:   the timeout provided to urllib2.urlopen
+    status_cb: call method with string message when a url is not available
+    headers_cb: call method with single argument of url to get headers
+                for request.
+
+    the idea of this routine is to wait for the EC2 metdata service to
+    come up.  On both Eucalyptus and EC2 we have seen the case where
+    the instance hit the MD before the MD service was up.  EC2 seems
+    to have permenantely fixed this, though.
+
+    In openstack, the metadata service might be painfully slow, and
+    unable to avoid hitting a timeout of even up to 10 seconds or more
+    (LP: #894279) for a simple GET.
+
+    Offset those needs with the need to not hang forever (and block boot)
+    on a system where cloud-init is configured to look for EC2 Metadata
+    service but is not going to find one.  It is possible that the instance
+    data host (169.254.169.254) may be firewalled off Entirely for a sytem,
+    meaning that the connection will block forever unless a timeout is set.
+    """
+    starttime = time.time()
+
+    sleeptime = 1
+
+    def nullstatus_cb(msg):
+        return
+
+    if status_cb == None:
+        status_cb = nullstatus_cb
+
+    def timeup(max_wait, starttime):
+        return((max_wait <= 0 or max_wait == None) or
+               (time.time() - starttime > max_wait))
+
+    loop_n = 0
+    while True:
+        sleeptime = int(loop_n / 5) + 1
+        for url in urls:
+            now = time.time()
+            if loop_n != 0:
+                if timeup(max_wait, starttime):
+                    break
+                if timeout and (now + timeout > (starttime + max_wait)):
+                    # shorten timeout to not run way over max_time
+                    timeout = int((starttime + max_wait) - now)
+
+            reason = ""
+            try:
+                if headers_cb != None:
+                    headers = headers_cb(url)
+                else:
+                    headers = {}
+
+                req = urllib2.Request(url, data=None, headers=headers)
+                resp = urllib2.urlopen(req, timeout=timeout)
+                if resp.read() != "":
+                    return url
+                reason = "empty data [%s]" % resp.getcode()
+            except urllib2.HTTPError as e:
+                reason = "http error [%s]" % e.code
+            except urllib2.URLError as e:
+                reason = "url error [%s]" % e.reason
+            except socket.timeout as e:
+                reason = "socket timeout [%s]" % e
+            except Exception as e:
+                reason = "unexpected error [%s]" % e
+
+            status_cb("'%s' failed [%s/%ss]: %s" %
+                      (url, int(time.time() - starttime), max_wait,
+                       reason))
+
+            if timeup(max_wait, starttime):
+                break
+
+        loop_n = loop_n + 1
+        time.sleep(sleeptime)
+
+    return False