summaryrefslogtreecommitdiff
path: root/cloudinit/sources
diff options
context:
space:
mode:
authorJoshua Harlow <harlowja@yahoo-inc.com>2012-06-07 12:45:28 -0700
committerJoshua Harlow <harlowja@yahoo-inc.com>2012-06-07 12:45:28 -0700
commit869402301c9793cece24a9357ee3c13dcdafb6e2 (patch)
tree27a80c8be75ba6928f32c982deac50372f825420 /cloudinit/sources
parent8900f9cba622eeaf3810003c5a6ff7522312277b (diff)
downloadvyos-cloud-init-869402301c9793cece24a9357ee3c13dcdafb6e2.tar.gz
vyos-cloud-init-869402301c9793cece24a9357ee3c13dcdafb6e2.zip
Darn it. Those shouldn't be there!
Diffstat (limited to 'cloudinit/sources')
-rw-r--r--cloudinit/sources/DataSource.py214
-rw-r--r--cloudinit/sources/DataSourceCloudStack.py92
-rw-r--r--cloudinit/sources/DataSourceConfigDrive.py231
-rw-r--r--cloudinit/sources/DataSourceEc2.py217
-rw-r--r--cloudinit/sources/DataSourceMAAS.py345
-rw-r--r--cloudinit/sources/DataSourceNoCloud.py232
-rw-r--r--cloudinit/sources/DataSourceOVF.py332
-rw-r--r--cloudinit/sources/__init__.py0
8 files changed, 1663 insertions, 0 deletions
diff --git a/cloudinit/sources/DataSource.py b/cloudinit/sources/DataSource.py
new file mode 100644
index 00000000..e2a9150d
--- /dev/null
+++ b/cloudinit/sources/DataSource.py
@@ -0,0 +1,214 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Hafliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+DEP_FILESYSTEM = "FILESYSTEM"
+DEP_NETWORK = "NETWORK"
+
+import cloudinit.UserDataHandler as ud
+import cloudinit.util as util
+import socket
+
+
+class DataSource:
+ userdata = None
+ metadata = None
+ userdata_raw = None
+ cfgname = ""
+ # system config (passed in from cloudinit,
+ # cloud-config before input from the DataSource)
+ sys_cfg = {}
+ # datasource config, the cloud-config['datasource']['__name__']
+ ds_cfg = {} # datasource config
+
+ def __init__(self, sys_cfg=None):
+ if not self.cfgname:
+ name = str(self.__class__).split(".")[-1]
+ if name.startswith("DataSource"):
+ name = name[len("DataSource"):]
+ self.cfgname = name
+ if sys_cfg:
+ self.sys_cfg = sys_cfg
+
+ self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
+ ("datasource", self.cfgname), self.ds_cfg)
+
+ def get_userdata(self):
+ if self.userdata == None:
+ self.userdata = ud.preprocess_userdata(self.userdata_raw)
+ return self.userdata
+
+ def get_userdata_raw(self):
+ return(self.userdata_raw)
+
+    # the data sources' config_obj is a cloud-config formatted
+ # object that came to it from ways other than cloud-config
+ # because cloud-config content would be handled elsewhere
+ def get_config_obj(self):
+ return({})
+
+ def get_public_ssh_keys(self):
+ keys = []
+ if 'public-keys' not in self.metadata:
+ return([])
+
+ if isinstance(self.metadata['public-keys'], str):
+ return(str(self.metadata['public-keys']).splitlines())
+
+ if isinstance(self.metadata['public-keys'], list):
+ return(self.metadata['public-keys'])
+
+ for _keyname, klist in self.metadata['public-keys'].items():
+ # lp:506332 uec metadata service responds with
+ # data that makes boto populate a string for 'klist' rather
+ # than a list.
+ if isinstance(klist, str):
+ klist = [klist]
+ for pkey in klist:
+ # there is an empty string at the end of the keylist, trim it
+ if pkey:
+ keys.append(pkey)
+
+ return(keys)
+
+ def device_name_to_device(self, _name):
+ # translate a 'name' to a device
+ # the primary function at this point is on ec2
+ # to consult metadata service, that has
+ # ephemeral0: sdb
+ # and return 'sdb' for input 'ephemeral0'
+ return(None)
+
+ def get_locale(self):
+ return('en_US.UTF-8')
+
+ def get_local_mirror(self):
+ return None
+
+ def get_instance_id(self):
+ if 'instance-id' not in self.metadata:
+ return "iid-datasource"
+ return(self.metadata['instance-id'])
+
+ def get_hostname(self, fqdn=False):
+ defdomain = "localdomain"
+ defhost = "localhost"
+
+ domain = defdomain
+ if not 'local-hostname' in self.metadata:
+
+ # this is somewhat questionable really.
+ # the cloud datasource was asked for a hostname
+ # and didn't have one. raising error might be more appropriate
+ # but instead, basically look up the existing hostname
+ toks = []
+
+ hostname = socket.gethostname()
+
+ fqdn = util.get_fqdn_from_hosts(hostname)
+
+ if fqdn and fqdn.find(".") > 0:
+ toks = str(fqdn).split(".")
+ elif hostname:
+ toks = [hostname, defdomain]
+ else:
+ toks = [defhost, defdomain]
+
+ else:
+ # if there is an ipv4 address in 'local-hostname', then
+ # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
+ lhost = self.metadata['local-hostname']
+ if is_ipv4(lhost):
+ toks = "ip-%s" % lhost.replace(".", "-")
+ else:
+ toks = lhost.split(".")
+
+ if len(toks) > 1:
+ hostname = toks[0]
+ domain = '.'.join(toks[1:])
+ else:
+ hostname = toks[0]
+
+ if fqdn:
+ return "%s.%s" % (hostname, domain)
+ else:
+ return hostname
+
+
+# return a list of classes that have the same depends as 'depends'
+# iterate through cfg_list, loading "DataSourceCollections" modules
+# and calling their "get_datasource_list".
+# return an ordered list of classes that match
+#
+# - modules must be named "DataSource<item>", where 'item' is an entry
+# in cfg_list
+# - if pkglist is given, it will iteratively try loading from that package
+# ie, pkglist=[ "foo", "" ]
+# will first try to load foo.DataSource<item>
+# then DataSource<item>
+def list_sources(cfg_list, depends, pkglist=None):
+ if pkglist is None:
+ pkglist = []
+ retlist = []
+ for ds_coll in cfg_list:
+ for pkg in pkglist:
+ if pkg:
+ pkg = "%s." % pkg
+ try:
+ mod = __import__("%sDataSource%s" % (pkg, ds_coll))
+ if pkg:
+ mod = getattr(mod, "DataSource%s" % ds_coll)
+ lister = getattr(mod, "get_datasource_list")
+ retlist.extend(lister(depends))
+ break
+ except:
+ raise
+ return(retlist)
+
+
+# depends is a list of dependencies (DEP_FILESYSTEM)
+# dslist is a list of 2 item lists
+# dslist = [
+# ( class, ( depends-that-this-class-needs ) )
+# }
+# it returns a list of 'class' that matched these deps exactly
+# it is a helper function for DataSourceCollections
+def list_from_depends(depends, dslist):
+ retlist = []
+ depset = set(depends)
+ for elem in dslist:
+ (cls, deps) = elem
+ if depset == set(deps):
+ retlist.append(cls)
+ return(retlist)
+
+
+def is_ipv4(instr):
+ """ determine if input string is a ipv4 address. return boolean"""
+ toks = instr.split('.')
+ if len(toks) != 4:
+ return False
+
+ try:
+ toks = [x for x in toks if (int(x) < 256 and int(x) > 0)]
+ except:
+ return False
+
+ return (len(toks) == 4)
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
new file mode 100644
index 00000000..5afdf7b6
--- /dev/null
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -0,0 +1,92 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Cosmin Luta
+#
+# Author: Cosmin Luta <q4break@gmail.com>
+# Author: Scott Moser <scott.moser@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import cloudinit.DataSource as DataSource
+
+from cloudinit import seeddir as base_seeddir
+from cloudinit import log
+import cloudinit.util as util
+from socket import inet_ntoa
+import time
+import boto.utils as boto_utils
+from struct import pack
+
+
+class DataSourceCloudStack(DataSource.DataSource):
+ api_ver = 'latest'
+ seeddir = base_seeddir + '/cs'
+ metadata_address = None
+
+ def __init__(self, sys_cfg=None):
+ DataSource.DataSource.__init__(self, sys_cfg)
+ # Cloudstack has its metadata/userdata URLs located at
+ # http://<default-gateway-ip>/latest/
+ self.metadata_address = "http://%s/" % self.get_default_gateway()
+
+ def get_default_gateway(self):
+ """ Returns the default gateway ip address in the dotted format
+ """
+ with open("/proc/net/route", "r") as f:
+ for line in f.readlines():
+ items = line.split("\t")
+ if items[1] == "00000000":
+ # found the default route, get the gateway
+ gw = inet_ntoa(pack("<L", int(items[2], 16)))
+ log.debug("found default route, gateway is %s" % gw)
+ return gw
+
+ def __str__(self):
+ return "DataSourceCloudStack"
+
+ def get_data(self):
+ seedret = {}
+ if util.read_optional_seed(seedret, base=self.seeddir + "/"):
+ self.userdata_raw = seedret['user-data']
+ self.metadata = seedret['meta-data']
+ log.debug("using seeded cs data in %s" % self.seeddir)
+ return True
+
+ try:
+ start = time.time()
+ self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver,
+ None, self.metadata_address)
+ self.metadata = boto_utils.get_instance_metadata(self.api_ver,
+ self.metadata_address)
+ log.debug("crawl of metadata service took %ds" %
+ (time.time() - start))
+ return True
+ except Exception as e:
+ log.exception(e)
+ return False
+
+ def get_instance_id(self):
+ return self.metadata['instance-id']
+
+ def get_availability_zone(self):
+ return self.metadata['availability-zone']
+
+datasources = [
+ (DataSourceCloudStack, (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
+]
+
+
+# return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return DataSource.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
new file mode 100644
index 00000000..2db4a76a
--- /dev/null
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -0,0 +1,231 @@
+# Copyright (C) 2012 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import cloudinit.DataSource as DataSource
+
+from cloudinit import seeddir as base_seeddir
+from cloudinit import log
+import cloudinit.util as util
+import os.path
+import os
+import json
+import subprocess
+
+DEFAULT_IID = "iid-dsconfigdrive"
+
+
+class DataSourceConfigDrive(DataSource.DataSource):
+ seed = None
+ seeddir = base_seeddir + '/config_drive'
+ cfg = {}
+ userdata_raw = None
+ metadata = None
+ dsmode = "local"
+
+ def __str__(self):
+ mstr = "DataSourceConfigDrive[%s]" % self.dsmode
+ mstr = mstr + " [seed=%s]" % self.seed
+ return(mstr)
+
+ def get_data(self):
+ found = None
+ md = {}
+ ud = ""
+
+ defaults = {"instance-id": DEFAULT_IID, "dsmode": "pass"}
+
+ if os.path.isdir(self.seeddir):
+ try:
+ (md, ud) = read_config_drive_dir(self.seeddir)
+ found = self.seeddir
+ except nonConfigDriveDir:
+ pass
+
+ if not found:
+ dev = cfg_drive_device()
+ if dev:
+ try:
+ (md, ud) = util.mount_callback_umount(dev,
+ read_config_drive_dir)
+ found = dev
+ except (nonConfigDriveDir, util.mountFailedError):
+ pass
+
+ if not found:
+ return False
+
+ if 'dsconfig' in md:
+ self.cfg = md['dscfg']
+
+ md = util.mergedict(md, defaults)
+
+ # update interfaces and ifup only on the local datasource
+ # this way the DataSourceConfigDriveNet doesn't do it also.
+ if 'network-interfaces' in md and self.dsmode == "local":
+ if md['dsmode'] == "pass":
+ log.info("updating network interfaces from configdrive")
+ else:
+ log.debug("updating network interfaces from configdrive")
+
+ util.write_file("/etc/network/interfaces",
+ md['network-interfaces'])
+ try:
+ (out, err) = util.subp(['ifup', '--all'])
+ if len(out) or len(err):
+ log.warn("ifup --all had stderr: %s" % err)
+
+ except subprocess.CalledProcessError as exc:
+ log.warn("ifup --all failed: %s" % (exc.output[1]))
+
+ self.seed = found
+ self.metadata = md
+ self.userdata_raw = ud
+
+ if md['dsmode'] == self.dsmode:
+ return True
+
+ log.debug("%s: not claiming datasource, dsmode=%s" %
+ (self, md['dsmode']))
+ return False
+
+ def get_public_ssh_keys(self):
+ if not 'public-keys' in self.metadata:
+ return([])
+ return(self.metadata['public-keys'])
+
+    # the data sources' config_obj is a cloud-config formatted
+ # object that came to it from ways other than cloud-config
+ # because cloud-config content would be handled elsewhere
+ def get_config_obj(self):
+ return(self.cfg)
+
+
+class DataSourceConfigDriveNet(DataSourceConfigDrive):
+ dsmode = "net"
+
+
+class nonConfigDriveDir(Exception):
+ pass
+
+
+def cfg_drive_device():
+ """ get the config drive device. return a string like '/dev/vdb'
+ or None (if there is no non-root device attached). This does not
+ check the contents, only reports that if there *were* a config_drive
+ attached, it would be this device.
+ per config_drive documentation, this is
+ "associated as the last available disk on the instance"
+ """
+
+ if 'CLOUD_INIT_CONFIG_DRIVE_DEVICE' in os.environ:
+ return(os.environ['CLOUD_INIT_CONFIG_DRIVE_DEVICE'])
+
+ # we are looking for a raw block device (sda, not sda1) with a vfat
+ # filesystem on it.
+
+ letters = "abcdefghijklmnopqrstuvwxyz"
+ devs = util.find_devs_with("TYPE=vfat")
+
+ # filter out anything not ending in a letter (ignore partitions)
+ devs = [f for f in devs if f[-1] in letters]
+
+ # sort them in reverse so "last" device is first
+ devs.sort(reverse=True)
+
+ if len(devs):
+ return(devs[0])
+
+ return(None)
+
+
+def read_config_drive_dir(source_dir):
+ """
+ read_config_drive_dir(source_dir):
+ read source_dir, and return a tuple with metadata dict and user-data
+ string populated. If not a valid dir, raise a nonConfigDriveDir
+ """
+ md = {}
+ ud = ""
+
+ flist = ("etc/network/interfaces", "root/.ssh/authorized_keys", "meta.js")
+ found = [f for f in flist if os.path.isfile("%s/%s" % (source_dir, f))]
+ keydata = ""
+
+ if len(found) == 0:
+ raise nonConfigDriveDir("%s: %s" % (source_dir, "no files found"))
+
+ if "etc/network/interfaces" in found:
+ with open("%s/%s" % (source_dir, "/etc/network/interfaces")) as fp:
+ md['network-interfaces'] = fp.read()
+
+ if "root/.ssh/authorized_keys" in found:
+ with open("%s/%s" % (source_dir, "root/.ssh/authorized_keys")) as fp:
+ keydata = fp.read()
+
+ meta_js = {}
+
+ if "meta.js" in found:
+ content = ''
+ with open("%s/%s" % (source_dir, "meta.js")) as fp:
+ content = fp.read()
+ md['meta_js'] = content
+ try:
+ meta_js = json.loads(content)
+ except ValueError:
+ raise nonConfigDriveDir("%s: %s" %
+ (source_dir, "invalid json in meta.js"))
+
+ keydata = meta_js.get('public-keys', keydata)
+
+ if keydata:
+ lines = keydata.splitlines()
+ md['public-keys'] = [l for l in lines
+ if len(l) and not l.startswith("#")]
+
+ for copy in ('dsmode', 'instance-id', 'dscfg'):
+ if copy in meta_js:
+ md[copy] = meta_js[copy]
+
+ if 'user-data' in meta_js:
+ ud = meta_js['user-data']
+
+ return(md, ud)
+
+datasources = (
+ (DataSourceConfigDrive, (DataSource.DEP_FILESYSTEM, )),
+ (DataSourceConfigDriveNet,
+ (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
+)
+
+
+# return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return(DataSource.list_from_depends(depends, datasources))
+
+if __name__ == "__main__":
+ def main():
+ import sys
+ import pprint
+ print cfg_drive_device()
+ (md, ud) = read_config_drive_dir(sys.argv[1])
+ print "=== md ==="
+ pprint.pprint(md)
+ print "=== ud ==="
+ print(ud)
+
+ main()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
new file mode 100644
index 00000000..7051ecda
--- /dev/null
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -0,0 +1,217 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Hafliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import cloudinit.DataSource as DataSource
+
+from cloudinit import seeddir as base_seeddir
+from cloudinit import log
+import cloudinit.util as util
+import socket
+import time
+import boto.utils as boto_utils
+import os.path
+
+
+class DataSourceEc2(DataSource.DataSource):
+ api_ver = '2009-04-04'
+ seeddir = base_seeddir + '/ec2'
+ metadata_address = "http://169.254.169.254"
+
+ def __str__(self):
+ return("DataSourceEc2")
+
+ def get_data(self):
+ seedret = {}
+ if util.read_optional_seed(seedret, base=self.seeddir + "/"):
+ self.userdata_raw = seedret['user-data']
+ self.metadata = seedret['meta-data']
+ log.debug("using seeded ec2 data in %s" % self.seeddir)
+ return True
+
+ try:
+ if not self.wait_for_metadata_service():
+ return False
+ start = time.time()
+ self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver,
+ None, self.metadata_address)
+ self.metadata = boto_utils.get_instance_metadata(self.api_ver,
+ self.metadata_address)
+ log.debug("crawl of metadata service took %ds" % (time.time() -
+ start))
+ return True
+ except Exception as e:
+ print e
+ return False
+
+ def get_instance_id(self):
+ return(self.metadata['instance-id'])
+
+ def get_availability_zone(self):
+ return(self.metadata['placement']['availability-zone'])
+
+ def get_local_mirror(self):
+ return(self.get_mirror_from_availability_zone())
+
+ def get_mirror_from_availability_zone(self, availability_zone=None):
+ # availability is like 'us-west-1b' or 'eu-west-1a'
+ if availability_zone == None:
+ availability_zone = self.get_availability_zone()
+
+ fallback = None
+
+ if self.is_vpc():
+ return fallback
+
+ try:
+ host = "%s.ec2.archive.ubuntu.com" % availability_zone[:-1]
+ socket.getaddrinfo(host, None, 0, socket.SOCK_STREAM)
+ return 'http://%s/ubuntu/' % host
+ except:
+ return fallback
+
+ def wait_for_metadata_service(self):
+ mcfg = self.ds_cfg
+
+ if not hasattr(mcfg, "get"):
+ mcfg = {}
+
+ max_wait = 120
+ try:
+ max_wait = int(mcfg.get("max_wait", max_wait))
+ except Exception:
+ util.logexc(log)
+ log.warn("Failed to get max wait. using %s" % max_wait)
+
+ if max_wait == 0:
+ return False
+
+ timeout = 50
+ try:
+ timeout = int(mcfg.get("timeout", timeout))
+ except Exception:
+ util.logexc(log)
+ log.warn("Failed to get timeout, using %s" % timeout)
+
+ def_mdurls = ["http://169.254.169.254", "http://instance-data:8773"]
+ mdurls = mcfg.get("metadata_urls", def_mdurls)
+
+        # Remove addresses from the list that won't resolve.
+ filtered = [x for x in mdurls if util.is_resolvable_url(x)]
+
+ if set(filtered) != set(mdurls):
+ log.debug("removed the following from metadata urls: %s" %
+ list((set(mdurls) - set(filtered))))
+
+ if len(filtered):
+ mdurls = filtered
+ else:
+ log.warn("Empty metadata url list! using default list")
+ mdurls = def_mdurls
+
+ urls = []
+ url2base = {False: False}
+ for url in mdurls:
+ cur = "%s/%s/meta-data/instance-id" % (url, self.api_ver)
+ urls.append(cur)
+ url2base[cur] = url
+
+ starttime = time.time()
+ url = util.wait_for_url(urls=urls, max_wait=max_wait,
+ timeout=timeout, status_cb=log.warn)
+
+ if url:
+ log.debug("Using metadata source: '%s'" % url2base[url])
+ else:
+ log.critical("giving up on md after %i seconds\n" %
+ int(time.time() - starttime))
+
+ self.metadata_address = url2base[url]
+ return (bool(url))
+
+ def device_name_to_device(self, name):
+ # consult metadata service, that has
+ # ephemeral0: sdb
+ # and return 'sdb' for input 'ephemeral0'
+ if 'block-device-mapping' not in self.metadata:
+ return(None)
+
+ found = None
+ for entname, device in self.metadata['block-device-mapping'].items():
+ if entname == name:
+ found = device
+ break
+ # LP: #513842 mapping in Euca has 'ephemeral' not 'ephemeral0'
+ if entname == "ephemeral" and name == "ephemeral0":
+ found = device
+ if found == None:
+ log.debug("unable to convert %s to a device" % name)
+ return None
+
+ # LP: #611137
+ # the metadata service may believe that devices are named 'sda'
+ # when the kernel named them 'vda' or 'xvda'
+ # we want to return the correct value for what will actually
+ # exist in this instance
+ mappings = {"sd": ("vd", "xvd")}
+ ofound = found
+ short = os.path.basename(found)
+
+ if not found.startswith("/"):
+ found = "/dev/%s" % found
+
+ if os.path.exists(found):
+ return(found)
+
+ for nfrom, tlist in mappings.items():
+ if not short.startswith(nfrom):
+ continue
+ for nto in tlist:
+ cand = "/dev/%s%s" % (nto, short[len(nfrom):])
+ if os.path.exists(cand):
+ log.debug("remapped device name %s => %s" % (found, cand))
+ return(cand)
+
+ # on t1.micro, ephemeral0 will appear in block-device-mapping from
+ # metadata, but it will not exist on disk (and never will)
+    # at this point, we've verified that the path did not exist
+ # in the special case of 'ephemeral0' return None to avoid bogus
+ # fstab entry (LP: #744019)
+ if name == "ephemeral0":
+ return None
+ return ofound
+
+ def is_vpc(self):
+ # per comment in LP: #615545
+ ph = "public-hostname"
+ p4 = "public-ipv4"
+ if ((ph not in self.metadata or self.metadata[ph] == "") and
+ (p4 not in self.metadata or self.metadata[p4] == "")):
+ return True
+ return False
+
+
+datasources = [
+ (DataSourceEc2, (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
+]
+
+
+# return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return(DataSource.list_from_depends(depends, datasources))
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
new file mode 100644
index 00000000..61a0038f
--- /dev/null
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -0,0 +1,345 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import cloudinit.DataSource as DataSource
+
+from cloudinit import seeddir as base_seeddir
+from cloudinit import log
+import cloudinit.util as util
+import errno
+import oauth.oauth as oauth
+import os.path
+import urllib2
+import time
+
+
+MD_VERSION = "2012-03-01"
+
+
+class DataSourceMAAS(DataSource.DataSource):
+ """
+ DataSourceMAAS reads instance information from MAAS.
+ Given a config metadata_url, and oauth tokens, it expects to find
+ files under the root named:
+ instance-id
+ user-data
+ hostname
+ """
+ seeddir = base_seeddir + '/maas'
+ baseurl = None
+
+ def __str__(self):
+ return("DataSourceMAAS[%s]" % self.baseurl)
+
+ def get_data(self):
+ mcfg = self.ds_cfg
+
+ try:
+ (userdata, metadata) = read_maas_seed_dir(self.seeddir)
+ self.userdata_raw = userdata
+ self.metadata = metadata
+ self.baseurl = self.seeddir
+ return True
+ except MAASSeedDirNone:
+ pass
+ except MAASSeedDirMalformed as exc:
+ log.warn("%s was malformed: %s\n" % (self.seeddir, exc))
+ raise
+
+ try:
+ # if there is no metadata_url, then we're not configured
+ url = mcfg.get('metadata_url', None)
+ if url == None:
+ return False
+
+ if not self.wait_for_metadata_service(url):
+ return False
+
+ self.baseurl = url
+
+ (userdata, metadata) = read_maas_seed_url(self.baseurl,
+ self.md_headers)
+ self.userdata_raw = userdata
+ self.metadata = metadata
+ return True
+ except Exception:
+ util.logexc(log)
+ return False
+
+ def md_headers(self, url):
+ mcfg = self.ds_cfg
+
+ # if we are missing token_key, token_secret or consumer_key
+ # then just do non-authed requests
+ for required in ('token_key', 'token_secret', 'consumer_key'):
+ if required not in mcfg:
+ return({})
+
+ consumer_secret = mcfg.get('consumer_secret', "")
+
+ return(oauth_headers(url=url, consumer_key=mcfg['consumer_key'],
+ token_key=mcfg['token_key'], token_secret=mcfg['token_secret'],
+ consumer_secret=consumer_secret))
+
+ def wait_for_metadata_service(self, url):
+ mcfg = self.ds_cfg
+
+ max_wait = 120
+ try:
+ max_wait = int(mcfg.get("max_wait", max_wait))
+ except Exception:
+ util.logexc(log)
+ log.warn("Failed to get max wait. using %s" % max_wait)
+
+ if max_wait == 0:
+ return False
+
+ timeout = 50
+ try:
+ timeout = int(mcfg.get("timeout", timeout))
+ except Exception:
+ util.logexc(log)
+ log.warn("Failed to get timeout, using %s" % timeout)
+
+ starttime = time.time()
+ check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
+ url = util.wait_for_url(urls=[check_url], max_wait=max_wait,
+ timeout=timeout, status_cb=log.warn,
+ headers_cb=self.md_headers)
+
+ if url:
+ log.debug("Using metadata source: '%s'" % url)
+ else:
+ log.critical("giving up on md after %i seconds\n" %
+ int(time.time() - starttime))
+
+ return (bool(url))
+
+
+def read_maas_seed_dir(seed_d):
+ """
+ Return user-data and metadata for a maas seed dir in seed_d.
+ Expected format of seed_d are the following files:
+ * instance-id
+ * local-hostname
+ * user-data
+ """
+ files = ('local-hostname', 'instance-id', 'user-data', 'public-keys')
+ md = {}
+
+ if not os.path.isdir(seed_d):
+ raise MAASSeedDirNone("%s: not a directory")
+
+ for fname in files:
+ try:
+ with open(os.path.join(seed_d, fname)) as fp:
+ md[fname] = fp.read()
+ fp.close()
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ return(check_seed_contents(md, seed_d))
+
+
+def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
+ version=MD_VERSION):
+ """
+ Read the maas datasource at seed_url.
+ header_cb is a method that should return a headers dictionary that will
+ be given to urllib2.Request()
+
+    Expected format of seed_url is the following files:
+ * <seed_url>/<version>/meta-data/instance-id
+ * <seed_url>/<version>/meta-data/local-hostname
+ * <seed_url>/<version>/user-data
+ """
+ files = ('meta-data/local-hostname',
+ 'meta-data/instance-id',
+ 'meta-data/public-keys',
+ 'user-data')
+
+ base_url = "%s/%s" % (seed_url, version)
+ md = {}
+ for fname in files:
+ url = "%s/%s" % (base_url, fname)
+ if header_cb:
+ headers = header_cb(url)
+ else:
+ headers = {}
+
+ try:
+ req = urllib2.Request(url, data=None, headers=headers)
+ resp = urllib2.urlopen(req, timeout=timeout)
+ md[os.path.basename(fname)] = resp.read()
+ except urllib2.HTTPError as e:
+ if e.code != 404:
+ raise
+
+ return(check_seed_contents(md, seed_url))
+
+
+def check_seed_contents(content, seed):
+    """Validate that content is a dict that is valid as a
+ return for a datasource.
+ Either return a (userdata, metadata) tuple or
+ Raise MAASSeedDirMalformed or MAASSeedDirNone
+ """
+ md_required = ('instance-id', 'local-hostname')
+ found = content.keys()
+
+ if len(content) == 0:
+ raise MAASSeedDirNone("%s: no data files found" % seed)
+
+ missing = [k for k in md_required if k not in found]
+ if len(missing):
+ raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
+
+ userdata = content.get('user-data', "")
+ md = {}
+ for (key, val) in content.iteritems():
+ if key == 'user-data':
+ continue
+ md[key] = val
+
+ return(userdata, md)
+
+
+def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret):
+ consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
+ token = oauth.OAuthToken(token_key, token_secret)
+ params = {
+ 'oauth_version': "1.0",
+ 'oauth_nonce': oauth.generate_nonce(),
+ 'oauth_timestamp': int(time.time()),
+ 'oauth_token': token.key,
+ 'oauth_consumer_key': consumer.key,
+ }
+ req = oauth.OAuthRequest(http_url=url, parameters=params)
+ req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(),
+ consumer, token)
+ return(req.to_header())
+
+
+class MAASSeedDirNone(Exception):
+ pass
+
+
+class MAASSeedDirMalformed(Exception):
+ pass
+
+
+datasources = [
+ (DataSourceMAAS, (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
+]
+
+
+# return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return(DataSource.list_from_depends(depends, datasources))
+
+
+if __name__ == "__main__":
+ def main():
+ """
+ Call with single argument of directory or http or https url.
+ If url is given additional arguments are allowed, which will be
+ interpreted as consumer_key, token_key, token_secret, consumer_secret
+ """
+ import argparse
+ import pprint
+
+ parser = argparse.ArgumentParser(description='Interact with MAAS DS')
+ parser.add_argument("--config", metavar="file",
+ help="specify DS config file", default=None)
+ parser.add_argument("--ckey", metavar="key",
+ help="the consumer key to auth with", default=None)
+ parser.add_argument("--tkey", metavar="key",
+ help="the token key to auth with", default=None)
+ parser.add_argument("--csec", metavar="secret",
+ help="the consumer secret (likely '')", default="")
+ parser.add_argument("--tsec", metavar="secret",
+ help="the token secret to auth with", default=None)
+ parser.add_argument("--apiver", metavar="version",
+ help="the apiver to use ("" can be used)", default=MD_VERSION)
+
+ subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
+ subcmds.add_parser('crawl', help="crawl the datasource")
+ subcmds.add_parser('get', help="do a single GET of provided url")
+    subcmds.add_parser('check-seed', help="read and verify seed at url")
+
+ parser.add_argument("url", help="the data source to query")
+
+ args = parser.parse_args()
+
+ creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
+ 'token_secret': args.tsec, 'consumer_secret': args.csec}
+
+ if args.config:
+ import yaml
+ with open(args.config) as fp:
+ cfg = yaml.load(fp)
+ if 'datasource' in cfg:
+ cfg = cfg['datasource']['MAAS']
+ for key in creds.keys():
+ if key in cfg and creds[key] == None:
+ creds[key] = cfg[key]
+
+ def geturl(url, headers_cb):
+ req = urllib2.Request(url, data=None, headers=headers_cb(url))
+ return(urllib2.urlopen(req).read())
+
+ def printurl(url, headers_cb):
+ print "== %s ==\n%s\n" % (url, geturl(url, headers_cb))
+
+ def crawl(url, headers_cb=None):
+ if url.endswith("/"):
+ for line in geturl(url, headers_cb).splitlines():
+ if line.endswith("/"):
+ crawl("%s%s" % (url, line), headers_cb)
+ else:
+ printurl("%s%s" % (url, line), headers_cb)
+ else:
+ printurl(url, headers_cb)
+
+ def my_headers(url):
+ headers = {}
+ if creds.get('consumer_key', None) != None:
+ headers = oauth_headers(url, **creds)
+ return headers
+
+ if args.subcmd == "check-seed":
+ if args.url.startswith("http"):
+ (userdata, metadata) = read_maas_seed_url(args.url,
+ header_cb=my_headers, version=args.apiver)
+ else:
+ (userdata, metadata) = read_maas_seed_url(args.url)
+ print "=== userdata ==="
+ print userdata
+ print "=== metadata ==="
+ pprint.pprint(metadata)
+
+ elif args.subcmd == "get":
+ printurl(args.url, my_headers)
+
+ elif args.subcmd == "crawl":
+ if not args.url.endswith("/"):
+ args.url = "%s/" % args.url
+ crawl(args.url, my_headers)
+
+ main()
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
new file mode 100644
index 00000000..e8c56b8f
--- /dev/null
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -0,0 +1,232 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Hafliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import cloudinit.DataSource as DataSource
+
+from cloudinit import seeddir as base_seeddir
+from cloudinit import log
+import cloudinit.util as util
+import errno
+import subprocess
+
+
class DataSourceNoCloud(DataSource.DataSource):
    """Datasource seeded from purely local sources.

    Data may be supplied by (any combination of):
      * the kernel command line (``ds=nocloud[;key=val;...]``)
      * the local seed directory (``seeddir``)
      * an attached vfat/iso9660 filesystem labeled 'cidata'
      * a 'seedfrom' location named by any of the above or by the
        datasource config
    """
    # populated by get_data()
    metadata = None
    userdata = None
    userdata_raw = None
    # 'seedfrom' values must start with one of these (local paths only;
    # the Net variant overrides with network protocols)
    supported_seed_starts = ("/", "file://")
    dsmode = "local"
    # comma-joined list of where data was found, for __str__/debugging
    seed = None
    cmdline_id = "ds=nocloud"
    seeddir = base_seeddir + '/nocloud'

    def __str__(self):
        mstr = "DataSourceNoCloud"
        mstr = mstr + " [seed=%s]" % self.seed
        return(mstr)

    def get_data(self):
        """Gather metadata/userdata; return True if this source claims.

        Returns False when nothing indicated this datasource should be
        used, or when the resulting 'dsmode' does not match this
        instance's dsmode (so the matching local/net variant can claim
        the data instead).
        """
        defaults = {
            "instance-id": "nocloud", "dsmode": self.dsmode
        }

        found = []
        md = {}
        ud = ""

        try:
            # parse the kernel command line, getting data passed in
            if parse_cmdline_data(self.cmdline_id, md):
                found.append("cmdline")
        except Exception:
            # cmdline parsing should never take the system down; log
            # the traceback and decline to claim the datasource
            util.logexc(log)
            return False

        # check to see if the seeddir has data.
        seedret = {}
        if util.read_optional_seed(seedret, base=self.seeddir + "/"):
            md = util.mergedict(md, seedret['meta-data'])
            ud = seedret['user-data']
            found.append(self.seeddir)
            log.debug("using seeded cache data in %s" % self.seeddir)

        # if the datasource config had a 'seedfrom' entry, then that takes
        # precedence over a 'seedfrom' that was found in a filesystem
        # but not over external media
        if 'seedfrom' in self.ds_cfg and self.ds_cfg['seedfrom']:
            found.append("ds_config")
            md["seedfrom"] = self.ds_cfg['seedfrom']

        # external media: a vfat or iso9660 filesystem labeled 'cidata'
        fslist = util.find_devs_with("TYPE=vfat")
        fslist.extend(util.find_devs_with("TYPE=iso9660"))

        label_list = util.find_devs_with("LABEL=cidata")
        devlist = list(set(fslist) & set(label_list))
        devlist.sort(reverse=True)

        for dev in devlist:
            try:
                (newmd, newud) = util.mount_callback_umount(dev,
                    util.read_seeded)
                md = util.mergedict(newmd, md)
                ud = newud

                # for seed from a device, the default mode is 'net'.
                # that is more likely to be what is desired.
                # If they want dsmode of local, then they must
                # specify that.
                if 'dsmode' not in md:
                    md['dsmode'] = "net"

                log.debug("using data from %s" % dev)
                found.append(dev)
                break
            except OSError as e:
                # a vanished device is fine; anything else is real
                if e.errno != errno.ENOENT:
                    raise
            except util.mountFailedError:
                log.warn("Failed to mount %s when looking for seed" % dev)

        # there was no indication on kernel cmdline or data
        # in the seeddir suggesting this handler should be used.
        if len(found) == 0:
            return False

        seeded_interfaces = None

        # the special argument "seedfrom" indicates we should
        # attempt to seed the userdata / metadata from its value
        # its primary value is in allowing the user to type less
        # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
        if "seedfrom" in md:
            seedfrom = md["seedfrom"]
            seedfound = False
            for proto in self.supported_seed_starts:
                if seedfrom.startswith(proto):
                    seedfound = proto
                    break
            if not seedfound:
                log.debug("seed from %s not supported by %s" %
                    (seedfrom, self.__class__))
                return False

            if 'network-interfaces' in md:
                seeded_interfaces = self.dsmode

            # this could throw errors, but the user told us to do it
            # so if errors are raised, let them raise
            (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
            log.debug("using seeded cache data from %s" % seedfrom)

            # values in the command line override those from the seed
            md = util.mergedict(md, md_seed)
            found.append(seedfrom)

        md = util.mergedict(md, defaults)

        # update the network-interfaces if metadata had 'network-interfaces'
        # entry and this is the local datasource, or 'seedfrom' was used
        # and the source of the seed was self.dsmode
        # ('local' for NoCloud, 'net' for NoCloudNet)
        if ('network-interfaces' in md and
            (self.dsmode in ("local", seeded_interfaces))):
            log.info("updating network interfaces from nocloud")

            util.write_file("/etc/network/interfaces",
                md['network-interfaces'])
            try:
                (out, err) = util.subp(['ifup', '--all'])
                if len(out) or len(err):
                    log.warn("ifup --all had stderr: %s" % err)

            except subprocess.CalledProcessError as exc:
                log.warn("ifup --all failed: %s" % (exc.output[1]))

        self.seed = ",".join(found)
        self.metadata = md
        self.userdata_raw = ud

        if md['dsmode'] == self.dsmode:
            return True

        log.debug("%s: not claiming datasource, dsmode=%s" %
            (self, md['dsmode']))
        return False
+
+
# returns true or false indicating if cmdline indicated
# that this module should be used
# example cmdline:
#   root=LABEL=uec-rootfs ro ds=nocloud
def parse_cmdline_data(ds_id, fill, cmdline=None):
    """Check a kernel command line for 'ds_id'; fill in key=value pairs.

    Returns True when the command line names this datasource, either
    bare ('ds=nocloud') or with arguments ('ds=nocloud;key=val;...'),
    and populates 'fill' (a dict, modified in place) with any key=value
    arguments found.  Returns False otherwise.

    When 'cmdline' is None the running kernel's command line is read
    via util.get_cmdline().
    """
    if cmdline is None:
        cmdline = util.get_cmdline()
    # pad with spaces so boundary matches below also work at the ends
    cmdline = " %s " % cmdline

    if not (" %s " % ds_id in cmdline or " %s;" % ds_id in cmdline):
        return False

    argline = ""
    # cmdline can contain:
    # ds=nocloud[;key=val;key=val]
    for tok in cmdline.split():
        if tok.startswith(ds_id):
            argline = tok.split("=", 1)

    # argline array is now 'nocloud' followed optionally by
    # a ';' and then key=value pairs also terminated with ';'
    tmp = argline[1].split(";")
    if len(tmp) > 1:
        kvpairs = tmp[1:]
    else:
        kvpairs = ()

    # short2long mapping to save cmdline typing
    s2l = {"h": "local-hostname", "i": "instance-id", "s": "seedfrom"}
    for item in kvpairs:
        try:
            (k, v) = item.split("=", 1)
        except ValueError:
            # bare key with no '=value' part
            k = item
            v = None
        if k in s2l:
            k = s2l[k]
        fill[k] = v

    return(True)
+
+
class DataSourceNoCloudNet(DataSourceNoCloud):
    """Network-stage variant of the NoCloud datasource."""
    # activated by 'ds=nocloud-net' on the kernel command line
    cmdline_id = "ds=nocloud-net"
    # runs in the 'net' stage, so network transports are allowed here
    dsmode = "net"
    supported_seed_starts = ("http://", "https://", "ftp://")
    seeddir = base_seeddir + '/nocloud-net'
+
+
# (datasource class, dependency tuple) pairs: NoCloud needs only a
# filesystem; NoCloudNet additionally requires the network to be up.
datasources = (
    (DataSourceNoCloud, (DataSource.DEP_FILESYSTEM, )),
    (DataSourceNoCloudNet,
     (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
)
+
+
def get_datasource_list(depends):
    """Return the datasource classes here that satisfy 'depends'."""
    return DataSource.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
new file mode 100644
index 00000000..a0b1b518
--- /dev/null
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -0,0 +1,332 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Hafliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import cloudinit.DataSource as DataSource
+
+from cloudinit import seeddir as base_seeddir
+from cloudinit import log
+import cloudinit.util as util
+import os.path
+import os
+from xml.dom import minidom
+import base64
+import re
+import tempfile
+import subprocess
+
+
class DataSourceOVF(DataSource.DataSource):
    """Datasource fed by an OVF environment document (ovf-env.xml).

    The environment may be found in a local seed directory, on an
    attached iso9660/cdrom transport, or (not yet implemented) via
    vmware-guestd.
    """
    seed = None
    seeddir = base_seeddir + '/ovf'
    # raw XML contents of the environment that was found
    environment = None
    # cloud-config-style settings parsed out of the environment
    cfg = {}
    userdata_raw = None
    metadata = None
    supported_seed_starts = ("/", "file://")

    def __str__(self):
        mstr = "DataSourceOVF"
        mstr = mstr + " [seed=%s]" % self.seed
        return(mstr)

    def get_data(self):
        """Locate an OVF environment and populate this datasource.

        Returns True when an environment was found and parsed,
        False otherwise.
        """
        found = []
        md = {}
        ud = ""

        defaults = {
            "instance-id": "iid-dsovf"
        }

        (seedfile, contents) = get_ovf_env(base_seeddir)
        if seedfile:
            # found a seed dir
            seed = "%s/%s" % (base_seeddir, seedfile)
            (md, ud, cfg) = read_ovf_environment(contents)
            self.environment = contents

            found.append(seed)
        else:
            # try each transport in turn until one yields contents
            np = {'iso': transport_iso9660,
                  'vmware-guestd': transport_vmware_guestd, }
            name = None
            for name, transfunc in np.iteritems():
                (contents, _dev, _fname) = transfunc()
                if contents:
                    break

            if contents:
                (md, ud, cfg) = read_ovf_environment(contents)
                self.environment = contents
                found.append(name)

        # There were no OVF transports found
        if len(found) == 0:
            return False

        if 'seedfrom' in md and md['seedfrom']:
            seedfrom = md['seedfrom']
            seedfound = False
            for proto in self.supported_seed_starts:
                if seedfrom.startswith(proto):
                    seedfound = proto
                    break
            if not seedfound:
                log.debug("seed from %s not supported by %s" %
                    (seedfrom, self.__class__))
                return False

            # this can raise; the user explicitly asked to seed from here
            (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
            log.debug("using seeded cache data from %s" % seedfrom)

            # values from the environment override those from the seed
            md = util.mergedict(md, md_seed)
            found.append(seedfrom)

        md = util.mergedict(md, defaults)
        self.seed = ",".join(found)
        self.metadata = md
        self.userdata_raw = ud
        self.cfg = cfg
        return True

    def get_public_ssh_keys(self):
        """Return the 'public-keys' metadata entry as a list (or [])."""
        if 'public-keys' not in self.metadata:
            return([])
        return([self.metadata['public-keys'], ])

    # the data sources' config_obj is a cloud-config formated
    # object that came to it from ways other than cloud-config
    # because cloud-config content would be handled elsewhere
    def get_config_obj(self):
        return(self.cfg)
+
+
class DataSourceOVFNet(DataSourceOVF):
    """Network-stage OVF datasource: seeds may come over the network."""
    supported_seed_starts = ("http://", "https://", "ftp://")
    seeddir = base_seeddir + '/ovf-net'
+
+
# this will return a dict with some content
# meta-data, user-data
def read_ovf_environment(contents):
    """Parse OVF environment XML into (metadata, userdata, cfg).

    'contents' is the raw ovf-env.xml string.  Property keys are
    routed to metadata or cloud-config; 'user-data' is base64-decoded
    when possible, otherwise passed through verbatim.
    """
    props = getProperties(contents)
    md = {}
    cfg = {}
    ud = ""
    # properties that belong in cloud-config rather than metadata
    cfg_props = ['password', ]
    md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
    for prop, val in props.items():
        if prop == 'hostname':
            # normalize the OVF name to the cloud-init metadata key
            prop = "local-hostname"
        if prop in md_props:
            md[prop] = val
        elif prop in cfg_props:
            cfg[prop] = val
        elif prop == "user-data":
            try:
                ud = base64.decodestring(val)
            except Exception:
                # not valid base64; treat the value as raw user-data
                ud = val
    return(md, ud, cfg)
+
+
# returns tuple of filename (in 'dirname', and the contents of the file)
# on "not found", returns 'None' for filename and False for contents
def get_ovf_env(dirname):
    """Search 'dirname' for an OVF environment file.

    Checks the known candidate names in order and returns
    (filename, contents) for the first one present, or (None, False)
    when none exists.
    """
    env_names = ("ovf-env.xml", "ovf_env.xml", "OVF_ENV.XML", "OVF-ENV.XML")
    for fname in env_names:
        full_fn = os.path.join(dirname, fname)
        if os.path.isfile(full_fn):
            # context manager ensures the handle closes even on a
            # read error (the original leaked the fd in that case)
            with open(full_fn) as fp:
                contents = fp.read()
            return(fname, contents)
    return(None, False)
+
+
# transport functions take no input and return
# a 3 tuple of content, path, filename
def transport_iso9660(require_iso=True):
    """Look for an OVF environment on a cdrom-like block device.

    First scans filesystems that are already mounted, then attempts a
    read-only mount of each unmounted candidate device.  Returns
    (contents, device-path, filename) on success, or
    (False, None, None) when nothing was found.
    """

    # default_regex matches values in
    # /lib/udev/rules.d/60-cdrom_id.rules
    # KERNEL!="sr[0-9]*|hd[a-z]|xvd*", GOTO="cdrom_end"
    envname = "CLOUD_INIT_CDROM_DEV_REGEX"
    default_regex = "^(sr[0-9]+|hd[a-z]|xvd.*)"

    devname_regex = os.environ.get(envname, default_regex)
    cdmatch = re.compile(devname_regex)

    # go through mounts to see if it was already mounted
    fp = open("/proc/mounts")
    try:
        mounts = fp.readlines()
    finally:
        fp.close()

    mounted = {}
    for mpline in mounts:
        (dev, mp, fstype, _opts, _freq, _passno) = mpline.split()
        # record every mounted device (regardless of fstype) so the
        # mount-attempt loop below skips them
        mounted[dev] = (dev, fstype, mp, False)
        # /proc/mounts escapes spaces in mount points as '\040'
        mp = mp.replace("\\040", " ")
        if fstype != "iso9660" and require_iso:
            continue

        if cdmatch.match(dev[5:]) is None:  # take off '/dev/'
            continue

        (fname, contents) = get_ovf_env(mp)
        if contents is not False:
            return(contents, dev, fname)

    tmpd = None
    dvnull = None

    devs = os.listdir("/dev/")
    devs.sort()

    for dev in devs:
        fullp = "/dev/%s" % dev

        if fullp in mounted or not cdmatch.match(dev) or os.path.isdir(fullp):
            continue

        fp = None
        try:
            # quick probe that the device exists and is readable
            fp = open(fullp, "rb")
            fp.read(512)
            fp.close()
        except Exception:
            if fp:
                fp.close()
            continue

        if tmpd is None:
            tmpd = tempfile.mkdtemp()
        if dvnull is None:
            try:
                # open read-write: this fd is used as the child's
                # stdin AND stdout/stderr below; the previous
                # read-only open made writes to it fail
                dvnull = open("/dev/null", "r+")
            except Exception:
                # fall back to inheriting our own stdio
                pass

        cmd = ["mount", "-o", "ro", fullp, tmpd]
        if require_iso:
            cmd.extend(('-t', 'iso9660'))

        rc = subprocess.call(cmd, stderr=dvnull, stdout=dvnull, stdin=dvnull)
        if rc:
            continue

        (fname, contents) = get_ovf_env(tmpd)

        subprocess.call(["umount", tmpd])

        if contents is not False:
            os.rmdir(tmpd)
            return(contents, fullp, fname)

    if tmpd:
        os.rmdir(tmpd)

    if dvnull:
        dvnull.close()

    return(False, None, None)
+
+
def transport_vmware_guestd():
    """Transport stub for fetching the OVF environment via vmware-guestd.

    See http://blogs.vmware.com/vapp/2009/07/
    selfconfiguration-and-the-ovf-environment.html.  The intended
    implementation would run
        vmware-guestd --cmd 'info-get guestinfo.ovfEnv'
    and return (out, 'guestinfo.ovfEnv', 'vmware-guestd'), with error
    checking to decide whether a failure should be logged or raised.
    Until that exists, always report "not found".
    """
    return (False, None, None)
+
+
def findChild(node, filter_func):
    """Return the direct children of 'node' accepted by filter_func."""
    if not node.hasChildNodes():
        return []
    return [child for child in node.childNodes if filter_func(child)]
+
+
def getProperties(environString):
    """Extract the Property key/value pairs from an OVF environment.

    'environString' is the raw ovf-env.xml text.  Returns a dict of
    key -> value taken from the first PropertySection.  Raises
    Exception when the document is not an Environment or lacks a
    PropertySection.
    """
    dom = minidom.parseString(environString)
    root = dom.documentElement
    if root.localName != "Environment":
        raise Exception("No Environment Node")

    if not root.hasChildNodes():
        raise Exception("No Child Nodes")

    envNsURI = "http://schemas.dmtf.org/ovf/environment/1"

    # could also check here that elem.namespaceURI == envNsURI
    sections = findChild(root, lambda n: n.localName == "PropertySection")
    if len(sections) == 0:
        raise Exception("No 'PropertySection's")

    properties = {}
    prop_elems = findChild(sections[0], lambda n: n.localName == "Property")
    for elem in prop_elems:
        # key/value live in namespaced attributes (oe:key, oe:value)
        key = elem.attributes.getNamedItemNS(envNsURI, "key").value
        val = elem.attributes.getNamedItemNS(envNsURI, "value").value
        properties[key] = val

    return properties
+
+
# (datasource class, dependency tuple) pairs: DataSourceOVF needs only
# a filesystem; DataSourceOVFNet additionally requires the network.
datasources = (
    (DataSourceOVF, (DataSource.DEP_FILESYSTEM, )),
    (DataSourceOVFNet,
     (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
)
+
+
def get_datasource_list(depends):
    """Return the datasource classes here that satisfy 'depends'."""
    return DataSource.list_from_depends(depends, datasources)
+
+
if __name__ == "__main__":
    # ad-hoc developer CLI: parse an ovf-env.xml file named in argv[1]
    # and dump the raw properties plus the derived
    # (metadata, userdata, config) triple.  Python 2 print syntax.
    def main():
        import sys
        envStr = open(sys.argv[1]).read()
        props = getProperties(envStr)
        import pprint
        pprint.pprint(props)

        md, ud, cfg = read_ovf_environment(envStr)
        print "=== md ==="
        pprint.pprint(md)
        print "=== ud ==="
        pprint.pprint(ud)
        print "=== cfg ==="
        pprint.pprint(cfg)

    main()
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/sources/__init__.py