-rw-r--r--  cloudinit/distros/__init__.py                          4
-rw-r--r--  cloudinit/settings.py                                  1
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py            366
-rw-r--r--  cloudinit/sources/__init__.py                         11
-rw-r--r--  cloudinit/util.py                                      7
-rw-r--r--  doc/sources/opennebula/README                         66
-rw-r--r--  tests/unittests/test_datasource/test_opennebula.py   163
7 files changed, 616 insertions(+), 2 deletions(-)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 0db4aac7..35577466 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -64,6 +64,10 @@ class Distro(object):
# to write this blob out in a distro format
raise NotImplementedError()
+ def apply_resolv_conf(self, settings):
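+        # base-class helper: write the given resolv.conf contents to
+        # /etc/resolv.conf, resolving the path through self._paths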
+ net_fn = self._paths.join(False, "/etc/resolv.conf")
+ util.write_file(net_fn, settings)
+
def get_option(self, opt_name, default=None):
return self._cfg.get(opt_name, default)
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 8cc9e3b4..4b95b5b7 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -31,6 +31,7 @@ CFG_BUILTIN = {
'datasource_list': [
'NoCloud',
'ConfigDrive',
+ 'OpenNebula',
'AltCloud',
'OVF',
'MAAS',
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
new file mode 100644
index 00000000..dad64bd4
--- /dev/null
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -0,0 +1,366 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Yahoo! Inc.
+# Copyright (C) 2012-2013 CERIT Scientific Cloud
+# Copyright (C) 2012 OpenNebula.org
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Vlastimil Holer <xholer@mail.muni.cz>
+# Author: Javier Fontan <jfontan@opennebula.org>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_IID = "iid-dsopennebula"
+CONTEXT_DISK_FILES = ["context.sh"]
+VALID_DSMODES = ("local", "net", "disabled")
+
+class DataSourceOpenNebula(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.dsmode = 'local'
+ self.seed = None
+ self.seed_dir = os.path.join(paths.seed_dir, 'opennebula')
+
+ def __str__(self):
+ mstr = "%s [seed=%s][dsmode=%s]" % (util.obj_name(self),
+ self.seed, self.dsmode)
+ return mstr
+
+ def get_data(self):
+ defaults = {
+ "instance-id": DEFAULT_IID,
+ "dsmode": self.dsmode,
+ }
+
+ found = None
+ md = {}
+
+ results = {}
+ if os.path.isdir(self.seed_dir):
+ try:
+ results=read_context_disk_dir(self.seed_dir)
+ found = self.seed_dir
+ except NonContextDiskDir:
+ util.logexc(LOG, "Failed reading context disk from %s",
+ self.seed_dir)
+ if not found:
+ devlist = find_candidate_devs()
+ for dev in devlist:
+ try:
+ results = util.mount_cb(dev, read_context_disk_dir)
+ found = dev
+ break
+ except (NonContextDiskDir, util.MountFailedError):
+ pass
+
+ if not found:
+ return False
+
+ md = results['metadata']
+ md = util.mergedict(md, defaults)
+
+        dsmode = results.get('dsmode', None)
+        if dsmode not in VALID_DSMODES + (None,):
+            LOG.warn("user specified invalid mode: %s", dsmode)
+            dsmode = None
+
+        # precedence: dsmode from the context disk, then from the
+        # datasource config, then this datasource's default
+        if dsmode is None:
+            dsmode = self.ds_cfg.get('dsmode', self.dsmode)
+
+ if dsmode == "disabled":
+ # most likely user specified
+ return False
+
+ if dsmode != self.dsmode:
+ LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
+ return False
+
+ self.seed = found
+ self.metadata = md
+ self.userdata_raw = results.get('userdata')
+
+ # apply static network configuration only in 'local' dsmode
+ if ('network-interfaces' in results and self.dsmode == "local"):
+ LOG.debug("Updating network interfaces from %s", self)
+ self.distro.apply_network(results['network-interfaces'])
+
+ return True
+
+ def get_hostname(self, fqdn=False, resolve_ip=None):
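+        # Unless the caller says otherwise, resolve the hostname from the
+        # IP address only in 'net' dsmode, where networking is already up;
+        # in 'local' dsmode the base class falls back to an "ip-x-x-x-x"
+        # style hostname instead.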
+ if resolve_ip is None:
+ if self.dsmode == 'net':
+ resolve_ip = True
+ else:
+ resolve_ip = False
+ return sources.DataSource.get_hostname(self, fqdn, resolve_ip)
+
+
+class DataSourceOpenNebulaNet(DataSourceOpenNebula):
+ def __init__(self, sys_cfg, distro, paths):
+ DataSourceOpenNebula.__init__(self, sys_cfg, distro, paths)
+ self.dsmode = 'net'
+
+
+class NonContextDiskDir(Exception):
+ pass
+
+
+class OpenNebulaNetwork(object):
+    REG_DEV_MAC = re.compile(
+        r'^\d+: (eth\d+):.*link/ether (..:..:..:..:..:..) ')
+
+    def __init__(self, ip, context_sh):
+        self.ip = ip
+        self.context_sh = context_sh
+        self.ifaces = self.get_ifaces()
+
+    def get_ifaces(self):
+        return [self.REG_DEV_MAC.search(f).groups()
+                for f in self.ip.split("\n")
+                if self.REG_DEV_MAC.match(f)]
+
+ def mac2ip(self, mac):
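+        # OpenNebula encodes the guest's IPv4 address in the last four
+        # octets of the MAC address (e.g. 02:00:0a:12:01:01 -> 10.18.1.1),
+        # so the address can be recovered when no ethX_ip variable is set.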
+ components=mac.split(':')[2:]
+
+ return [str(int(c, 16)) for c in components]
+
+ def get_ip(self, dev, components):
+ var_name=dev+'_ip'
+ if var_name in self.context_sh:
+ return self.context_sh[var_name]
+ else:
+ return '.'.join(components)
+
+ def get_mask(self, dev, components):
+ var_name=dev+'_mask'
+ if var_name in self.context_sh:
+ return self.context_sh[var_name]
+ else:
+ return '255.255.255.0'
+
+ def get_network(self, dev, components):
+ var_name=dev+'_network'
+ if var_name in self.context_sh:
+ return self.context_sh[var_name]
+ else:
+ return '.'.join(components[:-1])+'.0'
+
+    def get_gateway(self, dev, components):
+        var_name = dev + '_gateway'
+        if var_name in self.context_sh:
+            return self.context_sh[var_name]
+        else:
+            return None
+
+    def get_dns(self, dev, components):
+        var_name = dev + '_dns'
+        if var_name in self.context_sh:
+            return self.context_sh[var_name]
+        else:
+            return None
+
+    def get_domain(self, dev, components):
+        var_name = dev + '_domain'
+        if var_name in self.context_sh:
+            return self.context_sh[var_name]
+        else:
+            return None
+
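+    # Render a Debian/Ubuntu style /etc/network/interfaces configuration:
+    # one static stanza for each ethN interface found in the 'ip -o link'
+    # output, preferring values from context.sh and falling back to the
+    # MAC-derived defaults above.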
+ def gen_conf(self):
+ global_dns=[]
+ if 'dns' in self.context_sh:
+ global_dns.append(self.context_sh['dns'])
+
+ conf=[]
+ conf.append('auto lo')
+ conf.append('iface lo inet loopback')
+ conf.append('')
+
+ for i in self.ifaces:
+ dev=i[0]
+ mac=i[1]
+ ip_components=self.mac2ip(mac)
+
+ conf.append('auto '+dev)
+ conf.append('iface '+dev+' inet static')
+ conf.append(' address '+self.get_ip(dev, ip_components))
+ conf.append(' network '+self.get_network(dev, ip_components))
+ conf.append(' netmask '+self.get_mask(dev, ip_components))
+
+ gateway=self.get_gateway(dev, ip_components)
+ if gateway:
+ conf.append(' gateway '+gateway)
+
+ domain=self.get_domain(dev, ip_components)
+ if domain:
+ conf.append(' dns-search '+domain)
+
+            # add global DNS servers to all interfaces
+            dns = self.get_dns(dev, ip_components)
+            if global_dns or dns:
+                # copy global_dns so per-interface servers do not leak
+                # into the global list on later iterations
+                all_dns = list(global_dns)
+                if dns:
+                    all_dns.append(dns)
+                conf.append(' dns-nameservers ' + ' '.join(all_dns))
+
+ conf.append('')
+
+ return "\n".join(conf)
+
+
+def find_candidate_devs():
+ """
+ Return a list of devices that may contain the context disk.
+ """
+ by_fstype = util.find_devs_with("TYPE=iso9660")
+ by_label = util.find_devs_with("LABEL=CDROM")
+
+ by_fstype.sort()
+ by_label.sort()
+
+ # combine list of items by putting by-label items first
+ # followed by fstype items, but with dupes removed
+ combined = (by_label + [d for d in by_fstype if d not in by_label])
+
+ return combined
+
+
+def read_context_disk_dir(source_dir):
+    """
+    read_context_disk_dir(source_dir):
+    read source_dir and return a dict with a 'metadata' dict and a
+    'userdata' string populated.  If source_dir is not a valid context
+    disk directory, raise NonContextDiskDir.
+    """
+
+ found = {}
+ for af in CONTEXT_DISK_FILES:
+ fn = os.path.join(source_dir, af)
+ if os.path.isfile(fn):
+ found[af] = fn
+
+ if len(found) == 0:
+ raise NonContextDiskDir("%s: %s" % (source_dir, "no files found"))
+
+ context_sh = {}
+ results = {
+ 'userdata':None,
+ 'metadata':{},
+ }
+
+ if "context.sh" in found:
+ try:
+            # Note: context.sh is a "shell" script with defined context
+            # variables, like: X="Y" . It is ready to be sourced by a
+            # shell (e.g. ". context.sh"), and as a shell script it may
+            # also reference previously defined shell variables. So, to
+            # obtain the same variable values a custom shell script would
+            # see, we let bash itself read context.sh and dump the
+            # variables in an easily parsable way.
+            #
+            # normalized variable dump format (as printed by "set"):
+            # 1. simple single word assignment ........ X=Y
+            # 2. multiword assignment ................. X='Y Z'
+            # 3. assignments with backslash escapes ... X=$'Y\nZ'
+            #
+            # how context variables are read:
+            # 1. list existing ("old") shell variables and store them
+            #    in $VARS
+            # 2. source context.sh
+            # 3. use comm to filter the "old" variables out of the current
+            #    ones, and exclude a few other vars with egrep
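+            #
+            # For illustration, the dump produced below might contain
+            # lines such as (values here are examples only):
+            #   HOSTNAME=foo
+            #   USER_DATA='#cloud-config apt_upgrade: true'
+            #   SSH_KEY=$'ssh-rsa AAA... a\nssh-rsa BBB... b'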
+ BASH_CMD='VARS=`set | sort -u `;' \
+ 'source %s/context.sh;' \
+ 'comm -23 <(set | sort -u) <(echo "$VARS") | egrep -v "^(VARS|PIPESTATUS|_)="'
+
+ (out,err) = util.subp(['bash','--noprofile', '--norc',
+ '-c', BASH_CMD % (source_dir) ])
+
+ for (key,value) in [ l.split('=',1) for l in out.rstrip().split("\n") ]:
+ k=key.lower()
+
+ # with backslash escapes, e.g.
+ # X=$'Y\nZ'
+ r=re.match("^\$'(.*)'$",value)
+ if r:
+ context_sh[k]=r.group(1).decode('string_escape')
+ else:
+ # multiword values, e.g.:
+ # X='Y Z'
+ # X='Y'\''Z' for "Y'Z"
+ r=re.match("^'(.*)'$",value)
+ if r:
+ context_sh[k]=r.group(1).replace("'\\''","'")
+ else:
+ # simple values, e.g.:
+ # X=Y
+ context_sh[k]=value
+
+ except util.ProcessExecutionError as e:
+ raise NonContextDiskDir("Error reading context.sh: %s" % (e))
+
+ results['metadata']=context_sh
+ else:
+ raise NonContextDiskDir("Missing context.sh")
+
+ # process single or multiple SSH keys
+ ssh_key_var=None
+
+ if "ssh_key" in context_sh:
+ ssh_key_var="ssh_key"
+ elif "ssh_public_key" in context_sh:
+ ssh_key_var="ssh_public_key"
+
+ if ssh_key_var:
+ lines = context_sh.get(ssh_key_var).splitlines()
+ results['metadata']['public-keys'] = [l for l in lines
+ if len(l) and not l.startswith("#")]
+
+    # custom hostname -- try 'hostname' first; otherwise store an IP
+    # address and let cloud-init itself derive a hostname from it later
+ for k in ('hostname','public_ip','ip_public','eth0_ip'):
+ if k in context_sh:
+ results['metadata']['local-hostname'] = context_sh[k]
+ break
+
+ # raw user data
+ if "user_data" in context_sh:
+ results['userdata'] = context_sh["user_data"]
+ elif "userdata" in context_sh:
+ results['userdata'] = context_sh["userdata"]
+
+ (out, err) = util.subp(['/sbin/ip', '-o', 'link'])
+ net=OpenNebulaNetwork(out, context_sh)
+ results['network-interfaces']=net.gen_conf()
+
+ return results
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOpenNebulaNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 96baff90..0bad4c8b 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -139,7 +139,7 @@ class DataSource(object):
return "iid-datasource"
return str(self.metadata['instance-id'])
- def get_hostname(self, fqdn=False):
+ def get_hostname(self, fqdn=False, resolve_ip=False):
defdomain = "localdomain"
defhost = "localhost"
domain = defdomain
@@ -163,7 +163,14 @@ class DataSource(object):
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
lhost = self.metadata['local-hostname']
if util.is_ipv4(lhost):
- toks = ["ip-%s" % lhost.replace(".", "-")]
+ toks = []
+ if resolve_ip:
+ toks = util.gethostbyaddr(lhost)
+
+ if toks:
+ toks = toks.split('.')
+ else:
+ toks = ["ip-%s" % lhost.replace(".", "-")]
else:
toks = lhost.split(".")
diff --git a/cloudinit/util.py b/cloudinit/util.py
index ffe844b2..7b1202a2 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -901,6 +901,13 @@ def get_hostname():
return hostname
+def gethostbyaddr(ip):
+ try:
+ return socket.gethostbyaddr(ip)[0]
+ except socket.herror:
+ return None
+
+
def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
return (is_resolvable(urlparse.urlparse(url).hostname))
diff --git a/doc/sources/opennebula/README b/doc/sources/opennebula/README
new file mode 100644
index 00000000..772a5b99
--- /dev/null
+++ b/doc/sources/opennebula/README
@@ -0,0 +1,66 @@
+The 'OpenNebula' DataSource supports the OpenNebula contextualization disk.
+
+The following criteria must be met for a disk to be identified by
+DataSourceOpenNebula as a contextualization disk:
+  * must be formatted with an iso9660 filesystem or labeled as CDROM
+  * must be an un-partitioned block device (/dev/vdb, not /dev/vdb1)
+ * must contain
+ * context.sh
+
+== Content of config-drive ==
+ * context.sh
+   This is the only mandatory file on the context disk; the rest of the
+   content depends on the contextualization parameter FILES and is
+   therefore optional. It is a shell script defining all context
+   parameters. The script is processed by bash (/bin/bash) to simulate
+   the behaviour of common OpenNebula context scripts, and the resulting
+   variables are handed back to cloud-init for further processing.
+
+== Configuration ==
+Cloud-init's behaviour can be modified by context variables found
+in the context.sh file in the following ways (variable names are
+case-insensitive):
+ * dsmode:
+ values: local, net, disabled
+ default: None
+
+   Tells whether this datasource is processed in the local (pre-networking)
+   or net (post-networking) stage, or is disabled completely.
+
+ * ssh_key:
+ default: None
+   If present, these key(s) will be used as the public key(s) for
+   the instance. Multiple keys can be specified in this single context
+   variable, but each key must be on its own line, i.e. keys must be
+   separated by newlines.
+
+ * hostname:
+ default: None
+ Custom hostname for the instance.
+
+ * public_ip:
+ default: None
+   If hostname is not specified, public_ip is used to resolve the
+   hostname via DNS.
+
+ * 'user_data' or 'userdata':
+ default: None
+   This provides cloud-init user-data. See other documentation for
+   what can be present here.
+
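+== Example context.sh ==
+
+For illustration, a context.sh as it might appear on the context disk
+(all values below are examples only):
+
+  # Context variables generated by OpenNebula
+  DSMODE="net"
+  HOSTNAME="foo.example.com"
+  SSH_KEY="ssh-rsa AAAAB3NzaC1... user@example"
+  USER_DATA="#cloud-config
+runcmd:
+- echo 'Instance has been configured by cloud-init.' | wall"
+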
+== Example OpenNebula Virtual Machine template ==
+
+CONTEXT=[
+ PUBLIC_IP="$NIC[IP]",
+ SSH_KEY="$USER[SSH_KEY]
+$USER[SSH_KEY1]
+$USER[SSH_KEY2] ",
+ USER_DATA="#cloud-config
+# see https://help.ubuntu.com/community/CloudInit
+
+packages: []
+
+mounts:
+- [vdc,none,swap,sw,0,0]
+runcmd:
+- echo 'Instance has been configured by cloud-init.' | wall
+" ]
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
new file mode 100644
index 00000000..af8bd347
--- /dev/null
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -0,0 +1,163 @@
+import os
+from mocker import MockerTestCase
+from cloudinit import util
+from cloudinit.sources import DataSourceOpenNebula as ds
+
+TEST_VARS={
+ 'var1': 'single',
+ 'var2': 'double word',
+ 'var3': 'multi\nline\n',
+ 'var4': "'single'",
+ 'var5': "'double word'",
+ 'var6': "'multi\nline\n'",
+ 'var7': 'single\\t',
+ 'var8': 'double\\tword',
+ 'var9': 'multi\\t\nline\n' }
+
+USER_DATA='#cloud-config\napt_upgrade: true'
+SSH_KEY='ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i'
+HOSTNAME='foo.example.com'
+PUBLIC_IP='10.0.0.3'
+
+CMD_IP_OUT='''\
+1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000\ link/ether 02:00:0a:12:01:01 brd ff:ff:ff:ff:ff:ff
+'''
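+# Note: the eth0 MAC above (02:00:0a:12:01:01) encodes the IP address
+# 10.18.1.1 in its last four octets; that is the address test_eth0
+# expects OpenNebulaNetwork to derive.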
+
+class TestOpenNebulaDataSource(MockerTestCase):
+
+ def setUp(self):
+ super(TestOpenNebulaDataSource, self).setUp()
+ self.tmp = self.makeDir()
+
+ def test_seed_dir_non_contextdisk(self):
+ my_d = os.path.join(self.tmp, 'non-contextdisk')
+ self.assertRaises(ds.NonContextDiskDir,ds.read_context_disk_dir,my_d)
+
+ def test_seed_dir_bad_context_sh(self):
+ my_d = os.path.join(self.tmp, 'bad-context-sh')
+ os.mkdir(my_d)
+ with open(os.path.join(my_d,"context.sh"), "w") as fp:
+ fp.write('/bin/false\n')
+ self.assertRaises(ds.NonContextDiskDir,ds.read_context_disk_dir,my_d)
+
+ def test_context_sh_parser(self):
+ my_d = os.path.join(self.tmp,'context-sh-parser')
+ populate_dir(my_d, TEST_VARS)
+ results=ds.read_context_disk_dir(my_d)
+
+ self.assertTrue('metadata' in results)
+ self.assertEqual(TEST_VARS,results['metadata'])
+
+ def test_ssh_key(self):
+ public_keys=[]
+ for c in range(4):
+ for k in ('SSH_KEY','SSH_PUBLIC_KEY'):
+ my_d = os.path.join(self.tmp, "%s-%i" % (k,c))
+ populate_dir(my_d, {k:'\n'.join(public_keys)})
+ results=ds.read_context_disk_dir(my_d)
+
+ self.assertTrue('metadata' in results)
+ self.assertTrue('public-keys' in results['metadata'])
+ self.assertEqual(public_keys,results['metadata']['public-keys'])
+
+ public_keys.append(SSH_KEY % (c+1,))
+
+ def test_user_data(self):
+ for k in ('USER_DATA','USERDATA'):
+ my_d = os.path.join(self.tmp, k)
+ populate_dir(my_d, {k:USER_DATA})
+ results=ds.read_context_disk_dir(my_d)
+
+ self.assertTrue('userdata' in results)
+ self.assertEqual(USER_DATA,results['userdata'])
+
+ def test_hostname(self):
+ for k in ('HOSTNAME','PUBLIC_IP','IP_PUBLIC','ETH0_IP'):
+ my_d = os.path.join(self.tmp, k)
+ populate_dir(my_d, {k:PUBLIC_IP})
+ results=ds.read_context_disk_dir(my_d)
+
+ self.assertTrue('metadata' in results)
+ self.assertTrue('local-hostname' in results['metadata'])
+ self.assertEqual(PUBLIC_IP,results['metadata']['local-hostname'])
+
+
+ def test_find_candidates(self):
+ devs_with_answers = {
+ "TYPE=iso9660": ["/dev/vdb"],
+ "LABEL=CDROM": ["/dev/sr0"],
+ }
+
+ def my_devs_with(criteria):
+ return devs_with_answers[criteria]
+
+ try:
+ orig_find_devs_with = util.find_devs_with
+ util.find_devs_with = my_devs_with
+ self.assertEqual(["/dev/sr0","/dev/vdb"], ds.find_candidate_devs())
+ finally:
+ util.find_devs_with = orig_find_devs_with
+
+
+class TestOpenNebulaNetwork(MockerTestCase):
+
+ def setUp(self):
+ super(TestOpenNebulaNetwork, self).setUp()
+
+ def test_lo(self):
+ net=ds.OpenNebulaNetwork('',{})
+ self.assertEqual(net.gen_conf(),u'''\
+auto lo
+iface lo inet loopback
+''')
+
+ def test_eth0(self):
+ net=ds.OpenNebulaNetwork(CMD_IP_OUT,{})
+ self.assertEqual(net.gen_conf(),u'''\
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 10.18.1.1
+ network 10.18.1.0
+ netmask 255.255.255.0
+''')
+
+ def test_eth0_override(self):
+ context_sh = {
+ 'dns': '1.2.3.8',
+ 'eth0_ip':'1.2.3.4',
+ 'eth0_network':'1.2.3.0',
+ 'eth0_mask':'255.255.0.0',
+ 'eth0_gateway':'1.2.3.5',
+ 'eth0_domain':'example.com',
+ 'eth0_dns':'1.2.3.6 1.2.3.7'}
+
+ net=ds.OpenNebulaNetwork(CMD_IP_OUT,context_sh)
+ self.assertEqual(net.gen_conf(),u'''\
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 1.2.3.4
+ network 1.2.3.0
+ netmask 255.255.0.0
+ gateway 1.2.3.5
+ dns-search example.com
+ dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7
+''')
+
+
+def populate_dir(seed_dir, files):
+ os.mkdir(seed_dir)
+ with open(os.path.join(seed_dir,"context.sh"), "w") as fp:
+ fp.write("# Context variables generated by OpenNebula\n")
+ for (name, content) in files.iteritems():
+ fp.write('%s="%s"\n' % (name.upper(),content))
+
+# vi: ts=4 expandtab