summaryrefslogtreecommitdiff
path: root/cloudinit/sources
diff options
context:
space:
mode:
Diffstat (limited to 'cloudinit/sources')
-rw-r--r--cloudinit/sources/DataSourceAzure.py34
-rw-r--r--cloudinit/sources/DataSourceConfigDrive.py44
-rw-r--r--cloudinit/sources/DataSourceOpenNebula.py442
-rw-r--r--cloudinit/sources/DataSourceSmartOS.py133
-rw-r--r--cloudinit/sources/__init__.py18
5 files changed, 628 insertions, 43 deletions
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 66d7728b..b18c57e7 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -44,8 +44,21 @@ BUILTIN_DS_CONFIG = {
'policy': True,
'command': BOUNCE_COMMAND,
'hostname_command': 'hostname',
- }
+ },
+ 'disk_aliases': {'ephemeral0': '/dev/sdb'},
}
+
+BUILTIN_CLOUD_CONFIG = {
+ 'disk_setup': {
+ 'ephemeral0': {'table_type': 'mbr',
+ 'layout': True,
+ 'overwrite': False}
+ },
+ 'fs_setup': [{'filesystem': 'ext4',
+ 'device': 'ephemeral0.1',
+ 'replace_fs': 'ntfs'}]
+}
+
DS_CFG_PATH = ['datasource', DS_NAME]
@@ -94,7 +107,7 @@ class DataSourceAzureNet(sources.DataSource):
(md, self.userdata_raw, cfg, files) = ret
self.seed = cdev
self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
- self.cfg = cfg
+ self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
found = cdev
LOG.debug("found datasource in %s", cdev)
@@ -106,9 +119,14 @@ class DataSourceAzureNet(sources.DataSource):
if found == ddir:
LOG.debug("using files cached in %s", ddir)
+ # azure / hyper-v provides random data here
+ seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True)
+ if seed:
+ self.metadata['random_seed'] = seed
+
# now update ds_cfg to reflect contents pass in config
- usercfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
- self.ds_cfg = util.mergemanydict([usercfg, self.ds_cfg])
+ user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
+ self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
mycfg = self.ds_cfg
# walinux agent writes files world readable, but expects
@@ -156,9 +174,11 @@ class DataSourceAzureNet(sources.DataSource):
pubkeys = pubkeys_from_crt_files(fp_files)
self.metadata['public-keys'] = pubkeys
-
return True
+ def device_name_to_device(self, name):
+ return self.ds_cfg['disk_aliases'].get(name)
+
def get_config_obj(self):
return self.cfg
@@ -344,7 +364,7 @@ def read_azure_ovf(contents):
try:
dom = minidom.parseString(contents)
except Exception as e:
- raise NonAzureDataSource("invalid xml: %s" % e)
+ raise BrokenAzureDataSource("invalid xml: %s" % e)
results = find_child(dom.documentElement,
lambda n: n.localName == "ProvisioningSection")
@@ -390,7 +410,7 @@ def read_azure_ovf(contents):
simple = True
value = child.childNodes[0].wholeText
- attrs = {k: v for k, v in child.attributes.items()}
+ attrs = dict([(k, v) for k, v in child.attributes.items()])
# we accept either UserData or CustomData. If both are present
# then behavior is undefined.
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 835f2a9a..4f437244 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -18,6 +18,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import base64
import json
import os
@@ -41,6 +42,25 @@ DEFAULT_METADATA = {
VALID_DSMODES = ("local", "net", "pass", "disabled")
+class ConfigDriveHelper(object):
+ def __init__(self, distro):
+ self.distro = distro
+
+ def on_first_boot(self, data):
+ if not data:
+ data = {}
+ if 'network_config' in data:
+ LOG.debug("Updating network interfaces from config drive")
+ self.distro.apply_network(data['network_config'])
+ files = data.get('files')
+ if files:
+ LOG.debug("Writing %s injected files", len(files))
+ try:
+ write_files(files)
+ except IOError:
+ util.logexc(LOG, "Failed writing files")
+
+
class DataSourceConfigDrive(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -49,6 +69,7 @@ class DataSourceConfigDrive(sources.DataSource):
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
self.version = None
self.ec2_metadata = None
+ self.helper = ConfigDriveHelper(distro)
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -187,20 +208,8 @@ class DataSourceConfigDrive(sources.DataSource):
# instance-id
prev_iid = get_previous_iid(self.paths)
cur_iid = md['instance-id']
-
- if ('network_config' in results and self.dsmode == "local" and
- prev_iid != cur_iid):
- LOG.debug("Updating network interfaces from config drive (%s)",
- dsmode)
- self.distro.apply_network(results['network_config'])
-
- # file writing occurs in local mode (to be as early as possible)
- if self.dsmode == "local" and prev_iid != cur_iid and results['files']:
- LOG.debug("writing injected files")
- try:
- write_files(results['files'])
- except:
- util.logexc(LOG, "Failed writing files")
+ if prev_iid != cur_iid and self.dsmode == "local":
+ self.helper.on_first_boot(results)
# dsmode != self.dsmode here if:
# * dsmode = "pass", pass means it should only copy files and then
@@ -338,6 +347,13 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
except KeyError:
raise BrokenConfigDriveDir("No uuid entry in metadata")
+ if 'random_seed' in results['metadata']:
+ random_seed = results['metadata']['random_seed']
+ try:
+ results['metadata']['random_seed'] = base64.b64decode(random_seed)
+ except (ValueError, TypeError) as exc:
+ raise BrokenConfigDriveDir("Badly formatted random_seed: %s" % exc)
+
def read_content_path(item):
# do not use os.path.join here, as content_path starts with /
cpath = os.path.sep.join((source_dir, "openstack",
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
new file mode 100644
index 00000000..07dc25ff
--- /dev/null
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -0,0 +1,442 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Yahoo! Inc.
+# Copyright (C) 2012-2013 CERIT Scientific Cloud
+# Copyright (C) 2012-2013 OpenNebula.org
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Vlastimil Holer <xholer@mail.muni.cz>
+# Author: Javier Fontan <jfontan@opennebula.org>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pwd
+import re
+import string # pylint: disable=W0402
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_IID = "iid-dsopennebula"
+DEFAULT_MODE = 'net'
+DEFAULT_PARSEUSER = 'nobody'
+CONTEXT_DISK_FILES = ["context.sh"]
+VALID_DSMODES = ("local", "net", "disabled")
+
+
+class DataSourceOpenNebula(sources.DataSource):
+    def __init__(self, sys_cfg, distro, paths):
+        sources.DataSource.__init__(self, sys_cfg, distro, paths)
+        self.dsmode = 'local'
+        self.seed = None
+        self.seed_dir = os.path.join(paths.seed_dir, 'opennebula')
+
+    def __str__(self):
+        root = sources.DataSource.__str__(self)
+        return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
+
+    def get_data(self):
+        defaults = {"instance-id": DEFAULT_IID}
+        results = None
+        seed = None
+
+        # decide parseuser for context.sh shell reader
+        parseuser = DEFAULT_PARSEUSER
+        if 'parseuser' in self.ds_cfg:
+            parseuser = self.ds_cfg.get('parseuser')
+
+        candidates = [self.seed_dir]
+        candidates.extend(find_candidate_devs())
+        for cdev in candidates:
+            try:
+                if os.path.isdir(cdev):  # fix: was self.seed_dir, which shadowed devices
+                    results = read_context_disk_dir(cdev, asuser=parseuser)
+                elif cdev.startswith("/dev"):
+                    results = util.mount_cb(cdev, read_context_disk_dir,
+                                            data=parseuser)
+            except NonContextDiskDir:
+                continue
+            except BrokenContextDiskDir as exc:
+                raise exc
+            except util.MountFailedError:
+                LOG.warn("%s was not mountable", cdev)  # lazy log formatting
+
+            if results:
+                seed = cdev
+                LOG.debug("found datasource in %s", cdev)
+                break
+
+        if not seed:
+            return False
+
+        # merge fetched metadata with datasource defaults
+        md = results['metadata']
+        md = util.mergemanydict([md, defaults])
+
+        # check for valid user specified dsmode
+        user_dsmode = results['metadata'].get('DSMODE', None)
+        if user_dsmode not in VALID_DSMODES + (None,):
+            LOG.warn("user specified invalid mode: %s", user_dsmode)
+            user_dsmode = None
+
+        # decide dsmode
+        if user_dsmode:
+            dsmode = user_dsmode
+        elif self.ds_cfg.get('dsmode'):
+            dsmode = self.ds_cfg.get('dsmode')
+        else:
+            dsmode = DEFAULT_MODE
+
+        if dsmode == "disabled":
+            # most likely user specified
+            return False
+
+        # apply static network configuration only in 'local' dsmode
+        if ('network-interfaces' in results and self.dsmode == "local"):
+            LOG.debug("Updating network interfaces from %s", self)
+            self.distro.apply_network(results['network-interfaces'])
+
+        if dsmode != self.dsmode:
+            LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
+            return False
+
+        self.seed = seed
+        self.metadata = md
+        self.userdata_raw = results.get('userdata')
+        return True
+
+    def get_hostname(self, fqdn=False, resolve_ip=None):
+        if resolve_ip is None:
+            if self.dsmode == 'net':
+                resolve_ip = True
+            else:
+                resolve_ip = False
+        return sources.DataSource.get_hostname(self, fqdn, resolve_ip)
+
+
+class DataSourceOpenNebulaNet(DataSourceOpenNebula):
+    def __init__(self, sys_cfg, distro, paths):
+        DataSourceOpenNebula.__init__(self, sys_cfg, distro, paths)
+        self.dsmode = 'net'  # variant claimed once networking is available
+
+
+class NonContextDiskDir(Exception):
+    pass  # candidate dir/device is not an OpenNebula context disk; try next
+
+
+class BrokenContextDiskDir(Exception):
+    pass  # a context disk was found but its contents are invalid; abort
+
+
+class OpenNebulaNetwork(object):
+    REG_DEV_MAC = re.compile(
+        r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
+        re.MULTILINE | re.DOTALL)  # captures (device, mac) from 'ip link'
+
+    def __init__(self, ip, context):
+        self.ip = ip  # raw 'ip link' output text
+        self.context = context  # parsed context.sh variables
+        self.ifaces = self.get_ifaces()
+
+    def get_ifaces(self):  # -> list of (device, mac) tuples
+        return self.REG_DEV_MAC.findall(self.ip)
+
+    def mac2ip(self, mac):  # IPv4 components encoded in last 4 MAC octets
+        components = mac.split(':')[2:]
+        return [str(int(c, 16)) for c in components]
+
+    def get_ip(self, dev, components):  # ETHx_IP override, else MAC-derived
+        var_name = dev.upper() + '_IP'
+        if var_name in self.context:
+            return self.context[var_name]
+        else:
+            return '.'.join(components)
+
+    def get_mask(self, dev):  # ETHx_MASK override, else /24 default
+        var_name = dev.upper() + '_MASK'
+        if var_name in self.context:
+            return self.context[var_name]
+        else:
+            return '255.255.255.0'
+
+    def get_network(self, dev, components):  # ETHx_NETWORK, else x.y.z.0
+        var_name = dev.upper() + '_NETWORK'
+        if var_name in self.context:
+            return self.context[var_name]
+        else:
+            return '.'.join(components[:-1]) + '.0'
+
+    def get_gateway(self, dev):  # ETHx_GATEWAY or None
+        var_name = dev.upper() + '_GATEWAY'
+        if var_name in self.context:
+            return self.context[var_name]
+        else:
+            return None
+
+    def get_dns(self, dev):  # ETHx_DNS or None
+        var_name = dev.upper() + '_DNS'
+        if var_name in self.context:
+            return self.context[var_name]
+        else:
+            return None
+
+    def get_domain(self, dev):  # ETHx_DOMAIN or None
+        var_name = dev.upper() + '_DOMAIN'
+        if var_name in self.context:
+            return self.context[var_name]
+        else:
+            return None
+
+    def gen_conf(self):  # render /etc/network/interfaces style text
+        global_dns = []
+        if 'DNS' in self.context:
+            global_dns.append(self.context['DNS'])
+
+        conf = []
+        conf.append('auto lo')
+        conf.append('iface lo inet loopback')
+        conf.append('')
+
+        for i in self.ifaces:
+            dev = i[0]
+            mac = i[1]
+            ip_components = self.mac2ip(mac)
+
+            conf.append('auto ' + dev)
+            conf.append('iface ' + dev + ' inet static')
+            conf.append(' address ' + self.get_ip(dev, ip_components))
+            conf.append(' network ' + self.get_network(dev, ip_components))
+            conf.append(' netmask ' + self.get_mask(dev))
+
+            gateway = self.get_gateway(dev)
+            if gateway:
+                conf.append(' gateway ' + gateway)
+
+            domain = self.get_domain(dev)
+            if domain:
+                conf.append(' dns-search ' + domain)
+
+            # add global DNS servers to all interfaces
+            dns = self.get_dns(dev)
+            if global_dns or dns:
+                all_dns = list(global_dns)  # copy: don't leak per-dev dns
+                if dns:
+                    all_dns.append(dns)
+                conf.append(' dns-nameservers ' + ' '.join(all_dns))
+
+            conf.append('')
+
+        return "\n".join(conf)
+
+
+def find_candidate_devs():
+    """
+    Return a list of devices that may contain the context disk.
+    """
+    combined = []
+    for f in ('LABEL=CONTEXT', 'LABEL=CDROM', 'TYPE=iso9660'):  # preference order
+        devs = util.find_devs_with(f)
+        devs.sort()
+        for d in devs:
+            if d not in combined:  # dedupe while keeping preference order
+                combined.append(d)
+
+    return combined
+
+
+def switch_user_cmd(user):
+    return ['sudo', '-u', user]  # command prefix to run the parser as 'user'
+
+
+def parse_shell_config(content, keylist=None, bash=None, asuser=None,
+                       switch_user_cb=None):
+
+    if isinstance(bash, str):
+        bash = [bash]
+    elif bash is None:
+        bash = ['bash', '-e']
+
+    if switch_user_cb is None:
+        switch_user_cb = switch_user_cmd
+
+    # allvars expands to all existing variables by using '${!x*}' notation
+    # where x is lower or upper case letters or '_'
+    allvars = ["${!%s*}" % x for x in string.letters + "_"]  # NOTE: string.letters is Python 2-only
+
+    keylist_in = keylist
+    if keylist is None:
+        keylist = allvars
+        keylist_in = []
+
+    setup = '\n'.join(('__v="";', '',))
+
+    def varprinter(vlist):
+        # output: '_start_\0' followed by 'key=value\0' for each var in vlist
+        return '\n'.join((
+            'printf "%s\\0" _start_',
+            'for __v in %s; do' % ' '.join(vlist),
+            ' printf "%s=%s\\0" "$__v" "${!__v}";',
+            'done',
+            ''
+        ))
+
+    # the rendered 'bcmd' is bash syntax that does
+    # setup: declare variables we use (so they show up in 'all')
+    # varprinter(allvars): print all variables known at beginning
+    # content: execute the provided content
+    # varprinter(keylist): print all variables known after content
+    #
+    # output is then a null terminated array of:
+    #   literal '_start_'
+    #   key=value (for each preset variable)
+    #   literal '_start_'
+    #   key=value (for each post set variable)
+    bcmd = ('unset IFS\n' +
+            setup +
+            varprinter(allvars) +
+            '{\n%s\n\n:\n} > /dev/null\n' % content +
+            'unset IFS\n' +
+            varprinter(keylist) + "\n")
+
+    cmd = []
+    if asuser is not None:
+        cmd = switch_user_cb(asuser)
+
+    cmd.extend(bash)
+
+    (output, _error) = util.subp(cmd, data=bcmd)
+
+    # exclude vars in bash that change on their own or that we used
+    excluded = ("RANDOM", "LINENO", "_", "__v")
+    preset = {}
+    ret = {}
+    target = None
+    output = output[0:-1]  # remove trailing null
+
+    # go through output. First _start_ is for 'preset', second for 'target'.
+    # Add to target only things that were changed and are not volatile
+    for line in output.split("\x00"):
+        try:
+            (key, val) = line.split("=", 1)
+            if target is preset:
+                target[key] = val
+            elif (key not in excluded and
+                  (key in keylist_in or preset.get(key) != val)):
+                ret[key] = val
+        except ValueError:
+            if line != "_start_":
+                raise
+            if target is None:
+                target = preset
+            elif target is preset:
+                target = ret
+
+    return ret
+
+
+def read_context_disk_dir(source_dir, asuser=None):
+    """
+    read_context_disk_dir(source_dir):
+    read source_dir and return a dict with metadata dict and user-data
+    string populated. If not a valid dir, raise a NonContextDiskDir
+    """
+    found = {}
+    for af in CONTEXT_DISK_FILES:
+        fn = os.path.join(source_dir, af)
+        if os.path.isfile(fn):
+            found[af] = fn
+
+    if not found:
+        raise NonContextDiskDir("%s: %s" % (source_dir, "no files found"))
+
+    context = {}
+    results = {'userdata': None, 'metadata': {}}
+
+    if "context.sh" in found:
+        if asuser is not None:
+            try:
+                pwd.getpwnam(asuser)
+            except KeyError:
+                raise BrokenContextDiskDir("configured user '%s' "
+                                           "does not exist" % asuser)
+        try:
+            with open(os.path.join(source_dir, 'context.sh'), 'r') as f:
+                content = f.read().strip()
+
+            context = parse_shell_config(content, asuser=asuser)
+        except util.ProcessExecutionError as e:
+            raise BrokenContextDiskDir("Error processing context.sh: %s" % (e))
+        except IOError as e:
+            raise NonContextDiskDir("Error reading context.sh: %s" % (e))
+    else:
+        raise NonContextDiskDir("Missing context.sh")
+
+    if not context:
+        return results
+
+    results['metadata'] = context
+
+    # process single or multiple SSH keys
+    ssh_key_var = None
+    if "SSH_KEY" in context:
+        ssh_key_var = "SSH_KEY"
+    elif "SSH_PUBLIC_KEY" in context:
+        ssh_key_var = "SSH_PUBLIC_KEY"
+
+    if ssh_key_var:
+        lines = context.get(ssh_key_var).splitlines()
+        results['metadata']['public-keys'] = [l for l in lines
+            if len(l) and not l.startswith("#")]
+
+    # custom hostname -- try hostname or leave cloud-init
+    # itself create hostname from IP address later
+    for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
+        if k in context:
+            results['metadata']['local-hostname'] = context[k]
+            break
+
+    # raw user data
+    if "USER_DATA" in context:
+        results['userdata'] = context["USER_DATA"]
+    elif "USERDATA" in context:
+        results['userdata'] = context["USERDATA"]
+
+    # generate static /etc/network/interfaces
+    # only if there are any required context variables
+    # http://opennebula.org/documentation:rel3.8:cong#network_configuration
+    for k in context.keys():
+        if re.match(r'^ETH\d+_IP$', k):
+            (out, _) = util.subp(['/sbin/ip', 'link'])
+            net = OpenNebulaNetwork(out, context)
+            results['network-interfaces'] = net.gen_conf()
+            break
+
+    return results
+
+
+# Used to match classes to dependencies (filesystem-only vs. networked)
+datasources = [
+    (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )),
+    (DataSourceOpenNebulaNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+    return sources.list_from_depends(depends, datasources)  # delegates matching
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 1ce20c10..551b20c4 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -27,6 +27,7 @@
#
+import base64
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
@@ -35,8 +36,6 @@ import os.path
import serial
-DEF_TTY_LOC = '/dev/ttyS1'
-DEF_TTY_TIMEOUT = 60
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
@@ -47,17 +46,66 @@ SMARTOS_ATTRIB_MAP = {
'user-data': ('user-data', False),
'iptables_disable': ('iptables_disable', True),
'motd_sys_info': ('motd_sys_info', True),
+ 'availability_zone': ('datacenter_name', True),
+}
+
+DS_NAME = 'SmartOS'
+DS_CFG_PATH = ['datasource', DS_NAME]
+# BUILT-IN DATASOURCE CONFIGURATION
+# The following is the built-in configuration. If the values
+# are not set via the system configuration, then these defaults
+# will be used:
+# serial_device: which serial device to use for the meta-data
+# seed_timeout: how long to wait on the device
+# no_base64_decode: values which are not base64 encoded and
+# are fetched directly from SmartOS, not meta-data values
+# base64_keys: meta-data keys that are delivered in base64
+# base64_all: with the exclusion of no_base64_decode values,
+# treat all meta-data as base64 encoded
+# disk_setup: describes how to partition the ephemeral drive
+# fs_setup: describes how to format the ephemeral drive
+#
+BUILTIN_DS_CONFIG = {
+ 'serial_device': '/dev/ttyS1',
+ 'seed_timeout': 60,
+ 'no_base64_decode': ['root_authorized_keys',
+ 'motd_sys_info',
+ 'iptables_disable'],
+ 'base64_keys': [],
+ 'base64_all': False,
+ 'disk_aliases': {'ephemeral0': '/dev/vdb'},
+}
+
+BUILTIN_CLOUD_CONFIG = {
+ 'disk_setup': {
+ 'ephemeral0': {'table_type': 'mbr',
+ 'layout': False,
+ 'overwrite': False}
+ },
+ 'fs_setup': [{'label': 'ephemeral0',
+ 'filesystem': 'ext3',
+ 'device': 'ephemeral0'}],
}
class DataSourceSmartOS(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'sdc')
self.is_smartdc = None
- self.seed = self.sys_cfg.get("serial_device", DEF_TTY_LOC)
- self.seed_timeout = self.sys_cfg.get("serial_timeout",
- DEF_TTY_TIMEOUT)
+
+ self.ds_cfg = util.mergemanydict([
+ self.ds_cfg,
+ util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
+ BUILTIN_DS_CONFIG])
+
+ self.metadata = {}
+ self.cfg = BUILTIN_CLOUD_CONFIG
+
+ self.seed = self.ds_cfg.get("serial_device")
+ self.seed_timeout = self.ds_cfg.get("serial_timeout")
+ self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
+ self.b64_keys = self.ds_cfg.get('base64_keys')
+ self.b64_all = self.ds_cfg.get('base64_all')
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -70,7 +118,6 @@ class DataSourceSmartOS(sources.DataSource):
if not os.path.exists(self.seed):
LOG.debug("Host does not appear to be on SmartOS")
return False
- self.seed = self.seed
dmi_info = dmi_data()
if dmi_info is False:
@@ -79,35 +126,60 @@ class DataSourceSmartOS(sources.DataSource):
system_uuid, system_type = dmi_info
if 'smartdc' not in system_type.lower():
- LOG.debug("Host is not on SmartOS")
+ LOG.debug("Host is not on SmartOS. system_type=%s", system_type)
return False
self.is_smartdc = True
md['instance-id'] = system_uuid
+ b64_keys = self.query('base64_keys', strip=True, b64=False)
+ if b64_keys is not None:
+ self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
+
+ b64_all = self.query('base64_all', strip=True, b64=False)
+ if b64_all is not None:
+ self.b64_all = util.is_true(b64_all)
+
for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems():
smartos_noun, strip = attribute
- md[ci_noun] = query_data(smartos_noun, self.seed,
- self.seed_timeout, strip=strip)
+ md[ci_noun] = self.query(smartos_noun, strip=strip)
if not md['local-hostname']:
md['local-hostname'] = system_uuid
+ ud = None
if md['user-data']:
ud = md['user-data']
- else:
+ elif md['user-script']:
ud = md['user-script']
- self.metadata = md
+ self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
return True
+ def device_name_to_device(self, name):
+ return self.ds_cfg['disk_aliases'].get(name)
+
+ def get_config_obj(self):
+ return self.cfg
+
def get_instance_id(self):
return self.metadata['instance-id']
+ def query(self, noun, strip=False, default=None, b64=None):
+ if b64 is None:
+ if noun in self.smartos_no_base64:
+ b64 = False
+ elif self.b64_all or noun in self.b64_keys:
+ b64 = True
+
+ return query_data(noun=noun, strip=strip, seed_device=self.seed,
+ seed_timeout=self.seed_timeout, default=default,
+ b64=b64)
+
def get_serial(seed_device, seed_timeout):
"""This is replaced in unit testing, allowing us to replace
- serial.Serial with a mocked class
+ serial.Serial with a mocked class.
The timeout value of 60 seconds should never be hit. The value
is taken from SmartOS own provisioning tools. Since we are reading
@@ -124,12 +196,18 @@ def get_serial(seed_device, seed_timeout):
return ser
-def query_data(noun, seed_device, seed_timeout, strip=False):
+def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
+ b64=None):
"""Makes a request via the serial console via "GET <NOUN>"
In the response, the first line is the status, while subsequent lines
are the value. A blank line with a "." is used to indicate end of
response.
+
+    If the response is expected to be base64 encoded, then set b64
+    to true. Unfortunately, there is no way to know if something is 100%
+    encoded, so this method relies on being told if the data is base64 or
+    not.
"""
if not noun:
@@ -143,7 +221,7 @@ def query_data(noun, seed_device, seed_timeout, strip=False):
if 'SUCCESS' not in status:
ser.close()
- return None
+ return default
while not eom_found:
m = ser.readline()
@@ -153,12 +231,27 @@ def query_data(noun, seed_device, seed_timeout, strip=False):
response.append(m)
ser.close()
- if not strip:
- return "".join(response)
+
+ if b64 is None:
+ b64 = query_data('b64-%s' % noun, seed_device=seed_device,
+ seed_timeout=seed_timeout, b64=False,
+ default=False, strip=True)
+ b64 = util.is_true(b64)
+
+ resp = None
+ if b64 or strip:
+ resp = "".join(response).rstrip()
else:
- return "".join(response).rstrip()
+ resp = "".join(response)
+
+ if b64:
+ try:
+ return base64.b64decode(resp)
+ except TypeError:
+ LOG.warn("Failed base64 decoding key '%s'", noun)
+ return resp
- return None
+ return resp
def dmi_data():
@@ -181,7 +274,7 @@ def dmi_data():
except Exception as e:
util.logexc(LOG, "Failed to get system UUID", e)
- return sys_uuid.lower(), sys_type
+ return (sys_uuid.lower().strip(), sys_type.strip())
# Used to match classes to dependencies
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 974c0407..7dc1fbde 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -53,9 +53,16 @@ class DataSource(object):
self.userdata = None
self.metadata = None
self.userdata_raw = None
+
+ # find the datasource config name.
+ # remove 'DataSource' from classname on front, and remove 'Net' on end.
+ # Both Foo and FooNet sources expect config in cfg['sources']['Foo']
name = type_utils.obj_name(self)
if name.startswith(DS_PREFIX):
name = name[len(DS_PREFIX):]
+ if name.endswith('Net'):
+ name = name[0:-3]
+
self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
("datasource", name), {})
if not ud_proc:
@@ -144,7 +151,7 @@ class DataSource(object):
return "iid-datasource"
return str(self.metadata['instance-id'])
- def get_hostname(self, fqdn=False):
+ def get_hostname(self, fqdn=False, resolve_ip=False):
defdomain = "localdomain"
defhost = "localhost"
domain = defdomain
@@ -168,7 +175,14 @@ class DataSource(object):
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
lhost = self.metadata['local-hostname']
if util.is_ipv4(lhost):
- toks = ["ip-%s" % lhost.replace(".", "-")]
+ toks = []
+ if resolve_ip:
+ toks = util.gethostbyaddr(lhost)
+
+ if toks:
+ toks = str(toks).split('.')
+ else:
+ toks = ["ip-%s" % lhost.replace(".", "-")]
else:
toks = lhost.split(".")