Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/config/cc_apt_configure.py    |  38
-rw-r--r--  cloudinit/config/cc_growpart.py         |   9
-rw-r--r--  cloudinit/config/cc_resizefs.py         |  11
-rw-r--r--  cloudinit/distros/__init__.py           |   9
-rw-r--r--  cloudinit/distros/debian.py             |   8
-rw-r--r--  cloudinit/distros/rhel.py               |  10
-rw-r--r--  cloudinit/distros/sles.py               |  10
-rw-r--r--  cloudinit/handlers/__init__.py          |   1
-rw-r--r--  cloudinit/handlers/cloud_config.py      |  35
-rw-r--r--  cloudinit/settings.py                   |   1
-rw-r--r--  cloudinit/sources/DataSourceAzure.py    | 143
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py  | 244
-rw-r--r--  cloudinit/util.py                       |  58
13 files changed, 491 insertions, 86 deletions
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 3ce3b351..5a407016 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -27,7 +27,8 @@ from cloudinit import util
distros = ['ubuntu', 'debian']
PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n"
-PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy"
+APT_CONFIG_FN = "/etc/apt/apt.conf.d/94cloud-init-config"
+APT_PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy"
# A temporary shell program to get a given gpg key
# from a given keyserver
@@ -67,18 +68,10 @@ def handle(name, cfg, cloud, log, _args):
"security": "security.ubuntu.com/ubuntu"})
rename_apt_lists(old_mirrors, mirrors)
- # Set up any apt proxy
- proxy = cfg.get("apt_proxy", None)
- proxy_filename = PROXY_FN
- if proxy:
- try:
- # See man 'apt.conf'
- contents = PROXY_TPL % (proxy)
- util.write_file(proxy_filename, contents)
- except Exception as e:
- util.logexc(log, "Failed to write proxy to %s", proxy_filename)
- elif os.path.isfile(proxy_filename):
- util.del_file(proxy_filename)
+ try:
+ apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
+ except Exception as e:
+ log.warn("failed to proxy or apt config info: %s", e)
# Process 'apt_sources'
if 'apt_sources' in cfg:
@@ -256,3 +249,22 @@ def find_apt_mirror_info(cloud, cfg):
mirror_info.update({'primary': mirror})
return mirror_info
+
+
+def apply_apt_config(cfg, proxy_fname, config_fname):
+ # Set up any apt proxy
+ cfgs = (('apt_proxy', 'Acquire::HTTP::Proxy "%s";'),
+ ('apt_http_proxy', 'Acquire::HTTP::Proxy "%s";'),
+ ('apt_ftp_proxy', 'Acquire::FTP::Proxy "%s";'),
+ ('apt_https_proxy', 'Acquire::HTTPS::Proxy "%s";'))
+
+ proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
+ if len(proxies):
+ util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
+ elif os.path.isfile(proxy_fname):
+ util.del_file(proxy_fname)
+
+ if cfg.get('apt_config', None):
+ util.write_file(config_fname, cfg.get('apt_config'))
+ elif os.path.isfile(config_fname):
+ util.del_file(config_fname)
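
For illustration, a minimal sketch of how the new apply_apt_config() helper is driven by cloud-config keys (the proxy URLs and apt_config value below are hypothetical examples, not from the commit):

    from cloudinit.config.cc_apt_configure import (
        apply_apt_config, APT_PROXY_FN, APT_CONFIG_FN)

    cfg = {
        'apt_proxy': 'http://proxy.example.com:3128',        # hypothetical
        'apt_https_proxy': 'https://proxy.example.com:3128',  # hypothetical
        'apt_config': 'APT::Get::Assume-Yes "true";\n',
    }
    apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
    # /etc/apt/apt.conf.d/95cloud-init-proxy would now contain:
    #   Acquire::HTTP::Proxy "http://proxy.example.com:3128";
    #   Acquire::HTTPS::Proxy "https://proxy.example.com:3128";
    # /etc/apt/apt.conf.d/94cloud-init-config holds the raw apt_config
    # string; removing the keys deletes both files on the next run.
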
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 4f8c8f80..2d54aabf 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -96,7 +96,7 @@ class ResizeParted(object):
def resize(self, diskdev, partnum, partdev):
before = get_size(partdev)
try:
- util.subp(["parted", "resizepart", diskdev, partnum])
+ util.subp(["parted", diskdev, "resizepart", partnum])
except util.ProcessExecutionError as e:
raise ResizeFailedException(e)
@@ -264,11 +264,14 @@ def handle(_name, cfg, _cloud, log, _args):
raise e
return
- resized = resize_devices(resizer, devices)
+ resized = util.log_time(logfunc=log.debug, msg="resize_devices",
+ func=resize_devices, args=(resizer, devices))
for (entry, action, msg) in resized:
if action == RESIZE.CHANGED:
log.info("'%s' resized: %s" % (entry, msg))
else:
log.debug("'%s' %s: %s" % (entry, action, msg))
-RESIZERS = (('parted', ResizeParted), ('growpart', ResizeGrowPart))
+# LP: 1212444 FIXME re-order and favor ResizeParted
+#RESIZERS = (('growpart', ResizeGrowPart),)
+RESIZERS = (('growpart', ResizeGrowPart), ('parted', ResizeParted))
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index b4ee16b2..56040fdd 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -21,7 +21,6 @@
import errno
import os
import stat
-import time
from cloudinit.settings import PER_ALWAYS
from cloudinit import util
@@ -120,9 +119,12 @@ def handle(name, cfg, _cloud, log, args):
if resize_root == NOBLOCK:
# Fork to a child that will run
# the resize command
- util.fork_cb(do_resize, resize_cmd, log)
+ util.fork_cb(lambda: util.log_time(
+ logfunc=log.debug, msg="backgrounded Resizing",
+ func=do_resize, args=(resize_cmd, log)))
else:
- do_resize(resize_cmd, log)
+ util.log_time(logfunc=log.debug, msg="Resizing",
+ func=do_resize, args=(resize_cmd, log))
action = 'Resized'
if resize_root == NOBLOCK:
@@ -132,13 +134,10 @@ def handle(name, cfg, _cloud, log, args):
def do_resize(resize_cmd, log):
- start = time.time()
try:
util.subp(resize_cmd)
except util.ProcessExecutionError:
util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
raise
- tot_time = time.time() - start
- log.debug("Resizing took %.3f seconds", tot_time)
# TODO(harlowja): Should we add a fsck check after this to make
# sure we didn't corrupt anything?
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 249e1b19..74e95797 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -47,9 +47,11 @@ LOG = logging.getLogger(__name__)
class Distro(object):
__metaclass__ = abc.ABCMeta
+
hosts_fn = "/etc/hosts"
ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users"
hostname_conf_fn = "/etc/hostname"
+ tz_zone_dir = "/usr/share/zoneinfo"
def __init__(self, name, cfg, paths):
self._paths = paths
@@ -66,6 +68,13 @@ class Distro(object):
# to write this blob out in a distro format
raise NotImplementedError()
+ def _find_tz_file(self, tz):
+ tz_file = os.path.join(self.tz_zone_dir, str(tz))
+ if not os.path.isfile(tz_file):
+ raise IOError(("Invalid timezone %s,"
+ " no file found at %s") % (tz, tz_file))
+ return tz_file
+
def get_option(self, opt_name, default=None):
return self._cfg.get(opt_name, default)
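
A short sketch of how a distro subclass now consumes the shared lookup (ExampleDistro and its util.copy step are illustrative, not part of the commit):

    from cloudinit import distros
    from cloudinit import util

    class ExampleDistro(distros.Distro):   # hypothetical subclass
        tz_local_fn = "/etc/localtime"

        def set_timezone(self, tz):
            # single validation point inherited from Distro; raises
            # IOError when /usr/share/zoneinfo/<tz> does not exist
            tz_file = self._find_tz_file(tz)
            util.copy(tz_file, self.tz_local_fn)
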
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 0811eefd..8fe49cbe 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -44,7 +44,6 @@ class Distro(distros.Distro):
network_conf_fn = "/etc/network/interfaces"
tz_conf_fn = "/etc/timezone"
tz_local_fn = "/etc/localtime"
- tz_zone_dir = "/usr/share/zoneinfo"
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -130,12 +129,7 @@ class Distro(distros.Distro):
return "127.0.1.1"
def set_timezone(self, tz):
- # TODO(harlowja): move this code into
- # the parent distro...
- tz_file = os.path.join(self.tz_zone_dir, str(tz))
- if not os.path.isfile(tz_file):
- raise RuntimeError(("Invalid timezone %s,"
- " no file found at %s") % (tz, tz_file))
+ tz_file = self._find_tz_file(tz)
# Note: "" provides trailing newline during join
tz_lines = [
util.make_header(),
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index a022ca60..30195384 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -20,8 +20,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import os
-
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
@@ -51,7 +49,6 @@ class Distro(distros.Distro):
network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s'
resolve_conf_fn = "/etc/resolv.conf"
tz_local_fn = "/etc/localtime"
- tz_zone_dir = "/usr/share/zoneinfo"
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -164,12 +161,7 @@ class Distro(distros.Distro):
return distros.Distro._bring_up_interfaces(self, device_names)
def set_timezone(self, tz):
- # TODO(harlowja): move this code into
- # the parent distro...
- tz_file = os.path.join(self.tz_zone_dir, str(tz))
- if not os.path.isfile(tz_file):
- raise RuntimeError(("Invalid timezone %s,"
- " no file found at %s") % (tz, tz_file))
+ tz_file = self._find_tz_file(tz)
if self._dist_uses_systemd():
# Currently, timedatectl complains if invoked during startup
# so for compatibility, create the link manually.
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
index 904e931a..f2ac4efc 100644
--- a/cloudinit/distros/sles.py
+++ b/cloudinit/distros/sles.py
@@ -18,8 +18,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import os
-
from cloudinit import distros
from cloudinit.distros.parsers.hostname import HostnameConf
@@ -42,7 +40,6 @@ class Distro(distros.Distro):
network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
resolve_conf_fn = '/etc/resolv.conf'
tz_local_fn = '/etc/localtime'
- tz_zone_dir = '/usr/share/zoneinfo'
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -151,12 +148,7 @@ class Distro(distros.Distro):
return distros.Distro._bring_up_interfaces(self, device_names)
def set_timezone(self, tz):
- # TODO(harlowja): move this code into
- # the parent distro...
- tz_file = os.path.join(self.tz_zone_dir, str(tz))
- if not os.path.isfile(tz_file):
- raise RuntimeError(("Invalid timezone %s,"
- " no file found at %s") % (tz, tz_file))
+ tz_file = self._find_tz_file(tz)
# Adjust the sysconfig clock zone setting
clock_cfg = {
'TIMEZONE': str(tz),
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 1d450061..2ddc75f4 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -62,6 +62,7 @@ INCLUSION_TYPES_MAP = {
'#part-handler': 'text/part-handler',
'#cloud-boothook': 'text/cloud-boothook',
'#cloud-config-archive': 'text/cloud-config-archive',
+ '#cloud-config-jsonp': 'text/cloud-config-jsonp',
}
# Sorted longest first
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 730672d7..34a73115 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -20,6 +20,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import jsonpatch
+
from cloudinit import handlers
from cloudinit import log as logging
from cloudinit import mergers
@@ -50,6 +52,13 @@ MERGE_HEADER = 'Merge-Type'
# This gets loaded into yaml with final result {'a': 22}
DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()')
CLOUD_PREFIX = "#cloud-config"
+JSONP_PREFIX = "#cloud-config-jsonp"
+
+# The file header -> content types this module will handle.
+CC_TYPES = {
+ JSONP_PREFIX: handlers.type_from_starts_with(JSONP_PREFIX),
+ CLOUD_PREFIX: handlers.type_from_starts_with(CLOUD_PREFIX),
+}
class CloudConfigPartHandler(handlers.Handler):
@@ -60,9 +69,7 @@ class CloudConfigPartHandler(handlers.Handler):
self.file_names = []
def list_types(self):
- return [
- handlers.type_from_starts_with(CLOUD_PREFIX),
- ]
+ return list(CC_TYPES.values())
def _write_cloud_config(self):
if not self.cloud_fn:
@@ -108,13 +115,21 @@ class CloudConfigPartHandler(handlers.Handler):
all_mergers = DEF_MERGERS
return (payload_yaml, all_mergers)
+ def _merge_patch(self, payload):
+ # JSON doesn't handle comments in this manner, so ensure that
+ # if we started with this 'type' that we remove it before
+ # attempting to load it as json (which the jsonpatch library will
+ # attempt to do).
+ payload = payload.lstrip()
+ payload = util.strip_prefix_suffix(payload, prefix=JSONP_PREFIX)
+ patch = jsonpatch.JsonPatch.from_string(payload)
+ LOG.debug("Merging by applying json patch %s", patch)
+ self.cloud_buf = patch.apply(self.cloud_buf, in_place=False)
+
def _merge_part(self, payload, headers):
(payload_yaml, my_mergers) = self._extract_mergers(payload, headers)
LOG.debug("Merging by applying %s", my_mergers)
merger = mergers.construct(my_mergers)
- if self.cloud_buf is None:
- # First time through, merge with an empty dict...
- self.cloud_buf = {}
self.cloud_buf = merger.merge(self.cloud_buf, payload_yaml)
def _reset(self):
@@ -131,7 +146,13 @@ class CloudConfigPartHandler(handlers.Handler):
self._reset()
return
try:
- self._merge_part(payload, headers)
+ # First time through, merge with an empty dict...
+ if self.cloud_buf is None or not self.file_names:
+ self.cloud_buf = {}
+ if ctype == CC_TYPES[JSONP_PREFIX]:
+ self._merge_patch(payload)
+ else:
+ self._merge_part(payload, headers)
# Ensure filename is ok to store
for i in ("\n", "\r", "\t"):
filename = filename.replace(i, " ")
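
To make the new part type concrete, here is a standalone sketch of the merge step _merge_patch() performs, using the jsonpatch library the handler imports (the buffer contents and patch are invented examples). A user-data part simply starts with #cloud-config-jsonp followed by an RFC 6902 patch list:

    import jsonpatch

    cloud_buf = {'hostname': 'node1', 'runcmd': ['echo hi']}
    payload = ('[{"op": "replace", "path": "/hostname", "value": "node2"},'
               ' {"op": "add", "path": "/runcmd/1", "value": "echo bye"}]')
    patch = jsonpatch.JsonPatch.from_string(payload)
    # in_place=False returns a new dict, as in _merge_patch() above
    print(patch.apply(cloud_buf, in_place=False))
    # -> {'hostname': 'node2', 'runcmd': ['echo hi', 'echo bye']}
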
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index dc371cd2..9f6badae 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -37,6 +37,7 @@ CFG_BUILTIN = {
'MAAS',
'Ec2',
'CloudStack',
+ 'SmartOS',
# At the end to act as a 'catch' when none of the above work...
'None',
],
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 0a5caebe..66d7728b 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -17,6 +17,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
+import crypt
import os
import os.path
import time
@@ -31,9 +32,21 @@ LOG = logging.getLogger(__name__)
DS_NAME = 'Azure'
DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
AGENT_START = ['service', 'walinuxagent', 'start']
-BUILTIN_DS_CONFIG = {'datasource': {DS_NAME: {
- 'agent_command': AGENT_START,
- 'data_dir': "/var/lib/waagent"}}}
+BOUNCE_COMMAND = ['sh', '-xc',
+ "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]
+
+BUILTIN_DS_CONFIG = {
+ 'agent_command': AGENT_START,
+ 'data_dir': "/var/lib/waagent",
+ 'set_hostname': True,
+ 'hostname_bounce': {
+ 'interface': 'eth0',
+ 'policy': True,
+ 'command': BOUNCE_COMMAND,
+ 'hostname_command': 'hostname',
+ }
+}
+DS_CFG_PATH = ['datasource', DS_NAME]
class DataSourceAzureNet(sources.DataSource):
@@ -42,19 +55,19 @@ class DataSourceAzureNet(sources.DataSource):
self.seed_dir = os.path.join(paths.seed_dir, 'azure')
self.cfg = {}
self.seed = None
+ self.ds_cfg = util.mergemanydict([
+ util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
+ BUILTIN_DS_CONFIG])
def __str__(self):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
def get_data(self):
- ddir_cfgpath = ['datasource', DS_NAME, 'data_dir']
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
# need to look in the datadir and consider that valid
- ddir = util.get_cfg_by_path(self.sys_cfg, ddir_cfgpath)
- if ddir is None:
- ddir = util.get_cfg_by_path(BUILTIN_DS_CONFIG, ddir_cfgpath)
+ ddir = self.ds_cfg['data_dir']
candidates = [self.seed_dir]
candidates.extend(list_possible_azure_ds_devs())
@@ -91,44 +104,46 @@ class DataSourceAzureNet(sources.DataSource):
return False
if found == ddir:
- LOG.debug("using cached datasource in %s", ddir)
-
- fields = [('cmd', ['datasource', DS_NAME, 'agent_command']),
- ('datadir', ddir_cfgpath)]
- mycfg = {}
- for cfg in (self.cfg, self.sys_cfg, BUILTIN_DS_CONFIG):
- for name, path in fields:
- if name in mycfg:
- continue
- value = util.get_cfg_by_path(cfg, keyp=path)
- if value is not None:
- mycfg[name] = value
+ LOG.debug("using files cached in %s", ddir)
+
+ # now update ds_cfg to reflect contents passed in config
+ usercfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
+ self.ds_cfg = util.mergemanydict([usercfg, self.ds_cfg])
+ mycfg = self.ds_cfg
# walinux agent writes files world readable, but expects
# the directory to be protected.
- write_files(mycfg['datadir'], files, dirmode=0700)
+ write_files(mycfg['data_dir'], files, dirmode=0700)
+
+ # handle the hostname 'publishing'
+ try:
+ handle_set_hostname(mycfg.get('set_hostname'),
+ self.metadata.get('local-hostname'),
+ mycfg['hostname_bounce'])
+ except Exception as e:
+ LOG.warn("Failed publishing hostname: %s" % e)
+ util.logexc(LOG, "handling set_hostname failed")
try:
- invoke_agent(mycfg['cmd'])
+ invoke_agent(mycfg['agent_command'])
except util.ProcessExecutionError:
# claim the datasource even if the command failed
- util.logexc(LOG, "agent command '%s' failed.", mycfg['cmd'])
+ util.logexc(LOG, "agent command '%s' failed.",
+ mycfg['agent_command'])
- shcfgxml = os.path.join(mycfg['datadir'], "SharedConfig.xml")
+ shcfgxml = os.path.join(mycfg['data_dir'], "SharedConfig.xml")
wait_for = [shcfgxml]
fp_files = []
for pk in self.cfg.get('_pubkeys', []):
bname = pk['fingerprint'] + ".crt"
- fp_files += [os.path.join(mycfg['datadir'], bname)]
+ fp_files += [os.path.join(mycfg['data_dir'], bname)]
- start = time.time()
- missing = wait_for_files(wait_for + fp_files)
+ missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+ func=wait_for_files,
+ args=(wait_for + fp_files,))
if len(missing):
LOG.warn("Did not find files, but going on: %s", missing)
- else:
- LOG.debug("waited %.3f seconds for %d files to appear",
- time.time() - start, len(wait_for))
if shcfgxml in missing:
LOG.warn("SharedConfig.xml missing, using static instance-id")
@@ -148,6 +163,56 @@ class DataSourceAzureNet(sources.DataSource):
return self.cfg
+def handle_set_hostname(enabled, hostname, cfg):
+ if not util.is_true(enabled):
+ return
+
+ if not hostname:
+ LOG.warn("set_hostname was true but no local-hostname")
+ return
+
+ apply_hostname_bounce(hostname=hostname, policy=cfg['policy'],
+ interface=cfg['interface'],
+ command=cfg['command'],
+ hostname_command=cfg['hostname_command'])
+
+
+def apply_hostname_bounce(hostname, policy, interface, command,
+ hostname_command="hostname"):
+ # set the hostname to 'hostname' if it is not already set to that.
+ # then, if policy is not off, bounce the interface using command
+ prev_hostname = util.subp(hostname_command, capture=True)[0].strip()
+
+ util.subp([hostname_command, hostname])
+
+ msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
+ (prev_hostname, hostname, policy, interface))
+
+ if util.is_false(policy):
+ LOG.debug("pubhname: policy false, skipping [%s]", msg)
+ return
+
+ if prev_hostname == hostname and policy != "force":
+ LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
+ return
+
+ env = os.environ.copy()
+ env['interface'] = interface
+ env['hostname'] = hostname
+ env['old_hostname'] = prev_hostname
+
+ if command == "builtin":
+ command = BOUNCE_COMMAND
+
+ LOG.debug("pubhname: publishing hostname [%s]", msg)
+ shell = not isinstance(command, (list, tuple))
+ # capture=False, see comments in bug 1202758 and bug 1206164.
+ util.log_time(logfunc=LOG.debug, msg="publishing hostname",
+ get_uptime=True, func=util.subp,
+ kwargs={'args': command, 'shell': shell, 'capture': False,
+ 'env': env})
+
+
def crtfile_to_pubkey(fname):
pipeline = ('openssl x509 -noout -pubkey < "$0" |'
'ssh-keygen -i -m PKCS8 -f /dev/stdin')
@@ -319,15 +384,21 @@ def read_azure_ovf(contents):
name = child.localName.lower()
simple = False
+ value = ""
if (len(child.childNodes) == 1 and
child.childNodes[0].nodeType == dom.TEXT_NODE):
simple = True
value = child.childNodes[0].wholeText
+ attrs = {k: v for k, v in child.attributes.items()}
+
# we accept either UserData or CustomData. If both are present
# then behavior is undefined.
if (name == "userdata" or name == "customdata"):
- ud = base64.b64decode(''.join(value.split()))
+ if attrs.get('encoding') in (None, "base64"):
+ ud = base64.b64decode(''.join(value.split()))
+ else:
+ ud = value
elif name == "username":
username = value
elif name == "userpassword":
@@ -335,7 +406,11 @@ def read_azure_ovf(contents):
elif name == "hostname":
md['local-hostname'] = value
elif name == "dscfg":
- cfg['datasource'] = {DS_NAME: util.load_yaml(value, default={})}
+ if attrs.get('encoding') in (None, "base64"):
+ dscfg = base64.b64decode(''.join(value.split()))
+ else:
+ dscfg = value
+ cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
elif name == "ssh":
cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
elif name == "disablesshpasswordauthentication":
@@ -350,7 +425,7 @@ def read_azure_ovf(contents):
if username:
defuser['name'] = username
if password:
- defuser['password'] = password
+ defuser['passwd'] = encrypt_pass(password)
defuser['lock_passwd'] = False
if defuser:
@@ -362,6 +437,10 @@ def read_azure_ovf(contents):
return (md, ud, cfg)
+def encrypt_pass(password, salt_id="$6$"):
+ return crypt.crypt(password, salt_id + util.rand_str(strlen=16))
+
+
def list_possible_azure_ds_devs():
# return a sorted list of devices that might have a azure datasource
devlist = []
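
For reference, a sketch of what the "builtin" hostname bounce amounts to once apply_hostname_bounce() fills the environment (the interface and hostnames are examples; real runs go through util.subp with capture=False):

    import os
    import subprocess

    env = os.environ.copy()
    env.update({'interface': 'eth0', 'hostname': 'newhost',
                'old_hostname': 'oldhost'})
    # BOUNCE_COMMAND from above: down/up the interface so the platform's
    # dynamic DNS learns the newly published name.
    subprocess.call(['sh', '-xc',
                     'i=$interface; x=0; ifdown $i || x=$?; '
                     'ifup $i || x=$?; exit $x'], env=env)
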
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
new file mode 100644
index 00000000..d348d20b
--- /dev/null
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -0,0 +1,244 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2013 Canonical Ltd.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# Datasource for provisioning on SmartOS. This works on Joyent
+# and public/private Clouds using SmartOS.
+#
+# SmartOS hosts use a serial console (/dev/ttyS1) on Linux guests.
+# The meta-data is transmitted as key/value pairs retrieved through
+# requests on the console. For example, to get the hostname, you
+# would send "GET hostname" on /dev/ttyS1.
+#
+
+
+import base64
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+import os
+import os.path
+import serial
+
+DEF_TTY_LOC = '/dev/ttyS1'
+DEF_TTY_TIMEOUT = 60
+LOG = logging.getLogger(__name__)
+
+SMARTOS_ATTRIB_MAP = {
+ # Cloud-init Key : (SmartOS Key, Strip line endings)
+ 'local-hostname': ('hostname', True),
+ 'public-keys': ('root_authorized_keys', True),
+ 'user-script': ('user-script', False),
+ 'user-data': ('user-data', False),
+ 'iptables_disable': ('iptables_disable', True),
+ 'motd_sys_info': ('motd_sys_info', True),
+}
+
+# These are values which will never be base64 encoded.
+# They come from the cloud platform, not the user.
+SMARTOS_NO_BASE64 = ['root_authorized_keys', 'motd_sys_info',
+ 'iptables_disable']
+
+
+class DataSourceSmartOS(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.seed_dir = os.path.join(paths.seed_dir, 'sdc')
+ self.is_smartdc = None
+
+ self.seed = self.ds_cfg.get("serial_device", DEF_TTY_LOC)
+ self.seed_timeout = self.ds_cfg.get("serial_timeout", DEF_TTY_TIMEOUT)
+ self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode',
+ SMARTOS_NO_BASE64)
+ self.b64_keys = self.ds_cfg.get('base64_keys', [])
+ self.b64_all = self.ds_cfg.get('base64_all', False)
+
+ def __str__(self):
+ root = sources.DataSource.__str__(self)
+ return "%s [seed=%s]" % (root, self.seed)
+
+ def get_data(self):
+ md = {}
+ ud = ""
+
+ if not os.path.exists(self.seed):
+ LOG.debug("Host does not appear to be on SmartOS")
+ return False
+
+ dmi_info = dmi_data()
+ if dmi_info is False:
+ LOG.debug("No dmidata utility found")
+ return False
+
+ system_uuid, system_type = dmi_info
+ if 'smartdc' not in system_type.lower():
+ LOG.debug("Host is not on SmartOS. system_type=%s", system_type)
+ return False
+ self.is_smartdc = True
+ md['instance-id'] = system_uuid
+
+ b64_keys = self.query('base64_keys', strip=True, b64=False)
+ if b64_keys is not None:
+ self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
+
+ b64_all = self.query('base64_all', strip=True, b64=False)
+ if b64_all is not None:
+ self.b64_all = util.is_true(b64_all)
+
+ for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems():
+ smartos_noun, strip = attribute
+ md[ci_noun] = self.query(smartos_noun, strip=strip)
+
+ if not md['local-hostname']:
+ md['local-hostname'] = system_uuid
+
+ ud = None
+ if md['user-data']:
+ ud = md['user-data']
+ elif md['user-script']:
+ ud = md['user-script']
+
+ self.metadata = md
+ self.userdata_raw = ud
+ return True
+
+ def get_instance_id(self):
+ return self.metadata['instance-id']
+
+ def query(self, noun, strip=False, default=None, b64=None):
+ if b64 is None:
+ if noun in self.smartos_no_base64:
+ b64 = False
+ elif self.b64_all or noun in self.b64_keys:
+ b64 = True
+
+ return query_data(noun=noun, strip=strip, seed_device=self.seed,
+ seed_timeout=self.seed_timeout, default=default,
+ b64=b64)
+
+
+def get_serial(seed_device, seed_timeout):
+ """This is replaced in unit testing, allowing us to replace
+ serial.Serial with a mocked class.
+
+ The timeout value of 60 seconds should never be hit. The value
+ is taken from SmartOS own provisioning tools. Since we are reading
+ each line individually up until the single ".", the transfer is
+ usually very fast (i.e. microseconds) to get the response.
+ """
+ if not seed_device:
+ raise AttributeError("seed_device value is not set")
+
+ ser = serial.Serial(seed_device, timeout=seed_timeout)
+ if not ser.isOpen():
+ raise SystemError("Unable to open %s" % seed_device)
+
+ return ser
+
+
+def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
+ b64=None):
+ """Makes a request to via the serial console via "GET <NOUN>"
+
+ In the response, the first line is the status, while subsequent lines
+ are is the value. A blank line with a "." is used to indicate end of
+ response.
+
+ If the response is expected to be base64 encoded, then set b64encoded
+ to true. Unfortantely, there is no way to know if something is 100%
+ encoded, so this method relies on being told if the data is base64 or
+ not.
+ """
+
+ if not noun:
+ return False
+
+ ser = get_serial(seed_device, seed_timeout)
+ ser.write("GET %s\n" % noun.rstrip())
+ status = str(ser.readline()).rstrip()
+ response = []
+ eom_found = False
+
+ if 'SUCCESS' not in status:
+ ser.close()
+ return default
+
+ while not eom_found:
+ m = ser.readline()
+ if m.rstrip() == ".":
+ eom_found = True
+ else:
+ response.append(m)
+
+ ser.close()
+
+ if b64 is None:
+ b64 = query_data('b64-%s' % noun, seed_device=seed_device,
+ seed_timeout=seed_timeout, b64=False,
+ default=False, strip=True)
+ b64 = util.is_true(b64)
+
+ resp = None
+ if b64 or strip:
+ resp = "".join(response).rstrip()
+ else:
+ resp = "".join(response)
+
+ if b64:
+ try:
+ return base64.b64decode(resp)
+ except TypeError:
+ LOG.warn("Failed base64 decoding key '%s'", noun)
+ return resp
+
+ return resp
+
+
+def dmi_data():
+ sys_uuid, sys_type = None, None
+ dmidecode_path = util.which('dmidecode')
+ if not dmidecode_path:
+ return False
+
+ sys_uuid_cmd = [dmidecode_path, "-s", "system-uuid"]
+ try:
+ LOG.debug("Getting hostname from dmidecode")
+ (sys_uuid, _err) = util.subp(sys_uuid_cmd)
+ except Exception as e:
+ util.logexc(LOG, "Failed to get system UUID", e)
+
+ sys_type_cmd = [dmidecode_path, "-s", "system-product-name"]
+ try:
+ LOG.debug("Determining hypervisor product name via dmidecode")
+ (sys_type, _err) = util.subp(sys_type_cmd)
+ except Exception:
+ util.logexc(LOG, "Failed to get system product name")
+
+ return sys_uuid.lower(), sys_type
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceSmartOS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
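
An illustrative standalone version of the wire exchange query_data() performs over the serial console (the device path and returned value are examples):

    import serial

    ser = serial.Serial('/dev/ttyS1', timeout=60)
    ser.write("GET hostname\n")
    status = str(ser.readline()).rstrip()   # e.g. "SUCCESS"
    lines = []
    while True:
        line = ser.readline()
        if line.rstrip() == ".":            # a lone "." ends the response
            break
        lines.append(line)
    ser.close()
    print("".join(lines).rstrip())          # e.g. "myhost"
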
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 47d71ef4..5032cc47 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1751,3 +1751,61 @@ def get_mount_info(path, log=LOG):
mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
lines = load_file(mountinfo_path).splitlines()
return parse_mount_info(path, lines, log)
+
+
+def which(program):
+ # Return path of program for execution if found in path
+ def is_exe(fpath):
+ return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+ _fpath, _ = os.path.split(program)
+ if _fpath:
+ if is_exe(program):
+ return program
+ else:
+ for path in os.environ["PATH"].split(os.pathsep):
+ path = path.strip('"')
+ exe_file = os.path.join(path, program)
+ if is_exe(exe_file):
+ return exe_file
+
+ return None
+
+
+def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = {}
+
+ start = time.time()
+
+ ustart = None
+ if get_uptime:
+ try:
+ ustart = float(uptime())
+ except ValueError:
+ pass
+
+ try:
+ ret = func(*args, **kwargs)
+ finally:
+ delta = time.time() - start
+ udelta = None
+ if ustart is not None:
+ try:
+ udelta = float(uptime()) - ustart
+ except ValueError:
+ pass
+
+ tmsg = " took %0.3f seconds" % delta
+ if get_uptime:
+ if isinstance(udelta, float):
+ tmsg += " (%0.2f)" % udelta
+ else:
+ tmsg += " (N/A)"
+ try:
+ logfunc(msg + tmsg)
+ except:
+ pass
+ return ret
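
Finally, a usage sketch of the new log_time() helper that the changes above thread through cc_growpart, cc_resizefs and the Azure datasource (slow_op is a stand-in for any wrapped call):

    from cloudinit import util
    import logging
    import time

    LOG = logging.getLogger(__name__)

    def slow_op(n):
        time.sleep(n)
        return n

    # logs roughly "sleeping took 0.500 seconds" and returns 0.5;
    # get_uptime=True would append the uptime delta in parentheses.
    result = util.log_time(logfunc=LOG.debug, msg="sleeping",
                           func=slow_op, args=(0.5,))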