author     Scott Moser <smoser@ubuntu.com>    2016-04-04 12:07:19 -0400
committer  Scott Moser <smoser@ubuntu.com>    2016-04-04 12:07:19 -0400
commit     7d8a3194552387fa9e21216bcd9a3bfc76fa2b04 (patch)
tree       c8dc45b013208a4e5e09e6ade63b3b5994f80aa3 /cloudinit
parent     93f5af9f5075a416c65c1d0350c374e16f32f0d5 (diff)
parent     210b041b2fead7a57af91f60a6f89d9e5aa1ed4a (diff)
download   vyos-cloud-init-7d8a3194552387fa9e21216bcd9a3bfc76fa2b04.tar.gz
           vyos-cloud-init-7d8a3194552387fa9e21216bcd9a3bfc76fa2b04.zip
merge with trunk
Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/cloud.py | 9
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 14
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py | 2
-rw-r--r--  cloudinit/config/cc_bootcmd.py | 2
-rw-r--r--  cloudinit/config/cc_ca_certs.py | 4
-rw-r--r--  cloudinit/config/cc_chef.py | 6
-rw-r--r--  cloudinit/config/cc_debug.py | 7
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 165
-rw-r--r--  cloudinit/config/cc_emit_upstart.py | 27
-rw-r--r--  cloudinit/config/cc_fan.py | 101
-rw-r--r--  cloudinit/config/cc_final_message.py | 9
-rw-r--r--  cloudinit/config/cc_growpart.py | 2
-rw-r--r--  cloudinit/config/cc_grub_dpkg.py | 26
-rw-r--r--  cloudinit/config/cc_keys_to_console.py | 2
-rw-r--r--  cloudinit/config/cc_landscape.py | 14
-rw-r--r--  cloudinit/config/cc_locale.py | 6
-rw-r--r--  cloudinit/config/cc_lxd.py | 85
-rw-r--r--  cloudinit/config/cc_mcollective.py | 15
-rw-r--r--  cloudinit/config/cc_mounts.py | 123
-rw-r--r--  cloudinit/config/cc_phone_home.py | 4
-rw-r--r--  cloudinit/config/cc_power_state_change.py | 57
-rw-r--r--  cloudinit/config/cc_puppet.py | 16
-rw-r--r--  cloudinit/config/cc_resizefs.py | 2
-rw-r--r--  cloudinit/config/cc_resolv_conf.py | 4
-rw-r--r--  cloudinit/config/cc_rh_subscription.py | 402
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py | 6
-rw-r--r--  cloudinit/config/cc_rsyslog.py | 343
-rw-r--r--  cloudinit/config/cc_runcmd.py | 2
-rw-r--r--  cloudinit/config/cc_salt_minion.py | 2
-rw-r--r--  cloudinit/config/cc_seed_random.py | 17
-rw-r--r--  cloudinit/config/cc_set_hostname.py | 2
-rw-r--r--  cloudinit/config/cc_set_passwords.py | 25
-rw-r--r--  cloudinit/config/cc_snappy.py | 304
-rw-r--r--  cloudinit/config/cc_ssh.py | 74
-rw-r--r--  cloudinit/config/cc_ssh_authkey_fingerprints.py | 2
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py | 6
-rw-r--r--  cloudinit/config/cc_update_hostname.py | 2
-rw-r--r--  cloudinit/config/cc_write_files.py | 5
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 9
-rw-r--r--  cloudinit/cs_utils.py | 4
-rw-r--r--  cloudinit/distros/__init__.py | 173
-rw-r--r--  cloudinit/distros/arch.py | 10
-rw-r--r--  cloudinit/distros/debian.py | 20
-rw-r--r--  cloudinit/distros/freebsd.py | 13
-rw-r--r--  cloudinit/distros/gentoo.py | 6
-rw-r--r--  cloudinit/distros/net_util.py | 2
-rw-r--r--  cloudinit/distros/parsers/hostname.py | 4
-rw-r--r--  cloudinit/distros/parsers/hosts.py | 2
-rw-r--r--  cloudinit/distros/parsers/resolv_conf.py | 4
-rw-r--r--  cloudinit/distros/parsers/sys_conf.py | 12
-rw-r--r--  cloudinit/distros/rhel.py | 19
-rw-r--r--  cloudinit/distros/rhel_util.py | 4
-rw-r--r--  cloudinit/distros/sles.py | 4
-rw-r--r--  cloudinit/ec2_utils.py | 22
-rw-r--r--  cloudinit/filters/launch_index.py | 2
-rw-r--r--  cloudinit/handlers/__init__.py | 43
-rw-r--r--  cloudinit/handlers/boot_hook.py | 2
-rw-r--r--  cloudinit/handlers/cloud_config.py | 2
-rw-r--r--  cloudinit/handlers/shell_script.py | 2
-rw-r--r--  cloudinit/handlers/upstart_job.py | 2
-rw-r--r--  cloudinit/helpers.py | 45
-rw-r--r--  cloudinit/log.py | 7
-rw-r--r--  cloudinit/mergers/__init__.py | 4
-rw-r--r--  cloudinit/mergers/m_dict.py | 4
-rw-r--r--  cloudinit/mergers/m_list.py | 6
-rw-r--r--  cloudinit/mergers/m_str.py | 10
-rw-r--r--  cloudinit/net/__init__.py | 751
-rw-r--r--  cloudinit/net/network_state.py | 446
-rw-r--r--  cloudinit/net/udev.py | 54
-rw-r--r--  cloudinit/netinfo.py | 4
-rw-r--r--  cloudinit/registry.py | 37
-rw-r--r--  cloudinit/reporting/__init__.py | 42
-rw-r--r--  cloudinit/reporting/events.py | 246
-rw-r--r--  cloudinit/reporting/handlers.py | 91
-rw-r--r--  cloudinit/settings.py | 3
-rw-r--r--  cloudinit/signal_handler.py | 2
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py | 39
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 324
-rw-r--r--  cloudinit/sources/DataSourceBigstep.py | 57
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py | 22
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py | 80
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 145
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py | 23
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 23
-rw-r--r--  cloudinit/sources/DataSourceGCE.py | 103
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py | 213
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 131
-rw-r--r--  cloudinit/sources/DataSourceNone.py | 4
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 141
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 20
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 4
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 337
-rw-r--r--  cloudinit/sources/__init__.py | 62
-rw-r--r--  cloudinit/sources/helpers/azure.py | 278
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 21
-rw-r--r--  cloudinit/sources/helpers/vmware/__init__.py | 13
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/__init__.py | 13
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/boot_proto.py | 25
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py | 95
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_file.py | 129
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_namespace.py | 25
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py | 247
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_source.py | 23
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_error.py | 24
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_event.py | 27
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_state.py | 25
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py | 128
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/ipv4_mode.py | 45
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic.py | 147
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic_base.py | 154
-rw-r--r--  cloudinit/ssh_util.py | 9
-rw-r--r--  cloudinit/stages.py | 241
-rw-r--r--  cloudinit/templater.py | 2
-rw-r--r--  cloudinit/type_utils.py | 32
-rw-r--r--  cloudinit/url_helper.py | 196
-rw-r--r--  cloudinit/util.py | 355
117 files changed, 6784 insertions, 1232 deletions
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 95e0cfb2..3e6be203 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -24,6 +24,7 @@ import copy
import os
from cloudinit import log as logging
+from cloudinit.reporting import events
LOG = logging.getLogger(__name__)
@@ -40,12 +41,18 @@ LOG = logging.getLogger(__name__)
class Cloud(object):
- def __init__(self, datasource, paths, cfg, distro, runners):
+ def __init__(self, datasource, paths, cfg, distro, runners, reporter=None):
self.datasource = datasource
self.paths = paths
self.distro = distro
self._cfg = cfg
self._runners = runners
+ if reporter is None:
+ reporter = events.ReportEventStack(
+ name="unnamed-cloud-reporter",
+ description="unnamed-cloud-reporter",
+ reporting_enabled=False)
+ self.reporter = reporter
# If a 'user' manipulates logging or logging services
# it is typically useful to cause the logging to be
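Note: the new reporter argument is a null-object default; when callers pass
nothing, Cloud still holds a valid event stack that simply records nothing. A
minimal sketch of that pattern, assuming the events API used in the hunk above:

    from cloudinit.reporting import events

    # reporting_enabled=False makes the stack a no-op, so code that nests
    # events under cloud.reporter never needs a None check.
    reporter = events.ReportEventStack(
        name="unnamed-cloud-reporter",
        description="unnamed-cloud-reporter",
        reporting_enabled=False)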
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index f10b76a3..702977cb 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -51,6 +51,10 @@ EXPORT_GPG_KEYID = """
def handle(name, cfg, cloud, log, _args):
+ if util.is_false(cfg.get('apt_configure_enabled', True)):
+ log.debug("Skipping module named %s, disabled by config.", name)
+ return
+
release = get_release()
mirrors = find_apt_mirror_info(cloud, cfg)
if not mirrors or "primary" not in mirrors:
@@ -87,7 +91,8 @@ def handle(name, cfg, cloud, log, _args):
if matchcfg:
matcher = re.compile(matchcfg).search
else:
- matcher = lambda f: False
+ def matcher(x):
+ return False
errors = add_sources(cfg['apt_sources'], params,
aa_repo_match=matcher)
@@ -105,7 +110,7 @@ def handle(name, cfg, cloud, log, _args):
# get gpg keyid from keyserver
def getkeybyid(keyid, keyserver):
- with util.ExtendedTemporaryFile(suffix='.sh') as fh:
+ with util.ExtendedTemporaryFile(suffix='.sh', mode="w+", ) as fh:
fh.write(EXPORT_GPG_KEYID)
fh.flush()
cmd = ['/bin/sh', fh.name, keyid, keyserver]
@@ -126,7 +131,7 @@ def mirror2lists_fileprefix(mirror):
def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"):
- for (name, omirror) in old_mirrors.iteritems():
+ for (name, omirror) in old_mirrors.items():
nmirror = new_mirrors.get(name)
if not nmirror:
continue
@@ -169,7 +174,8 @@ def add_sources(srclist, template_params=None, aa_repo_match=None):
template_params = {}
if aa_repo_match is None:
- aa_repo_match = lambda f: False
+ def aa_repo_match(x):
+ return False
errorlist = []
for ent in srclist:
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index e5629175..40c32c84 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -43,7 +43,7 @@ def handle(_name, cfg, _cloud, log, _args):
write_apt_snippet("0", log, DEFAULT_FILE)
elif apt_pipe_value_s in ("none", "unchanged", "os"):
return
- elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]:
+ elif apt_pipe_value_s in [str(b) for b in range(0, 6)]:
write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
else:
log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 3ac22967..a295cc4e 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -36,7 +36,7 @@ def handle(name, cfg, cloud, log, _args):
with util.ExtendedTemporaryFile(suffix=".sh") as tmpf:
try:
content = util.shellify(cfg["bootcmd"])
- tmpf.write(content)
+ tmpf.write(util.encode_text(content))
tmpf.flush()
except:
util.logexc(log, "Failed to shellify bootcmd")
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 4f2a46a1..8248b020 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -44,7 +44,7 @@ def add_ca_certs(certs):
if certs:
# First ensure they are strings...
cert_file_contents = "\n".join([str(c) for c in certs])
- util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0644)
+ util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644)
# Append cert filename to CA_CERT_CONFIG file.
# We have to strip the content because blank lines in the file
@@ -63,7 +63,7 @@ def remove_default_ca_certs():
"""
util.delete_dir_contents(CA_CERT_PATH)
util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
- util.write_file(CA_CERT_CONFIG, "", mode=0644)
+ util.write_file(CA_CERT_CONFIG, "", mode=0o644)
debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
util.subp(('debconf-set-selections', '-'), debconf_sel)
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index fc837363..e18c5405 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -76,6 +76,8 @@ from cloudinit import templater
from cloudinit import url_helper
from cloudinit import util
+import six
+
RUBY_VERSION_DEFAULT = "1.8"
CHEF_DIRS = tuple([
@@ -261,7 +263,7 @@ def run_chef(chef_cfg, log):
cmd_args = chef_cfg['exec_arguments']
if isinstance(cmd_args, (list, tuple)):
cmd.extend(cmd_args)
- elif isinstance(cmd_args, (str, basestring)):
+ elif isinstance(cmd_args, six.string_types):
cmd.append(cmd_args)
else:
log.warn("Unknown type %s provided for chef"
@@ -300,7 +302,7 @@ def install_chef(cloud, chef_cfg, log):
with util.tempdir() as tmpd:
# Use tmpdir over tmpfile to avoid 'text file busy' on execute
tmpf = "%s/chef-omnibus-install" % tmpd
- util.write_file(tmpf, str(content), mode=0700)
+ util.write_file(tmpf, content, mode=0o700)
util.subp([tmpf], capture=False)
else:
log.warn("Unknown chef install type '%s'", install_type)
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index 8c489426..bdc32fe6 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -34,7 +34,8 @@ It can be configured with the following option structure::
"""
import copy
-from StringIO import StringIO
+
+from six import StringIO
from cloudinit import type_utils
from cloudinit import util
@@ -77,7 +78,7 @@ def handle(name, cfg, cloud, log, args):
dump_cfg = copy.deepcopy(cfg)
for k in SKIP_KEYS:
dump_cfg.pop(k, None)
- all_keys = list(dump_cfg.keys())
+ all_keys = list(dump_cfg)
for k in all_keys:
if k.startswith("_"):
dump_cfg.pop(k, None)
@@ -103,6 +104,6 @@ def handle(name, cfg, cloud, log, args):
line = "ci-info: %s\n" % (line)
content_to_file.append(line)
if out_file:
- util.write_file(out_file, "".join(content_to_file), 0644, "w")
+ util.write_file(out_file, "".join(content_to_file), 0o644, "w")
else:
util.multi_log("".join(content_to_file), console=True, stderr=False)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 1660832b..0ecc2e4c 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -27,6 +27,7 @@ frequency = PER_INSTANCE
# Define the commands to use
UDEVADM_CMD = util.which('udevadm')
SFDISK_CMD = util.which("sfdisk")
+SGDISK_CMD = util.which("sgdisk")
LSBLK_CMD = util.which("lsblk")
BLKID_CMD = util.which("blkid")
BLKDEV_CMD = util.which("blockdev")
@@ -151,7 +152,7 @@ def enumerate_disk(device, nodeps=False):
name: the device name, i.e. sda
"""
- lsblk_cmd = [LSBLK_CMD, '--pairs', '--out', 'NAME,TYPE,FSTYPE,LABEL',
+ lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL',
device]
if nodeps:
@@ -166,11 +167,12 @@ def enumerate_disk(device, nodeps=False):
parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
for part in parts:
- d = {'name': None,
- 'type': None,
- 'fstype': None,
- 'label': None,
- }
+ d = {
+ 'name': None,
+ 'type': None,
+ 'fstype': None,
+ 'label': None,
+ }
for key, value in value_splitter(part):
d[key.lower()] = value
@@ -303,8 +305,7 @@ def is_disk_used(device):
# If the child count is higher than 1, then there are child nodes
# such as partition or device mapper nodes
- use_count = [x for x in enumerate_disk(device)]
- if len(use_count.splitlines()) > 1:
+ if len(list(enumerate_disk(device))) > 1:
return True
# If we see a file system, then it's used
@@ -315,22 +316,6 @@ def is_disk_used(device):
return False
-def get_hdd_size(device):
- """
- Returns the hard disk size.
- This works with any disk type, including GPT.
- """
-
- size_cmd = [SFDISK_CMD, '--show-size', device]
- size = None
- try:
- size, _err = util.subp(size_cmd)
- except Exception as e:
- raise Exception("Failed to get %s size\n%s" % (device, e))
-
- return int(size.strip())
-
-
def get_dyn_func(*args):
"""
Call the appropriate function.
@@ -358,6 +343,30 @@ def get_dyn_func(*args):
raise Exception("No such function %s to call!" % func_name)
+def get_mbr_hdd_size(device):
+ size_cmd = [SFDISK_CMD, '--show-size', device]
+ size = None
+ try:
+ size, _err = util.subp(size_cmd)
+ except Exception as e:
+ raise Exception("Failed to get %s size\n%s" % (device, e))
+
+ return int(size.strip())
+
+
+def get_gpt_hdd_size(device):
+ out, _ = util.subp([SGDISK_CMD, '-p', device])
+ return out.splitlines()[0].split()[2]
+
+
+def get_hdd_size(table_type, device):
+ """
+ Returns the hard disk size.
+ This works with any disk type, including GPT.
+ """
+ return get_dyn_func("get_%s_hdd_size", table_type, device)
+
+
def check_partition_mbr_layout(device, layout):
"""
Returns true if the partition layout matches the one on the disk
@@ -393,6 +402,36 @@ def check_partition_mbr_layout(device, layout):
break
found_layout.append(type_label)
+ return found_layout
+
+
+def check_partition_gpt_layout(device, layout):
+ prt_cmd = [SGDISK_CMD, '-p', device]
+ try:
+ out, _err = util.subp(prt_cmd)
+ except Exception as e:
+ raise Exception("Error running partition command on %s\n%s" % (
+ device, e))
+
+ out_lines = iter(out.splitlines())
+ # Skip header
+ for line in out_lines:
+ if line.strip().startswith('Number'):
+ break
+
+ return [line.strip().split()[-1] for line in out_lines]
+
+
+def check_partition_layout(table_type, device, layout):
+ """
+ See if the partition layout matches.
+
+ This is a future-proofing function. In order
+ to add support for other disk layout schemes, add a
+ function called check_partition_%s_layout
+ """
+ found_layout = get_dyn_func(
+ "check_partition_%s_layout", table_type, device, layout)
if isinstance(layout, bool):
# if we are using auto partitioning, or "True" be happy
@@ -417,18 +456,6 @@ def check_partition_mbr_layout(device, layout):
return False
-def check_partition_layout(table_type, device, layout):
- """
- See if the partition lay out matches.
-
- This is future a future proofing function. In order
- to add support for other disk layout schemes, add a
- function called check_partition_%s_layout
- """
- return get_dyn_func("check_partition_%s_layout", table_type, device,
- layout)
-
-
def get_partition_mbr_layout(size, layout):
"""
Calculate the layout of the partition table. Partition sizes
@@ -481,6 +508,29 @@ def get_partition_mbr_layout(size, layout):
return sfdisk_definition
+def get_partition_gpt_layout(size, layout):
+ if isinstance(layout, bool):
+ return [(None, [0, 0])]
+
+ partition_specs = []
+ for partition in layout:
+ if isinstance(partition, list):
+ if len(partition) != 2:
+ raise Exception(
+ "Partition was incorrectly defined: %s" % partition)
+ percent, partition_type = partition
+ else:
+ percent = partition
+ partition_type = None
+
+ part_size = int(float(size) * (float(percent) / 100))
+ partition_specs.append((partition_type, [0, '+{}'.format(part_size)]))
+
+ # The last partition should use up all remaining space
+ partition_specs[-1][-1][-1] = 0
+ return partition_specs
+
+
def purge_disk_ptable(device):
# wipe the first and last megabyte of a disk (or file)
# gpt stores partition table both at front and at end.
@@ -556,6 +606,22 @@ def exec_mkpart_mbr(device, layout):
read_parttbl(device)
+def exec_mkpart_gpt(device, layout):
+ try:
+ util.subp([SGDISK_CMD, '-Z', device])
+ for index, (partition_type, (start, end)) in enumerate(layout):
+ index += 1
+ util.subp([SGDISK_CMD,
+ '-n', '{}:{}:{}'.format(index, start, end), device])
+ if partition_type is not None:
+ util.subp(
+ [SGDISK_CMD,
+ '-t', '{}:{}'.format(index, partition_type), device])
+ except Exception:
+ LOG.warn("Failed to partition device %s" % device)
+ raise
+
+
def exec_mkpart(table_type, device, layout):
"""
Fetches the function for creating the table type.
@@ -583,6 +649,8 @@ def mkpart(device, definition):
table_type: Which partition table to use, defaults to MBR
device: the device to work on.
"""
+ # ensure that we get a real device rather than a symbolic link
+ device = os.path.realpath(device)
LOG.debug("Checking values for %s definition" % device)
overwrite = definition.get('overwrite', False)
@@ -618,7 +686,7 @@ def mkpart(device, definition):
return
LOG.debug("Checking for device size")
- device_size = get_hdd_size(device)
+ device_size = get_hdd_size(table_type, device)
LOG.debug("Calculating partition layout")
part_definition = get_partition_layout(table_type, device_size, layout)
@@ -634,11 +702,12 @@ def lookup_force_flag(fs):
"""
A force flag might be -F or -f; this looks it up
"""
- flags = {'ext': '-F',
- 'btrfs': '-f',
- 'xfs': '-f',
- 'reiserfs': '-f',
- }
+ flags = {
+ 'ext': '-F',
+ 'btrfs': '-f',
+ 'xfs': '-f',
+ 'reiserfs': '-f',
+ }
if 'ext' in fs.lower():
fs = 'ext'
@@ -680,6 +749,9 @@ def mkfs(fs_cfg):
fs_replace = fs_cfg.get('replace_fs', False)
overwrite = fs_cfg.get('overwrite', False)
+ # ensure that we get a real device rather than a symbolic link
+ device = os.path.realpath(device)
+
# This allows you to define the default ephemeral or swap
LOG.debug("Checking %s against default devices", device)
@@ -754,10 +826,11 @@ def mkfs(fs_cfg):
# Create the commands
if fs_cmd:
- fs_cmd = fs_cfg['cmd'] % {'label': label,
- 'filesystem': fs_type,
- 'device': device,
- }
+ fs_cmd = fs_cfg['cmd'] % {
+ 'label': label,
+ 'filesystem': fs_type,
+ 'device': device,
+ }
else:
# Find the mkfs command
mkfs_cmd = util.which("mkfs.%s" % fs_type)
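Note: get_hdd_size, check_partition_layout and exec_mkpart above all dispatch
through get_dyn_func, which formats a function name from the partition-table
type and resolves it at runtime. An illustrative sketch of that pattern (the
real helper lives in this module; this is a paraphrase, not the exact body):

    def get_dyn_func(*args):
        # args[0] is a name template such as "get_%s_hdd_size", args[1] the
        # table type ("mbr" or "gpt"); any remaining args are passed through.
        func_name = args[0] % args[1]
        func_args = args[2:]
        try:
            func = globals()[func_name]
        except KeyError:
            raise Exception("No such function %s to call!" % func_name)
        return func(*func_args) if func_args else func()

    # get_dyn_func("get_%s_hdd_size", "gpt", "/dev/sda")
    # -> calls get_gpt_hdd_size("/dev/sda")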
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index 6d376184..86ae97ab 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -21,11 +21,31 @@
import os
from cloudinit.settings import PER_ALWAYS
+from cloudinit import log as logging
from cloudinit import util
frequency = PER_ALWAYS
distros = ['ubuntu', 'debian']
+LOG = logging.getLogger(__name__)
+
+
+def is_upstart_system():
+ if not os.path.isfile("/sbin/initctl"):
+ LOG.debug("no /sbin/initctl located")
+ return False
+
+ myenv = os.environ.copy()
+ if 'UPSTART_SESSION' in myenv:
+ del myenv['UPSTART_SESSION']
+ check_cmd = ['initctl', 'version']
+ try:
+ (out, err) = util.subp(check_cmd, env=myenv)
+ return 'upstart' in out
+ except util.ProcessExecutionError as e:
+ LOG.debug("'%s' returned '%s', not using upstart",
+ ' '.join(check_cmd), e.exit_code)
+ return False
def handle(name, _cfg, cloud, log, args):
@@ -34,10 +54,11 @@ def handle(name, _cfg, cloud, log, args):
# Default to the 'cloud-config'
# event for backwards compat.
event_names = ['cloud-config']
- if not os.path.isfile("/sbin/initctl"):
- log.debug(("Skipping module named %s,"
- " no /sbin/initctl located"), name)
+
+ if not is_upstart_system():
+ log.debug("not upstart system, '%s' disabled")
return
+
cfgpath = cloud.paths.get_ipath_cur("cloud_config")
for n in event_names:
cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
new file mode 100644
index 00000000..39e3850e
--- /dev/null
+++ b/cloudinit/config/cc_fan.py
@@ -0,0 +1,101 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+fan module allows configuration of Ubuntu Fan
+ https://wiki.ubuntu.com/FanNetworking
+
+Example config:
+ #cloud-config
+ fan:
+ config: |
+ # fan 240
+ 10.0.0.0/8 eth0/16 dhcp
+ 10.0.0.0/8 eth1/16 dhcp off
+ # fan 241
+ 241.0.0.0/8 eth0/16 dhcp
+ config_path: /etc/network/fan
+
+If cloud-init sees a 'fan' entry in cloud-config it will
+ a.) write 'config_path' with the contents
+ b.) install the package 'ubuntu-fan' if it is not installed
+ c.) ensure the service is started (or restarted if was previously running)
+"""
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+
+BUILTIN_CFG = {
+ 'config': None,
+ 'config_path': '/etc/network/fan',
+}
+
+
+def stop_update_start(service, config_file, content, systemd=False):
+ if systemd:
+ cmds = {'stop': ['systemctl', 'stop', service],
+ 'start': ['systemctl', 'start', service],
+ 'enable': ['systemctl', 'enable', service]}
+ else:
+ cmds = {'stop': ['service', 'stop'],
+ 'start': ['service', 'start']}
+
+ def run(cmd, msg):
+ try:
+ return util.subp(cmd, capture=True)
+ except util.ProcessExecutionError as e:
+ LOG.warn("failed: %s (%s): %s", service, cmd, e)
+ return False
+
+ stop_failed = not run(cmds['stop'], msg='stop %s' % service)
+ if not content.endswith('\n'):
+ content += '\n'
+ util.write_file(config_file, content, omode="w")
+
+ ret = run(cmds['start'], msg='start %s' % service)
+ if ret and stop_failed:
+ LOG.warn("success: %s started", service)
+
+ if 'enable' in cmds:
+ ret = run(cmds['enable'], msg='enable %s' % service)
+
+ return ret
+
+
+def handle(name, cfg, cloud, log, args):
+ cfgin = cfg.get('fan')
+ if not cfgin:
+ cfgin = {}
+ mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
+
+ if not mycfg.get('config'):
+ LOG.debug("%s: no 'fan' config entry. disabling", name)
+ return
+
+ util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w")
+ distro = cloud.distro
+ if not util.which('fanctl'):
+ distro.install_packages(['ubuntu-fan'])
+
+ stop_update_start(
+ service='ubuntu-fan', config_file=mycfg.get('config_path'),
+ content=mycfg.get('config'), systemd=distro.uses_systemd())
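Note: in handle() above, util.mergemanydict layers the user's 'fan' dict over
BUILTIN_CFG, so user-supplied values win and unset keys fall back to the
defaults. A hedged sketch of the effective behavior, assuming the default
no-overwrite merge where earlier dicts take precedence:

    cfgin = {'config': '10.0.0.0/8 eth0/16 dhcp\n'}    # from cloud-config
    mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
    assert mycfg['config'] == cfgin['config']          # user value kept
    assert mycfg['config_path'] == '/etc/network/fan'  # builtin default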
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index b24294e4..4a51476f 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -26,9 +26,12 @@ from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
-# Cheetah formated default message
-FINAL_MESSAGE_DEF = ("Cloud-init v. ${version} finished at ${timestamp}."
- " Datasource ${datasource}. Up ${uptime} seconds")
+# Jinja formatted default message
+FINAL_MESSAGE_DEF = (
+ "## template: jinja\n"
+ "Cloud-init v. {{version}} finished at {{timestamp}}."
+ " Datasource {{datasource}}. Up {{uptime}} seconds"
+)
def handle(_name, cfg, cloud, log, args):
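Note: the default message moves from Cheetah to Jinja syntax; the leading
"## template: jinja" line tells cloud-init's templater which renderer to
apply. A rough sketch of how the body renders, using plain jinja2 and
hypothetical values:

    import jinja2

    body = ("Cloud-init v. {{version}} finished at {{timestamp}}."
            " Datasource {{datasource}}. Up {{uptime}} seconds")
    print(jinja2.Template(body).render(
        version="0.7.7", timestamp="Mon, 04 Apr 2016 16:07:19 +0000",
        datasource="DataSourceNoCloud", uptime="12.34"))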
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index f52c41f0..859d69f1 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -276,7 +276,7 @@ def handle(_name, cfg, _cloud, log, _args):
log.debug("use ignore_growroot_disabled to ignore")
return
- devices = util.get_cfg_option_list(cfg, "devices", ["/"])
+ devices = util.get_cfg_option_list(mycfg, "devices", ["/"])
if not len(devices):
log.debug("growpart: empty device list")
return
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index e3219e81..3c2d9985 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -25,19 +25,23 @@ from cloudinit import util
distros = ['ubuntu', 'debian']
-def handle(_name, cfg, _cloud, log, _args):
- idevs = None
- idevs_empty = None
+def handle(name, cfg, _cloud, log, _args):
- if "grub-dpkg" in cfg:
- idevs = util.get_cfg_option_str(cfg["grub-dpkg"],
- "grub-pc/install_devices", None)
- idevs_empty = util.get_cfg_option_str(cfg["grub-dpkg"],
- "grub-pc/install_devices_empty", None)
+ mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
+ if not mycfg:
+ mycfg = {}
+
+ enabled = mycfg.get('enabled', True)
+ if util.is_false(enabled):
+ log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
+ return
+
+ idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
+ idevs_empty = util.get_cfg_option_str(
+ mycfg, "grub-pc/install_devices_empty", None)
if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
- (os.path.exists("/dev/xvda1")
- and not os.path.exists("/dev/xvda"))):
+ (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):
if idevs is None:
idevs = ""
if idevs_empty is None:
@@ -61,7 +65,7 @@ def handle(_name, cfg, _cloud, log, _args):
(idevs, idevs_empty))
log.debug("Setting grub debconf-set-selections with '%s','%s'" %
- (idevs, idevs_empty))
+ (idevs, idevs_empty))
try:
util.subp(['debconf-set-selections'], dconf_sel)
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index f1c1adff..aa844ee9 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -48,7 +48,7 @@ def handle(name, cfg, cloud, log, _args):
"ssh_fp_console_blacklist", [])
key_blacklist = util.get_cfg_option_list(cfg,
"ssh_key_console_blacklist",
- ["ssh-dss"])
+ ["ssh-dss"])
try:
cmd = [helper_path]
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 8a709677..68fcb27f 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -20,7 +20,7 @@
import os
-from StringIO import StringIO
+from six import StringIO
from configobj import ConfigObj
@@ -38,12 +38,12 @@ distros = ['ubuntu']
# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
LSC_BUILTIN_CFG = {
- 'client': {
- 'log_level': "info",
- 'url': "https://landscape.canonical.com/message-system",
- 'ping_url': "http://landscape.canonical.com/ping",
- 'data_path': "/var/lib/landscape/client",
- }
+ 'client': {
+ 'log_level': "info",
+ 'url': "https://landscape.canonical.com/message-system",
+ 'ping_url': "http://landscape.canonical.com/ping",
+ 'data_path': "/var/lib/landscape/client",
+ }
}
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index 6feaae9d..bbe5fcae 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -27,9 +27,9 @@ def handle(name, cfg, cloud, log, args):
else:
locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
- if not locale:
- log.debug(("Skipping module named %s, "
- "no 'locale' configuration found"), name)
+ if util.is_false(locale):
+ log.debug("Skipping module named %s, disabled by config: %s",
+ name, locale)
return
log.debug("Setting locale to %s", locale)
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
new file mode 100644
index 00000000..63b8fb63
--- /dev/null
+++ b/cloudinit/config/cc_lxd.py
@@ -0,0 +1,85 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+#
+# Author: Wesley Wiedenmeier <wesley.wiedenmeier@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+This module initializes lxd using 'lxd init'
+
+Example config:
+ #cloud-config
+ lxd:
+ init:
+ network_address: <ip addr>
+ network_port: <port>
+ storage_backend: <zfs/dir>
+ storage_create_device: <dev>
+ storage_create_loop: <size>
+ storage_pool: <name>
+ trust_password: <password>
+"""
+
+from cloudinit import util
+
+
+def handle(name, cfg, cloud, log, args):
+ # Get config
+ lxd_cfg = cfg.get('lxd')
+ if not lxd_cfg:
+ log.debug("Skipping module named %s, not present or disabled by cfg")
+ return
+ if not isinstance(lxd_cfg, dict):
+ log.warn("lxd config must be a dictionary. found a '%s'",
+ type(lxd_cfg))
+ return
+
+ init_cfg = lxd_cfg.get('init')
+ if not isinstance(init_cfg, dict):
+ log.warn("lxd/init config must be a dictionary. found a '%s'",
+ type(init_cfg))
+ init_cfg = {}
+
+ if not init_cfg:
+ log.debug("no lxd/init config. disabled.")
+ return
+
+ packages = []
+ # Ensure lxd is installed
+ if not util.which("lxd"):
+ packages.append('lxd')
+
+ # if using zfs, get the utils
+ if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
+ packages.append('zfs')
+
+ if len(packages):
+ try:
+ cloud.distro.install_packages(packages)
+ except util.ProcessExecutionError as exc:
+ log.warn("failed to install packages %s: %s", packages, exc)
+ return
+
+ # Set up lxd if init config is given
+ init_keys = (
+ 'network_address', 'network_port', 'storage_backend',
+ 'storage_create_device', 'storage_create_loop',
+ 'storage_pool', 'trust_password')
+ cmd = ['lxd', 'init', '--auto']
+ for k in init_keys:
+ if init_cfg.get(k):
+ cmd.extend(["--%s=%s" %
+ (k.replace('_', '-'), str(init_cfg[k]))])
+ util.subp(cmd)
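Note: the loop at the end of handle() maps the documented init keys onto
'lxd init --auto' flags by swapping underscores for hyphens. A sketch with a
hypothetical config:

    init_cfg = {'network_address': '0.0.0.0', 'network_port': 8443,
                'storage_backend': 'dir'}
    cmd = ['lxd', 'init', '--auto']
    for k in ('network_address', 'network_port', 'storage_backend'):
        if init_cfg.get(k):
            cmd.append('--%s=%s' % (k.replace('_', '-'), init_cfg[k]))
    # cmd -> ['lxd', 'init', '--auto', '--network-address=0.0.0.0',
    #         '--network-port=8443', '--storage-backend=dir']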
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index b670390d..425420ae 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -19,7 +19,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from StringIO import StringIO
+import six
+from six import StringIO
# Used since this can maintain comments
# and doesn't need a top level section
@@ -51,17 +52,17 @@ def handle(name, cfg, cloud, log, _args):
# original file in order to be able to mix the rest up
mcollective_config = ConfigObj(SERVER_CFG)
# See: http://tiny.cc/jh9agw
- for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems():
+ for (cfg_name, cfg) in mcollective_cfg['conf'].items():
if cfg_name == 'public-cert':
- util.write_file(PUBCERT_FILE, cfg, mode=0644)
+ util.write_file(PUBCERT_FILE, cfg, mode=0o644)
mcollective_config['plugin.ssl_server_public'] = PUBCERT_FILE
mcollective_config['securityprovider'] = 'ssl'
elif cfg_name == 'private-cert':
- util.write_file(PRICERT_FILE, cfg, mode=0600)
+ util.write_file(PRICERT_FILE, cfg, mode=0o600)
mcollective_config['plugin.ssl_server_private'] = PRICERT_FILE
mcollective_config['securityprovider'] = 'ssl'
else:
- if isinstance(cfg, (basestring, str)):
+ if isinstance(cfg, six.string_types):
# Just set it in the 'main' section
mcollective_config[cfg_name] = cfg
elif isinstance(cfg, (dict)):
@@ -69,7 +70,7 @@ def handle(name, cfg, cloud, log, _args):
# if it is needed and then add or create items as needed
if cfg_name not in mcollective_config.sections:
mcollective_config[cfg_name] = {}
- for (o, v) in cfg.iteritems():
+ for (o, v) in cfg.items():
mcollective_config[cfg_name][o] = v
else:
# Otherwise just try to convert it to a string
@@ -81,7 +82,7 @@ def handle(name, cfg, cloud, log, _args):
contents = StringIO()
mcollective_config.write(contents)
contents = contents.getvalue()
- util.write_file(SERVER_CFG, contents, mode=0644)
+ util.write_file(SERVER_CFG, contents, mode=0o644)
# Start mcollective
util.subp(['service', 'mcollective', 'start'], capture=False)
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 1cb1e839..4fe3ee21 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -28,15 +28,15 @@ from cloudinit import type_utils
from cloudinit import util
# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
-SHORTNAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
-SHORTNAME = re.compile(SHORTNAME_FILTER)
+DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
+DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
WS = re.compile("[%s]+" % (whitespace))
FSTAB_PATH = "/etc/fstab"
LOG = logging.getLogger(__name__)
-def is_mdname(name):
+def is_meta_device_name(name):
# return true if this is a metadata service name
if name in ["ami", "root", "swap"]:
return True
@@ -48,6 +48,25 @@ def is_mdname(name):
return False
+def _get_nth_partition_for_device(device_path, partition_number):
+ potential_suffixes = [str(partition_number), 'p%s' % (partition_number,),
+ '-part%s' % (partition_number,)]
+ for suffix in potential_suffixes:
+ potential_partition_device = '%s%s' % (device_path, suffix)
+ if os.path.exists(potential_partition_device):
+ return potential_partition_device
+ return None
+
+
+def _is_block_device(device_path, partition_path=None):
+ device_name = os.path.realpath(device_path).split('/')[-1]
+ sys_path = os.path.join('/sys/block/', device_name)
+ if partition_path is not None:
+ sys_path = os.path.join(
+ sys_path, os.path.realpath(partition_path).split('/')[-1])
+ return os.path.exists(sys_path)
+
+
def sanitize_devname(startname, transformer, log):
log.debug("Attempting to determine the real name of %s", startname)
@@ -58,21 +77,34 @@ def sanitize_devname(startname, transformer, log):
devname = "ephemeral0"
log.debug("Adjusted mount option from ephemeral to ephemeral0")
- (blockdev, part) = util.expand_dotted_devname(devname)
+ device_path, partition_number = util.expand_dotted_devname(devname)
- if is_mdname(blockdev):
- orig = blockdev
- blockdev = transformer(blockdev)
- if not blockdev:
+ if is_meta_device_name(device_path):
+ orig = device_path
+ device_path = transformer(device_path)
+ if not device_path:
return None
- if not blockdev.startswith("/"):
- blockdev = "/dev/%s" % blockdev
- log.debug("Mapped metadata name %s to %s", orig, blockdev)
+ if not device_path.startswith("/"):
+ device_path = "/dev/%s" % (device_path,)
+ log.debug("Mapped metadata name %s to %s", orig, device_path)
+ else:
+ if DEVICE_NAME_RE.match(startname):
+ device_path = "/dev/%s" % (device_path,)
+
+ partition_path = None
+ if partition_number is None:
+ partition_path = _get_nth_partition_for_device(device_path, 1)
else:
- if SHORTNAME.match(startname):
- blockdev = "/dev/%s" % blockdev
+ partition_path = _get_nth_partition_for_device(device_path,
+ partition_number)
+ if partition_path is None:
+ return None
- return devnode_for_dev_part(blockdev, part)
+ if _is_block_device(device_path, partition_path):
+ if partition_path is not None:
+ return partition_path
+ return device_path
+ return None
def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
@@ -172,11 +204,12 @@ def setup_swapfile(fname, size=None, maxsize=None):
try:
util.ensure_dir(tdir)
util.log_time(LOG.debug, msg, func=util.subp,
- args=[['sh', '-c',
- ('rm -f "$1" && umask 0066 && '
- 'dd if=/dev/zero "of=$1" bs=1M "count=$2" && '
- 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
- 'setup_swap', fname, mbsize]])
+ args=[['sh', '-c',
+ ('rm -f "$1" && umask 0066 && '
+ '{ fallocate -l "${2}M" "$1" || '
+ ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
+ 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
+ 'setup_swap', fname, mbsize]])
except Exception as e:
raise IOError("Failed %s: %s" % (msg, e))
@@ -230,7 +263,11 @@ def handle_swapcfg(swapcfg):
def handle(_name, cfg, cloud, log, _args):
# fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno
- defvals = [None, None, "auto", "defaults,nobootwait", "0", "2"]
+ def_mnt_opts = "defaults,nobootwait"
+ if cloud.distro.uses_systemd():
+ def_mnt_opts = "defaults,nofail"
+
+ defvals = [None, None, "auto", def_mnt_opts, "0", "2"]
defvals = cfg.get("mount_default_fields", defvals)
# these are our default set of mounts
@@ -366,49 +403,3 @@ def handle(_name, cfg, cloud, log, _args):
util.subp(("mount", "-a"))
except:
util.logexc(log, "Activating mounts via 'mount -a' failed")
-
-
-def devnode_for_dev_part(device, partition):
- """
- Find the name of the partition. While this might seem rather
- straight forward, its not since some devices are '<device><partition>'
- while others are '<device>p<partition>'. For example, /dev/xvda3 on EC2
- will present as /dev/xvda3p1 for the first partition since /dev/xvda3 is
- a block device.
- """
- if not os.path.exists(device):
- return None
-
- short_name = os.path.basename(device)
- sys_path = "/sys/block/%s" % short_name
-
- if not os.path.exists(sys_path):
- LOG.debug("did not find entry for %s in /sys/block", short_name)
- return None
-
- sys_long_path = sys_path + "/" + short_name
-
- if partition is not None:
- partition = str(partition)
-
- if partition is None:
- valid_mappings = [sys_long_path + "1", sys_long_path + "p1"]
- elif partition != "0":
- valid_mappings = [sys_long_path + "%s" % partition,
- sys_long_path + "p%s" % partition]
- else:
- valid_mappings = []
-
- for cdisk in valid_mappings:
- if not os.path.exists(cdisk):
- continue
-
- dev_path = "/dev/%s" % os.path.basename(cdisk)
- if os.path.exists(dev_path):
- return dev_path
-
- if partition is None or partition == "0":
- return device
-
- LOG.debug("Did not fine partition %s for device %s", partition, device)
- return None
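Note: _get_nth_partition_for_device replaces devnode_for_dev_part but keeps
the same naming quirks in mind: a partition node may be '<device><n>' (sda1),
'<device>p<n>' (e.g. xvda3p1 on EC2), or '<device>-part<n>' (udev by-id
symlinks). A usage sketch with hypothetical device paths:

    # probes /dev/vdb1, /dev/vdbp1, /dev/vdb-part1 in turn; returns the
    # first path that exists, or None
    _get_nth_partition_for_device('/dev/vdb', 1)
    # for a /dev/disk/by-id symlink the '-part1' form is the one that exists
    _get_nth_partition_for_device('/dev/disk/by-id/virtio-abc123', 1)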
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 5bc68b83..18a7ddad 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -81,7 +81,7 @@ def handle(name, cfg, cloud, log, args):
'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
}
- for (n, path) in pubkeys.iteritems():
+ for (n, path) in pubkeys.items():
try:
all_keys[n] = util.load_file(path)
except:
@@ -99,7 +99,7 @@ def handle(name, cfg, cloud, log, args):
# Get them ready to be posted
real_submit_keys = {}
- for (k, v) in submit_keys.iteritems():
+ for (k, v) in submit_keys.items():
if v is None:
real_submit_keys[k] = 'N/A'
else:
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 09d37371..cc3f7f70 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -22,6 +22,7 @@ from cloudinit import util
import errno
import os
import re
+import six
import subprocess
import time
@@ -48,10 +49,40 @@ def givecmdline(pid):
return None
+def check_condition(cond, log=None):
+ if isinstance(cond, bool):
+ if log:
+ log.debug("Static Condition: %s" % cond)
+ return cond
+
+ pre = "check_condition command (%s): " % cond
+ try:
+ proc = subprocess.Popen(cond, shell=not isinstance(cond, list))
+ proc.communicate()
+ ret = proc.returncode
+ if ret == 0:
+ if log:
+ log.debug(pre + "exited 0. condition met.")
+ return True
+ elif ret == 1:
+ if log:
+ log.debug(pre + "exited 1. condition not met.")
+ return False
+ else:
+ if log:
+ log.warn(pre + "unexpected exit %s. " % ret +
+ "do not apply change.")
+ return False
+ except Exception as e:
+ if log:
+ log.warn(pre + "Unexpected error: %s" % e)
+ return False
+
+
def handle(_name, cfg, _cloud, log, _args):
try:
- (args, timeout) = load_power_state(cfg)
+ (args, timeout, condition) = load_power_state(cfg)
if args is None:
log.debug("no power_state provided. doing nothing")
return
@@ -59,6 +90,10 @@ def handle(_name, cfg, _cloud, log, _args):
log.warn("%s Not performing power state change!" % str(e))
return
+ if condition is False:
+ log.debug("Condition was false. Will not perform state change.")
+ return
+
mypid = os.getpid()
cmdline = givecmdline(mypid)
@@ -70,8 +105,8 @@ def handle(_name, cfg, _cloud, log, _args):
log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args)))
- util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, execmd,
- [args, devnull_fp])
+ util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log,
+ condition, execmd, [args, devnull_fp])
def load_power_state(cfg):
@@ -80,7 +115,7 @@ def load_power_state(cfg):
pstate = cfg.get('power_state')
if pstate is None:
- return (None, None)
+ return (None, None, None)
if not isinstance(pstate, dict):
raise TypeError("power_state is not a dict.")
@@ -115,7 +150,10 @@ def load_power_state(cfg):
raise ValueError("failed to convert timeout '%s' to float." %
pstate['timeout'])
- return (args, timeout)
+ condition = pstate.get("condition", True)
+ if not isinstance(condition, six.string_types + (list, bool)):
+ raise TypeError("condition type %s invalid. must be list, bool, str")
+ return (args, timeout, condition)
def doexit(sysexit):
@@ -133,7 +171,7 @@ def execmd(exe_args, output=None, data_in=None):
doexit(ret)
-def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
+def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
# wait until pid, with /proc/pid/cmdline contents of pidcmdline
# is no longer alive. After it is gone, or timeout has passed
# execute func(args)
@@ -175,4 +213,11 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
if log:
log.debug(msg)
+
+ try:
+ if not check_condition(condition, log):
+ return
+ except Exception as e:
+ fatal("Unexpected Exception when checking condition: %s" % e)
+
func(*args)
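Note: check_condition gives 'condition' shell-test semantics: a bool is a
static answer, while a string or list runs as a command where exit 0 means
apply the power-state change, exit 1 means skip it, and any other exit (or an
execution error) is logged and treated as "do not apply". A small usage
sketch (paths hypothetical):

    check_condition(True)                               # always apply
    check_condition(['test', '-e', '/run/do-reboot'])   # list -> no shell
    check_condition('grep -q needed /run/reboot-flag')  # str -> via shell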
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 471a1a8a..774d3322 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -18,7 +18,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from StringIO import StringIO
+from six import StringIO
import os
import socket
@@ -36,8 +36,8 @@ def _autostart_puppet(log):
# Set puppet to automatically start
if os.path.exists('/etc/default/puppet'):
util.subp(['sed', '-i',
- '-e', 's/^START=.*/START=yes/',
- '/etc/default/puppet'], capture=False)
+ '-e', 's/^START=.*/START=yes/',
+ '/etc/default/puppet'], capture=False)
elif os.path.exists('/bin/systemctl'):
util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
capture=False)
@@ -65,7 +65,7 @@ def handle(name, cfg, cloud, log, _args):
" doing nothing."))
elif install:
log.debug(("Attempting to install puppet %s,"),
- version if version else 'latest')
+ version if version else 'latest')
cloud.distro.install_packages(('puppet', version))
# ... and then update the puppet configuration
@@ -81,22 +81,22 @@ def handle(name, cfg, cloud, log, _args):
cleaned_contents = '\n'.join(cleaned_lines)
puppet_config.readfp(StringIO(cleaned_contents),
filename=PUPPET_CONF_PATH)
- for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
+ for (cfg_name, cfg) in puppet_cfg['conf'].items():
# Cert configuration is a special case
# Dump the puppet master ca certificate in the correct place
if cfg_name == 'ca_cert':
# Puppet ssl sub-directory isn't created yet
# Create it with the proper permissions and ownership
- util.ensure_dir(PUPPET_SSL_DIR, 0771)
+ util.ensure_dir(PUPPET_SSL_DIR, 0o771)
util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
util.ensure_dir(PUPPET_SSL_CERT_DIR)
util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
- util.write_file(PUPPET_SSL_CERT_PATH, str(cfg))
+ util.write_file(PUPPET_SSL_CERT_PATH, cfg)
util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
else:
# Iterate through the config items; we'll use ConfigParser.set
# to overwrite or create new items as needed
- for (o, v) in cfg.iteritems():
+ for (o, v) in cfg.items():
if o == 'certname':
# Expand %f as the fqdn
# TODO(harlowja) should this use the cloud fqdn??
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index cbc07853..2a2a9f59 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -166,7 +166,7 @@ def handle(name, cfg, _cloud, log, args):
func=do_resize, args=(resize_cmd, log))
else:
util.log_time(logfunc=log.debug, msg="Resizing",
- func=do_resize, args=(resize_cmd, log))
+ func=do_resize, args=(resize_cmd, log))
action = 'Resized'
if resize_root == NOBLOCK:
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index bbaa6c63..71d9e3a7 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -66,8 +66,8 @@ def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
false_flags = []
if 'options' in params:
- for key, val in params['options'].iteritems():
- if type(val) == bool:
+ for key, val in params['options'].items():
+ if isinstance(val, bool):
if val:
flags.append(key)
else:
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
new file mode 100644
index 00000000..6087c45c
--- /dev/null
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -0,0 +1,402 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Red Hat, Inc.
+#
+# Author: Brent Baude <bbaude@redhat.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import util
+
+
+def handle(_name, cfg, _cloud, log, _args):
+ sm = SubscriptionManager(cfg)
+ sm.log = log
+ if not sm.is_registered:
+ try:
+ verify, verify_msg = sm._verify_keys()
+ if verify is not True:
+ raise SubscriptionError(verify_msg)
+ cont = sm.rhn_register()
+ if not cont:
+ raise SubscriptionError("Registration failed or did not "
+ "run completely")
+
+ # Splitting up the registration, auto-attach, and servicelevel
+ # commands because the error codes, messages from subman are not
+ # specific enough.
+
+ # Attempt to change the service level
+ if sm.auto_attach and sm.servicelevel is not None:
+ if not sm._set_service_level():
+ raise SubscriptionError("Setting of service-level "
+ "failed")
+ else:
+ sm.log.debug("Completed auto-attach with service level")
+ elif sm.auto_attach:
+ if not sm._set_auto_attach():
+ raise SubscriptionError("Setting auto-attach failed")
+ else:
+ sm.log.debug("Completed auto-attach")
+
+ if sm.pools is not None:
+ if not isinstance(sm.pools, list):
+ pool_fail = "Pools must in the format of a list"
+ raise SubscriptionError(pool_fail)
+
+ return_stat = sm.addPool(sm.pools)
+ if not return_stat:
+ raise SubscriptionError("Unable to attach pools {0}"
+ .format(sm.pools))
+ if (sm.enable_repo is not None) or (sm.disable_repo is not None):
+ return_stat = sm.update_repos(sm.enable_repo, sm.disable_repo)
+ if not return_stat:
+ raise SubscriptionError("Unable to add or remove repos")
+ sm.log_success("rh_subscription plugin completed successfully")
+ except SubscriptionError as e:
+ sm.log_warn(str(e))
+ sm.log_warn("rh_subscription plugin did not complete successfully")
+ else:
+ sm.log_success("System is already registered")
+
+
+class SubscriptionError(Exception):
+ pass
+
+
+class SubscriptionManager(object):
+ valid_rh_keys = ['org', 'activation-key', 'username', 'password',
+ 'disable-repo', 'enable-repo', 'add-pool',
+ 'rhsm-baseurl', 'server-hostname',
+ 'auto-attach', 'service-level']
+
+ def __init__(self, cfg):
+ self.cfg = cfg
+ self.rhel_cfg = self.cfg.get('rh_subscription', {})
+ self.rhsm_baseurl = self.rhel_cfg.get('rhsm-baseurl')
+ self.server_hostname = self.rhel_cfg.get('server-hostname')
+ self.pools = self.rhel_cfg.get('add-pool')
+ self.activation_key = self.rhel_cfg.get('activation-key')
+ self.org = self.rhel_cfg.get('org')
+ self.userid = self.rhel_cfg.get('username')
+ self.password = self.rhel_cfg.get('password')
+ self.auto_attach = self.rhel_cfg.get('auto-attach')
+ self.enable_repo = self.rhel_cfg.get('enable-repo')
+ self.disable_repo = self.rhel_cfg.get('disable-repo')
+ self.servicelevel = self.rhel_cfg.get('service-level')
+ self.subman = ['subscription-manager']
+ self.is_registered = self._is_registered()
+
+ def log_success(self, msg):
+ '''Simple wrapper for logging info messages. Useful for unittests'''
+ self.log.info(msg)
+
+ def log_warn(self, msg):
+ '''Simple wrapper for logging warning messages. Useful for unittests'''
+ self.log.warn(msg)
+
+ def _verify_keys(self):
+ '''
+ Checks that the keys in the rh_subscription dict from the user-data
+ are what we expect.
+ '''
+
+ for k in self.rhel_cfg:
+ if k not in self.valid_rh_keys:
+ bad_key = "{0} is not a valid key for rh_subscription. "\
+ "Valid keys are: "\
+ "{1}".format(k, ', '.join(self.valid_rh_keys))
+ return False, bad_key
+
+ # Check for bad auto-attach value
+ if (self.auto_attach is not None) and \
+ not (util.is_true(self.auto_attach) or
+ util.is_false(self.auto_attach)):
+ not_bool = "The key auto-attach must be a boolean value "\
+ "(True/False "
+ return False, not_bool
+
+ if (self.servicelevel is not None) and ((not self.auto_attach) or
+ (util.is_false(str(self.auto_attach)))):
+ no_auto = ("The service-level key must be used in conjunction "
+ "with the auto-attach key. Please re-run with "
+ "auto-attach: True")
+ return False, no_auto
+ return True, None
+
+ def _is_registered(self):
+ '''
+ Checks if the system is already registered and returns
+ True if so, else False
+ '''
+ cmd = ['identity']
+
+ try:
+ self._sub_man_cli(cmd)
+ except util.ProcessExecutionError:
+ return False
+
+ return True
+
+ def _sub_man_cli(self, cmd, logstring_val=False):
+ '''
+ Uses the preferred cloud-init subprocess wrapper util.subp
+ and runs subscription-manager. Breaking this to a
+ separate function for later use in mocking and unittests
+ '''
+ cmd = self.subman + cmd
+ return util.subp(cmd, logstring=logstring_val)
+
+ def rhn_register(self):
+ '''
+ Registers the system by userid and password or activation key
+ and org. Returns True when successful False when not.
+ '''
+
+ if (self.activation_key is not None) and (self.org is not None):
+ # register by activation key
+ cmd = ['register', '--activationkey={0}'.
+ format(self.activation_key), '--org={0}'.format(self.org)]
+
+ # If the baseurl and/or server url are passed in, we register
+ # with them.
+
+ if self.rhsm_baseurl is not None:
+ cmd.append("--baseurl={0}".format(self.rhsm_baseurl))
+
+ if self.server_hostname is not None:
+ cmd.append("--serverurl={0}".format(self.server_hostname))
+
+ try:
+ return_out, return_err = self._sub_man_cli(cmd,
+ logstring_val=True)
+ except util.ProcessExecutionError as e:
+ if e.stdout == "":
+ self.log_warn("Registration failed due "
+ "to: {0}".format(e.stderr))
+ return False
+
+ elif (self.userid is not None) and (self.password is not None):
+ # register by username and password
+ cmd = ['register', '--username={0}'.format(self.userid),
+ '--password={0}'.format(self.password)]
+
+ # If the baseurl and/or server url are passed in, we register
+ # with them.
+
+ if self.rhsm_baseurl is not None:
+ cmd.append("--baseurl={0}".format(self.rhsm_baseurl))
+
+ if self.server_hostname is not None:
+ cmd.append("--serverurl={0}".format(self.server_hostname))
+
+ # Attempting to register the system only
+ try:
+ return_out, return_err = self._sub_man_cli(cmd,
+ logstring_val=True)
+ except util.ProcessExecutionError as e:
+ if e.stdout == "":
+ self.log_warn("Registration failed due "
+ "to: {0}".format(e.stderr))
+ return False
+
+ else:
+ self.log_warn("Unable to register system due to incomplete "
+ "information.")
+ self.log_warn("Use either activationkey and org *or* userid "
+ "and password")
+ return False
+
+ reg_id = return_out.split("ID: ")[1].rstrip()
+ self.log.debug("Registered successfully with ID {0}".format(reg_id))
+ return True
+
+ def _set_service_level(self):
+ cmd = ['attach', '--auto', '--servicelevel={0}'
+ .format(self.servicelevel)]
+
+ try:
+ return_out, return_err = self._sub_man_cli(cmd)
+ except util.ProcessExecutionError as e:
+ if e.stdout.rstrip() != '':
+ for line in e.stdout.split("\n"):
+ if line != '':
+ self.log_warn(line)
+ else:
+ self.log_warn("Setting the service level failed with: "
+ "{0}".format(e.stderr.strip()))
+ return False
+ for line in return_out.split("\n"):
+ if line is not "":
+ self.log.debug(line)
+ return True
+
+ def _set_auto_attach(self):
+ cmd = ['attach', '--auto']
+ try:
+ return_out, return_err = self._sub_man_cli(cmd)
+ except util.ProcessExecutionError as e:
+ self.log_warn("Auto-attach failed with: "
+ "{0}".format(str(e).strip()))
+ return False
+ for line in return_out.split("\n"):
+ if line is not "":
+ self.log.debug(line)
+ return True
+
+ def _getPools(self):
+ '''
+ Gets the list pools for the active subscription and returns them
+ in list form.
+ '''
+ available = []
+ consumed = []
+
+ # Get all available pools
+ cmd = ['list', '--available', '--pool-only']
+ results, errors = self._sub_man_cli(cmd)
+ available = (results.rstrip()).split("\n")
+
+ # Get all consumed pools
+ cmd = ['list', '--consumed', '--pool-only']
+ results, errors = self._sub_man_cli(cmd)
+ consumed = (results.rstrip()).split("\n")
+
+ return available, consumed
+
+ def _getRepos(self):
+ '''
+ Obtains the current list of active yum repositories and returns
+ them in list form.
+ '''
+
+ cmd = ['repos', '--list-enabled']
+ return_out, return_err = self._sub_man_cli(cmd)
+ active_repos = []
+ for repo in return_out.split("\n"):
+ if "Repo ID:" in repo:
+ active_repos.append((repo.split(':')[1]).strip())
+
+ cmd = ['repos', '--list-disabled']
+ return_out, return_err = self._sub_man_cli(cmd)
+
+ inactive_repos = []
+ for repo in return_out.split("\n"):
+ if "Repo ID:" in repo:
+ inactive_repos.append((repo.split(':')[1]).strip())
+ return active_repos, inactive_repos
+
+ def addPool(self, pools):
+ '''
+ Takes a list of subscription pools and "attaches" them to the
+ current subscription
+ '''
+
+ # An empty list was passed
+ if len(pools) == 0:
+ self.log.debug("No pools to attach")
+ return True
+
+ pool_available, pool_consumed = self._getPools()
+ pool_list = []
+ cmd = ['attach']
+ for pool in pools:
+ if (pool not in pool_consumed) and (pool in pool_available):
+ pool_list.append('--pool={0}'.format(pool))
+ else:
+ self.log_warn("Pool {0} is not available".format(pool))
+ if len(pool_list) > 0:
+ cmd.extend(pool_list)
+ try:
+ self._sub_man_cli(cmd)
+ self.log.debug("Attached the following pools to your "
+ "system: %s" % (", ".join(pool_list))
+ .replace('--pool=', ''))
+ return True
+ except util.ProcessExecutionError as e:
+ self.log_warn("Unable to attach pool {0} "
+ "due to {1}".format(pool, e))
+ return False
+
+ def update_repos(self, erepos, drepos):
+ '''
+        Takes a list of yum repo IDs to enable and a list to disable. It
+        verifies which are already in the desired state and then enables
+        or disables the remainder.
+ '''
+
+        if erepos is None:
+            erepos = []
+        elif not isinstance(erepos, list):
+            self.log_warn("Repo IDs must be in the format of a list.")
+            return False
+
+        if drepos is None:
+            drepos = []
+        elif not isinstance(drepos, list):
+            self.log_warn("Repo IDs must be in the format of a list.")
+            return False
+
+        # Bail if both lists are empty
+        if (len(erepos) == 0) and (len(drepos) == 0):
+            self.log.debug("No repo IDs to enable or disable")
+            return True
+
+ active_repos, inactive_repos = self._getRepos()
+ # Creating a list of repoids to be enabled
+ enable_list = []
+ enable_list_fail = []
+ for repoid in erepos:
+ if (repoid in inactive_repos):
+ enable_list.append("--enable={0}".format(repoid))
+ else:
+ enable_list_fail.append(repoid)
+
+ # Creating a list of repoids to be disabled
+ disable_list = []
+ disable_list_fail = []
+ for repoid in drepos:
+ if repoid in active_repos:
+ disable_list.append("--disable={0}".format(repoid))
+ else:
+ disable_list_fail.append(repoid)
+
+ # Logging any repos that are already enabled or disabled
+ if len(enable_list_fail) > 0:
+ for fail in enable_list_fail:
+ # Check if the repo exists or not
+ if fail in active_repos:
+ self.log.debug("Repo {0} is already enabled".format(fail))
+ else:
+ self.log_warn("Repo {0} does not appear to "
+ "exist".format(fail))
+ if len(disable_list_fail) > 0:
+ for fail in disable_list_fail:
+ self.log.debug("Repo {0} not disabled "
+ "because it is not enabled".format(fail))
+
+ cmd = ['repos']
+ if len(enable_list) > 0:
+ cmd.extend(enable_list)
+ if len(disable_list) > 0:
+ cmd.extend(disable_list)
+
+ try:
+ self._sub_man_cli(cmd)
+ except util.ProcessExecutionError as e:
+ self.log_warn("Unable to alter repos due to {0}".format(e))
+ return False
+
+ if len(enable_list) > 0:
+ self.log.debug("Enabled the following repos: %s" %
+ (", ".join(enable_list)).replace('--enable=', ''))
+ if len(disable_list) > 0:
+ self.log.debug("Disabled the following repos: %s" %
+ (", ".join(disable_list)).replace('--disable=', ''))
+ return True
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 7d2ec10a..0ecf3a4d 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -41,7 +41,7 @@ from cloudinit.settings import PER_INSTANCE
from cloudinit import url_helper as uhelp
from cloudinit import util
-from urlparse import parse_qs
+from six.moves.urllib_parse import parse_qs
frequency = PER_INSTANCE
@@ -58,7 +58,7 @@ def handle(name, _cfg, cloud, log, _args):
try:
mdict = parse_qs(ud)
- if mdict or MY_HOOKNAME not in mdict:
+ if not mdict or MY_HOOKNAME not in mdict:
log.debug(("Skipping module %s, "
"did not find %s in parsed"
" raw userdata"), name, MY_HOOKNAME)
@@ -82,7 +82,7 @@ def handle(name, _cfg, cloud, log, _args):
resp = uhelp.readurl(url)
# Ensure its a valid http response (and something gotten)
if resp.ok() and resp.contents:
- util.write_file(fname, str(resp), mode=0700)
+ util.write_file(fname, resp, mode=0o700)
wrote_fns.append(fname)
except Exception as e:
captured_excps.append(e)
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 57486edc..b8642d65 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -17,37 +17,166 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+rsyslog module allows configuration of syslog logging via rsyslog
+Configuration is done under the cloud-config top level 'rsyslog'.
+
+Under 'rsyslog' you can define:
+ - configs: [default=[]]
+ this is a list. entries in it are a string or a dictionary.
+ each entry has 2 parts:
+ * content
+ * filename
+ if the entry is a string, then it is assigned to 'content'.
+ for each entry, content is written to the provided filename.
+ if filename is not provided, its default is read from 'config_filename'
+
+     Content here can be any valid rsyslog configuration. No specific
+     format is enforced.
+
+ For simply logging to an existing remote syslog server, via udp:
+ configs: ["*.* @192.168.1.1"]
+
+ - remotes: [default={}]
+ This is a dictionary of name / value pairs.
+     In comparison to 'configs', it is more focused in that it only supports
+     remote syslog configuration. It is not rsyslog specific, and could be
+     converted for use with other syslog implementations.
+
+ Each entry in remotes is a 'name' and a 'value'.
+       * name: a string identifying the entry. good practice would indicate
+ using a consistent and identifiable string for the producer.
+ For example, the MAAS service could use 'maas' as the key.
+ * value consists of the following parts:
+ * optional filter for log messages
+ default if not present: *.*
+ * optional leading '@' or '@@' (indicates udp or tcp respectively).
+         This is the rsyslog notation for udp/tcp. default if not
+         present: '@' (udp).
+ * ipv4 or ipv6 or hostname
+ ipv6 addresses must be in [::1] format. (@[fd00::1]:514)
+ * optional port
+ port defaults to 514
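+
+     For example, "*.* @@[fd00::1]:10514" selects all messages (*.*) and
+     sends them via tcp (@@) to the ipv6 host fd00::1 on port 10514.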
+
+ - config_filename: [default=20-cloud-config.conf]
+ this is the file name to use if none is provided in a config entry.
+
+ - config_dir: [default=/etc/rsyslog.d]
+ this directory is used for filenames that are not absolute paths.
+
+ - service_reload_command: [default="auto"]
+ this command is executed if files have been written and thus the syslog
+ daemon needs to be told.
+
+Note: since cloud-init 0.5, a legacy version of rsyslog config has been
+present and is still supported. See below for the mapping between old
+and new values:
+ old value -> new value
+ 'rsyslog' -> rsyslog/configs
+ 'rsyslog_filename' -> rsyslog/config_filename
+ 'rsyslog_dir' -> rsyslog/config_dir
+
+the legacy config does not support 'service_reload_command'.
+
+Example config:
+ #cloud-config
+ rsyslog:
+ configs:
+ - "*.* @@192.158.1.1"
+ - content: "*.* @@192.0.2.1:10514"
+ filename: 01-example.conf
+ - content: |
+ *.* @@syslogd.example.com
+ remotes:
+ maas: "192.168.1.1"
+ juju: "10.0.4.1"
+ config_dir: config_dir
+ config_filename: config_filename
+ service_reload_command: [your, syslog, restart, command]
+
+Example Legacy config:
+ #cloud-config
+ rsyslog:
+ - "*.* @@192.158.1.1"
+ rsyslog_dir: /etc/rsyslog-config.d/
+ rsyslog_filename: 99-local.conf
+"""
import os
+import re
+import six
+from cloudinit import log as logging
from cloudinit import util
DEF_FILENAME = "20-cloud-config.conf"
DEF_DIR = "/etc/rsyslog.d"
+DEF_RELOAD = "auto"
+DEF_REMOTES = {}
+KEYNAME_CONFIGS = 'configs'
+KEYNAME_FILENAME = 'config_filename'
+KEYNAME_DIR = 'config_dir'
+KEYNAME_RELOAD = 'service_reload_command'
+KEYNAME_LEGACY_FILENAME = 'rsyslog_filename'
+KEYNAME_LEGACY_DIR = 'rsyslog_dir'
+KEYNAME_REMOTES = 'remotes'
-def handle(name, cfg, cloud, log, _args):
- # rsyslog:
- # - "*.* @@192.158.1.1"
- # - content: "*.* @@192.0.2.1:10514"
- # - filename: 01-examplecom.conf
- # content: |
- # *.* @@syslogd.example.com
-
- # process 'rsyslog'
- if 'rsyslog' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'rsyslog' key in configuration"), name)
- return
+LOG = logging.getLogger(__name__)
- def_dir = cfg.get('rsyslog_dir', DEF_DIR)
- def_fname = cfg.get('rsyslog_filename', DEF_FILENAME)
+COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
+HOST_PORT_RE = re.compile(
+ r'^(?P<proto>[@]{0,2})'
+ '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
+ '([:](?P<port>[0-9]+))?$')
+
+def reload_syslog(command=DEF_RELOAD, systemd=False):
+ service = 'rsyslog'
+ if command == DEF_RELOAD:
+ if systemd:
+ cmd = ['systemctl', 'reload-or-try-restart', service]
+ else:
+ cmd = ['service', service, 'restart']
+ else:
+ cmd = command
+ util.subp(cmd, capture=True)
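+
+# For example, reload_syslog() with the default 'auto' command runs
+# 'service rsyslog restart' ('systemctl reload-or-try-restart rsyslog'
+# when systemd=True); a list command such as
+# ['systemctl', 'restart', 'rsyslog'] is executed as given.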
+
+
+def load_config(cfg):
+ # return an updated config with entries of the correct type
+ # support converting the old top level format into new format
+ mycfg = cfg.get('rsyslog', {})
+
+ if isinstance(cfg.get('rsyslog'), list):
+ mycfg = {KEYNAME_CONFIGS: cfg.get('rsyslog')}
+ if KEYNAME_LEGACY_FILENAME in cfg:
+ mycfg[KEYNAME_FILENAME] = cfg[KEYNAME_LEGACY_FILENAME]
+ if KEYNAME_LEGACY_DIR in cfg:
+ mycfg[KEYNAME_DIR] = cfg[KEYNAME_LEGACY_DIR]
+
+ fillup = (
+ (KEYNAME_CONFIGS, [], list),
+ (KEYNAME_DIR, DEF_DIR, six.string_types),
+ (KEYNAME_FILENAME, DEF_FILENAME, six.string_types),
+ (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)),
+ (KEYNAME_REMOTES, DEF_REMOTES, dict))
+
+ for key, default, vtypes in fillup:
+ if key not in mycfg or not isinstance(mycfg[key], vtypes):
+ mycfg[key] = default
+
+ return mycfg
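+
+# For example, a legacy top-level config such as
+#   {'rsyslog': ['*.* @@192.158.1.1'], 'rsyslog_filename': '99-local.conf'}
+# loads as:
+#   {'configs': ['*.* @@192.158.1.1'], 'config_filename': '99-local.conf',
+#    'config_dir': '/etc/rsyslog.d', 'service_reload_command': 'auto',
+#    'remotes': {}}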
+
+
+def apply_rsyslog_changes(configs, def_fname, cfg_dir):
+ # apply the changes in 'configs' to the paths in def_fname and cfg_dir
+ # return a list of the files changed
files = []
- for i, ent in enumerate(cfg['rsyslog']):
+ for cur_pos, ent in enumerate(configs):
if isinstance(ent, dict):
if "content" not in ent:
- log.warn("No 'content' entry in config entry %s", i + 1)
+ LOG.warn("No 'content' entry in config entry %s", cur_pos + 1)
continue
content = ent['content']
filename = ent.get("filename", def_fname)
@@ -57,11 +186,10 @@ def handle(name, cfg, cloud, log, _args):
filename = filename.strip()
if not filename:
- log.warn("Entry %s has an empty filename", i + 1)
+ LOG.warn("Entry %s has an empty filename", cur_pos + 1)
continue
- if not filename.startswith("/"):
- filename = os.path.join(def_dir, filename)
+ filename = os.path.join(cfg_dir, filename)
# Truncate filename first time you see it
omode = "ab"
@@ -70,27 +198,164 @@ def handle(name, cfg, cloud, log, _args):
files.append(filename)
try:
- contents = "%s\n" % (content)
- util.write_file(filename, contents, omode=omode)
+ endl = ""
+ if not content.endswith("\n"):
+ endl = "\n"
+ util.write_file(filename, content + endl, omode=omode)
except Exception:
- util.logexc(log, "Failed to write to %s", filename)
+ util.logexc(LOG, "Failed to write to %s", filename)
+
+ return files
+
+
+def parse_remotes_line(line, name=None):
+ try:
+ data, comment = COMMENT_RE.split(line)
+ comment = comment.strip()
+ except ValueError:
+ data, comment = (line, None)
+
+ toks = data.strip().split()
+ match = None
+ if len(toks) == 1:
+ host_port = data
+ elif len(toks) == 2:
+ match, host_port = toks
+ else:
+ raise ValueError("line had multiple spaces: %s" % data)
+
+ toks = HOST_PORT_RE.match(host_port)
+
+ if not toks:
+ raise ValueError("Invalid host specification '%s'" % host_port)
+
+ proto = toks.group('proto')
+ addr = toks.group('addr') or toks.group('bracket_addr')
+ port = toks.group('port')
+
+ if addr.startswith("[") and not addr.endswith("]"):
+ raise ValueError("host spec had invalid brackets: %s" % addr)
+
+ if comment and not name:
+ name = comment
+
+ t = SyslogRemotesLine(name=name, match=match, proto=proto,
+ addr=addr, port=port)
+ t.validate()
+ return t
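+
+# For example, parse_remotes_line("*.* @@192.0.2.1:10514", name="maas")
+# returns a SyslogRemotesLine whose str() renders as
+# "*.* @@192.0.2.1:10514 # maas".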
+
+
+class SyslogRemotesLine(object):
+ def __init__(self, name=None, match=None, proto=None, addr=None,
+ port=None):
+ if not match:
+ match = "*.*"
+ self.name = name
+ self.match = match
+ if not proto:
+ proto = "udp"
+ if proto == "@":
+ proto = "udp"
+ elif proto == "@@":
+ proto = "tcp"
+ self.proto = proto
+
+ self.addr = addr
+ if port:
+ self.port = int(port)
+ else:
+ self.port = None
+
+ def validate(self):
+ if self.port:
+ try:
+ int(self.port)
+ except ValueError:
+ raise ValueError("port '%s' is not an integer" % self.port)
+
+ if not self.addr:
+ raise ValueError("address is required")
+
+ def __repr__(self):
+ return "[name=%s match=%s proto=%s address=%s port=%s]" % (
+ self.name, self.match, self.proto, self.addr, self.port
+ )
+
+ def __str__(self):
+ buf = self.match + " "
+ if self.proto == "udp":
+ buf += "@"
+ elif self.proto == "tcp":
+ buf += "@@"
+
+ if ":" in self.addr:
+ buf += "[" + self.addr + "]"
+ else:
+ buf += self.addr
+
+ if self.port:
+ buf += ":%s" % self.port
+
+ if self.name:
+ buf += " # %s" % self.name
+ return buf
+
+
+def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
+ if not remotes:
+ return None
+ lines = []
+ if header is not None:
+ lines.append(header)
+ for name, line in remotes.items():
+ if not line:
+ continue
+ try:
+ lines.append(str(parse_remotes_line(line, name=name)))
+ except ValueError as e:
+ LOG.warn("failed loading remote %s: %s [%s]", name, line, e)
+ if footer is not None:
+ lines.append(footer)
+ return '\n'.join(lines) + "\n"
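+
+# For example, remotes_to_rsyslog_cfg({'maas': '192.168.1.1'},
+# header="# begin remotes", footer="# end remotes") returns:
+#   # begin remotes
+#   *.* @192.168.1.1 # maas
+#   # end remotes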
+
+
+def handle(name, cfg, cloud, log, _args):
+ if 'rsyslog' not in cfg:
+ log.debug(("Skipping module named %s,"
+ " no 'rsyslog' key in configuration"), name)
+ return
+
+ mycfg = load_config(cfg)
+ configs = mycfg[KEYNAME_CONFIGS]
+
+ if mycfg[KEYNAME_REMOTES]:
+ configs.append(
+ remotes_to_rsyslog_cfg(
+ mycfg[KEYNAME_REMOTES],
+ header="# begin remotes",
+ footer="# end remotes",
+ ))
+
+    if not mycfg[KEYNAME_CONFIGS]:
+ log.debug("Empty config rsyslog['configs'], nothing to do")
+ return
+
+ changes = apply_rsyslog_changes(
+ configs=mycfg[KEYNAME_CONFIGS],
+ def_fname=mycfg[KEYNAME_FILENAME],
+ cfg_dir=mycfg[KEYNAME_DIR])
+
+ if not changes:
+ log.debug("restart of syslog not necessary, no changes made")
+ return
- # Attempt to restart syslogd
- restarted = False
try:
- # If this config module is running at cloud-init time
- # (before rsyslog is running) we don't actually have to
- # restart syslog.
- #
- # Upstart actually does what we want here, in that it doesn't
- # start a service that wasn't running already on 'restart'
- # it will also return failure on the attempt, so 'restarted'
- # won't get set.
- log.debug("Restarting rsyslog")
- util.subp(['service', 'rsyslog', 'restart'])
- restarted = True
- except Exception:
- util.logexc(log, "Failed restarting rsyslog")
+        reload_syslog(
+            command=mycfg[KEYNAME_RELOAD],
+            systemd=cloud.distro.uses_systemd())
+        restarted = True
+    except util.ProcessExecutionError as e:
+        restarted = False
+        log.warn("Failed to reload syslog: %s", e)
if restarted:
# This only needs to run if we *actually* restarted
@@ -98,4 +363,4 @@ def handle(name, cfg, cloud, log, _args):
cloud.cycle_logging()
# This should now use rsyslog if
# the logging was setup to use it...
- log.debug("%s configured %s files", name, files)
+ log.debug("%s configured %s files", name, changes)
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 598c3a3e..66dc3363 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -33,6 +33,6 @@ def handle(name, cfg, cloud, log, _args):
cmd = cfg["runcmd"]
try:
content = util.shellify(cmd)
- util.write_file(out_fn, content, 0700)
+ util.write_file(out_fn, content, 0o700)
except:
util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 53013dcb..f5786a31 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -47,7 +47,7 @@ def handle(name, cfg, cloud, log, _args):
# ... copy the key pair if specified
if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
- with util.umask(077):
+ with util.umask(0o77):
util.ensure_dir(pki_dir)
pub_name = os.path.join(pki_dir, 'minion.pub')
pem_name = os.path.join(pki_dir, 'minion.pem')
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 49a6b3e8..1b011216 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -21,7 +21,8 @@
import base64
import os
-from StringIO import StringIO
+
+from six import BytesIO
from cloudinit.settings import PER_INSTANCE
from cloudinit import log as logging
@@ -33,13 +34,13 @@ LOG = logging.getLogger(__name__)
def _decode(data, encoding=None):
if not data:
- return ''
+ return b''
if not encoding or encoding.lower() in ['raw']:
- return data
+ return util.encode_text(data)
elif encoding.lower() in ['base64', 'b64']:
return base64.b64decode(data)
elif encoding.lower() in ['gzip', 'gz']:
- return util.decomp_gzip(data, quiet=False)
+ return util.decomp_gzip(data, quiet=False, decode=None)
else:
raise IOError("Unknown random_seed encoding: %s" % (encoding))
@@ -64,9 +65,9 @@ def handle_random_seed_command(command, required, env=None):
def handle(name, cfg, cloud, log, _args):
mycfg = cfg.get('random_seed', {})
seed_path = mycfg.get('file', '/dev/urandom')
- seed_data = mycfg.get('data', '')
+ seed_data = mycfg.get('data', b'')
- seed_buf = StringIO()
+ seed_buf = BytesIO()
if seed_data:
seed_buf.write(_decode(seed_data, encoding=mycfg.get('encoding')))
@@ -74,7 +75,7 @@ def handle(name, cfg, cloud, log, _args):
# openstack meta_data.json
metadata = cloud.datasource.metadata
if metadata and 'random_seed' in metadata:
- seed_buf.write(metadata['random_seed'])
+ seed_buf.write(util.encode_text(metadata['random_seed']))
seed_data = seed_buf.getvalue()
if len(seed_data):
@@ -82,7 +83,7 @@ def handle(name, cfg, cloud, log, _args):
len(seed_data), seed_path)
util.append_file(seed_path, seed_data)
- command = mycfg.get('command', ['pollinate', '-q'])
+ command = mycfg.get('command', None)
req = mycfg.get('command_required', False)
try:
env = os.environ.copy()
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 5d7f4331..f43d8d5a 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -24,7 +24,7 @@ from cloudinit import util
def handle(name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
log.debug(("Configuration option 'preserve_hostname' is set,"
- " not setting the hostname in module %s"), name)
+ " not setting the hostname in module %s"), name)
return
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 4ca85e21..58e1b713 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -28,11 +28,11 @@ from cloudinit import distros as ds
from cloudinit import ssh_util
from cloudinit import util
-from string import letters, digits
+from string import ascii_letters, digits
# We are removing certain 'painful' letters/numbers
-PW_SET = (letters.translate(None, 'loLOI') +
- digits.translate(None, '01'))
+PW_SET = (''.join([x for x in ascii_letters + digits
+ if x not in 'loLOI01']))
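+# For example, generated passwords draw from characters such as 'a', 'B'
+# or '2', but never 'l', 'o', 'L', 'O', 'I', '0' or '1', which are easily
+# confused with one another.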
def handle(_name, cfg, cloud, log, args):
@@ -45,8 +45,6 @@ def handle(_name, cfg, cloud, log, args):
password = util.get_cfg_option_str(cfg, "password", None)
expire = True
- pw_auth = "no"
- change_pwauth = False
plist = None
if 'chpasswd' in cfg:
@@ -104,11 +102,24 @@ def handle(_name, cfg, cloud, log, args):
change_pwauth = False
pw_auth = None
if 'ssh_pwauth' in cfg:
- change_pwauth = True
if util.is_true(cfg['ssh_pwauth']):
+ change_pwauth = True
pw_auth = 'yes'
- if util.is_false(cfg['ssh_pwauth']):
+ elif util.is_false(cfg['ssh_pwauth']):
+ change_pwauth = True
pw_auth = 'no'
+ elif str(cfg['ssh_pwauth']).lower() == 'unchanged':
+ log.debug('Leaving auth line unchanged')
+ change_pwauth = False
+ elif not str(cfg['ssh_pwauth']).strip():
+ log.debug('Leaving auth line unchanged')
+ change_pwauth = False
+ elif not cfg['ssh_pwauth']:
+ log.debug('Leaving auth line unchanged')
+ change_pwauth = False
+ else:
+ msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth']
+            log.warn(msg)
if change_pwauth:
replaced_auth = False
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
new file mode 100644
index 00000000..fa9d54a0
--- /dev/null
+++ b/cloudinit/config/cc_snappy.py
@@ -0,0 +1,304 @@
+# vi: ts=4 expandtab
+#
+"""
+The snappy module allows configuration of snappy.
+Example config:
+ #cloud-config
+ snappy:
+ system_snappy: auto
+ ssh_enabled: auto
+ packages: [etcd, pkg2.smoser]
+ config:
+ pkgname:
+ key2: value2
+ pkg2:
+ key1: value1
+ packages_dir: '/writable/user-data/cloud-init/snaps'
+
+ - ssh_enabled:
+ This controls the system's ssh service. The default value is 'auto'.
+ True: enable ssh service
+ False: disable ssh service
+ auto: enable ssh service if either ssh keys have been provided
+ or user has requested password authentication (ssh_pwauth).
+
+ - snap installation and config
+   The above would install 'etcd', and then install 'pkg2.smoser' with a
+   '<config-file>' argument, where the config file carries the config
+   provided for 'pkg2' above. If 'pkgname' is installed already, then
+   'snappy config pkgname <file>' will be called, where 'file' carries
+   the config provided for 'pkgname'.
+
+ Entries in 'config' can be namespaced or non-namespaced for a package.
+ In either case, the config provided to snappy command is non-namespaced.
+ The package name is provided as it appears.
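+
+   For example, config for the package 'pkg2.smoser' may be keyed under
+   either 'pkg2.smoser' or 'pkg2'; if both are present, the full name is
+   preferred.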
+
+ If 'packages_dir' has files in it that end in '.snap', then they are
+ installed. Given 3 files:
+ <packages_dir>/foo.snap
+ <packages_dir>/foo.config
+ <packages_dir>/bar.snap
+ cloud-init will invoke:
+ snappy install <packages_dir>/foo.snap <packages_dir>/foo.config
+ snappy install <packages_dir>/bar.snap
+
+ Note that if a 'config' entry is provided for 'ubuntu-core', then
+ cloud-init will invoke: snappy config ubuntu-core <config>
+ allowing you to configure ubuntu-core in this way.
+"""
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
+
+import glob
+import tempfile
+import os
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+SNAPPY_CMD = "snappy"
+NAMESPACE_DELIM = '.'
+
+BUILTIN_CFG = {
+ 'packages': [],
+ 'packages_dir': '/writable/user-data/cloud-init/snaps',
+ 'ssh_enabled': "auto",
+ 'system_snappy': "auto",
+ 'config': {},
+}
+
+
+def parse_filename(fname):
+ fname = os.path.basename(fname)
+ fname_noext = fname.rpartition(".")[0]
+ name = fname_noext.partition("_")[0]
+ shortname = name.partition(".")[0]
+ return(name, shortname, fname_noext)
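+
+# For example, parse_filename("/foo/pkg2.smoser_1.0_all.snap") returns
+# ("pkg2.smoser", "pkg2", "pkg2.smoser_1.0_all").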
+
+
+def get_fs_package_ops(fspath):
+ if not fspath:
+ return []
+ ops = []
+ for snapfile in sorted(glob.glob(os.path.sep.join([fspath, '*.snap']))):
+ (name, shortname, fname_noext) = parse_filename(snapfile)
+ cfg = None
+ for cand in (fname_noext, name, shortname):
+ fpcand = os.path.sep.join([fspath, cand]) + ".config"
+ if os.path.isfile(fpcand):
+ cfg = fpcand
+ break
+ ops.append(makeop('install', name, config=None,
+ path=snapfile, cfgfile=cfg))
+ return ops
+
+
+def makeop(op, name, config=None, path=None, cfgfile=None):
+ return({'op': op, 'name': name, 'config': config, 'path': path,
+ 'cfgfile': cfgfile})
+
+
+def get_package_config(configs, name):
+ # load the package's config from the configs dict.
+ # prefer full-name entry (config-example.canonical)
+ # over short name entry (config-example)
+ if name in configs:
+ return configs[name]
+ return configs.get(name.partition(NAMESPACE_DELIM)[0])
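+
+# For example, with configs = {'config-example.canonical': A,
+# 'config-example': B}, the name 'config-example.canonical' returns A,
+# while 'config-example.other' falls back to its short name and returns B.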
+
+
+def get_package_ops(packages, configs, installed=None, fspath=None):
+ # get the install an config operations that should be done
+ if installed is None:
+ installed = read_installed_packages()
+ short_installed = [p.partition(NAMESPACE_DELIM)[0] for p in installed]
+
+ if not packages:
+ packages = []
+ if not configs:
+ configs = {}
+
+ ops = []
+ ops += get_fs_package_ops(fspath)
+
+ for name in packages:
+ ops.append(makeop('install', name, get_package_config(configs, name)))
+
+ to_install = [f['name'] for f in ops]
+ short_to_install = [f['name'].partition(NAMESPACE_DELIM)[0] for f in ops]
+
+ for name in configs:
+ if name in to_install:
+ continue
+ shortname = name.partition(NAMESPACE_DELIM)[0]
+ if shortname in short_to_install:
+ continue
+ if name in installed or shortname in short_installed:
+ ops.append(makeop('config', name,
+ config=get_package_config(configs, name)))
+
+ # prefer config entries to filepath entries
+ for op in ops:
+ if op['op'] != 'install' or not op['cfgfile']:
+ continue
+ name = op['name']
+ fromcfg = get_package_config(configs, op['name'])
+ if fromcfg:
+ LOG.debug("preferring configs[%(name)s] over '%(cfgfile)s'", op)
+ op['cfgfile'] = None
+ op['config'] = fromcfg
+
+ return ops
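+
+# For example, get_package_ops(['etcd'], {'etcd': {'key': 'val'}},
+# installed=[]) returns a single operation:
+#   {'op': 'install', 'name': 'etcd', 'config': {'key': 'val'},
+#    'path': None, 'cfgfile': None}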
+
+
+def render_snap_op(op, name, path=None, cfgfile=None, config=None):
+ if op not in ('install', 'config'):
+ raise ValueError("cannot render op '%s'" % op)
+
+ shortname = name.partition(NAMESPACE_DELIM)[0]
+ try:
+ cfg_tmpf = None
+ if config is not None:
+ # input to 'snappy config packagename' must have nested data. odd.
+ # config:
+ # packagename:
+ # config
+ # Note, however, we do not touch config files on disk.
+ nested_cfg = {'config': {shortname: config}}
+ (fd, cfg_tmpf) = tempfile.mkstemp()
+ os.write(fd, util.yaml_dumps(nested_cfg).encode())
+ os.close(fd)
+ cfgfile = cfg_tmpf
+
+ cmd = [SNAPPY_CMD, op]
+ if op == 'install':
+ if path:
+ cmd.append("--allow-unauthenticated")
+ cmd.append(path)
+ else:
+ cmd.append(name)
+ if cfgfile:
+ cmd.append(cfgfile)
+ elif op == 'config':
+ cmd += [name, cfgfile]
+
+ util.subp(cmd)
+
+ finally:
+ if cfg_tmpf:
+ os.unlink(cfg_tmpf)
+
+
+def read_installed_packages():
+ ret = []
+ for (name, date, version, dev) in read_pkg_data():
+ if dev:
+ ret.append(NAMESPACE_DELIM.join([name, dev]))
+ else:
+ ret.append(name)
+ return ret
+
+
+def read_pkg_data():
+ out, err = util.subp([SNAPPY_CMD, "list"])
+ pkg_data = []
+ for line in out.splitlines()[1:]:
+        # positional args: keyword args to str.split are python3-only
+        toks = line.split(None, 3)
+ if len(toks) == 3:
+ (name, date, version) = toks
+ dev = None
+ else:
+ (name, date, version, dev) = toks
+ pkg_data.append((name, date, version, dev,))
+ return pkg_data
+
+
+def disable_enable_ssh(enabled):
+ LOG.debug("setting enablement of ssh to: %s", enabled)
+ # do something here that would enable or disable
+ not_to_be_run = "/etc/ssh/sshd_not_to_be_run"
+ if enabled:
+ util.del_file(not_to_be_run)
+        # this is an idempotent operation
+ util.subp(["systemctl", "start", "ssh"])
+ else:
+        # this is an idempotent operation
+ util.subp(["systemctl", "stop", "ssh"])
+ util.write_file(not_to_be_run, "cloud-init\n")
+
+
+def system_is_snappy():
+ # channel.ini is configparser loadable.
+ # snappy will move to using /etc/system-image/config.d/*.ini
+ # this is certainly not a perfect test, but good enough for now.
+ content = util.load_file("/etc/system-image/channel.ini", quiet=True)
+ if 'ubuntu-core' in content.lower():
+ return True
+ if os.path.isdir("/etc/system-image/config.d/"):
+ return True
+ return False
+
+
+def set_snappy_command():
+ global SNAPPY_CMD
+ if util.which("snappy-go"):
+ SNAPPY_CMD = "snappy-go"
+ else:
+ SNAPPY_CMD = "snappy"
+ LOG.debug("snappy command is '%s'", SNAPPY_CMD)
+
+
+def handle(name, cfg, cloud, log, args):
+ cfgin = cfg.get('snappy')
+ if not cfgin:
+ cfgin = {}
+ mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
+
+ sys_snappy = str(mycfg.get("system_snappy", "auto"))
+ if util.is_false(sys_snappy):
+ LOG.debug("%s: System is not snappy. disabling", name)
+ return
+
+    if sys_snappy.lower() == "auto" and not system_is_snappy():
+ LOG.debug("%s: 'auto' mode, and system not snappy", name)
+ return
+
+ set_snappy_command()
+
+ pkg_ops = get_package_ops(packages=mycfg['packages'],
+ configs=mycfg['config'],
+ fspath=mycfg['packages_dir'])
+
+ fails = []
+ for pkg_op in pkg_ops:
+ try:
+ render_snap_op(**pkg_op)
+ except Exception as e:
+ fails.append((pkg_op, e,))
+ LOG.warn("'%s' failed for '%s': %s",
+ pkg_op['op'], pkg_op['name'], e)
+
+ # Default to disabling SSH
+ ssh_enabled = mycfg.get('ssh_enabled', "auto")
+
+ # If the user has not explicitly enabled or disabled SSH, then enable it
+ # when password SSH authentication is requested or there are SSH keys
+ if ssh_enabled == "auto":
+ user_ssh_keys = cloud.get_public_ssh_keys() or None
+ password_auth_enabled = cfg.get('ssh_pwauth', False)
+ if user_ssh_keys:
+ LOG.debug("Enabling SSH, ssh keys found in datasource")
+ ssh_enabled = True
+        elif cfg.get('ssh_authorized_keys'):
+            LOG.debug("Enabling SSH, ssh keys found in config")
+            ssh_enabled = True
+ elif password_auth_enabled:
+ LOG.debug("Enabling SSH, password authentication requested")
+ ssh_enabled = True
+ elif ssh_enabled not in (True, False):
+ LOG.warn("Unknown value '%s' in ssh_enabled", ssh_enabled)
+
+ disable_enable_ssh(ssh_enabled)
+
+ if fails:
+ raise Exception("failed to install/configure snaps")
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 4c76581c..d24e43c0 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -20,6 +20,7 @@
import glob
import os
+import sys
# Ensure this is aliased to a name not 'distros'
# since the module attribute 'distros'
@@ -29,30 +30,23 @@ from cloudinit import distros as ds
from cloudinit import ssh_util
from cloudinit import util
-DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding,"
-"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" "
-"rather than the user \\\"root\\\".\';echo;sleep 10\"")
-
-KEY_2_FILE = {
- "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0600),
- "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
- "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0600),
- "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0644),
- "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0600),
- "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644),
-}
-
-PRIV_2_PUB = {
- 'rsa_private': 'rsa_public',
- 'dsa_private': 'dsa_public',
- 'ecdsa_private': 'ecdsa_public',
-}
+DISABLE_ROOT_OPTS = (
+ "no-port-forwarding,no-agent-forwarding,"
+ "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
+ " rather than the user \\\"root\\\".\';echo;sleep 10\"")
-KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
+GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
+KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
-GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa']
+CONFIG_KEY_TO_FILE = {}
+PRIV_TO_PUB = {}
+for k in GENERATE_KEY_NAMES:
+ CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
+ CONFIG_KEY_TO_FILE.update(
+ {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)})
+ PRIV_TO_PUB["%s_private" % k] = "%s_public" % k
-KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
+KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
def handle(_name, cfg, cloud, log, _args):
@@ -68,16 +62,16 @@ def handle(_name, cfg, cloud, log, _args):
if "ssh_keys" in cfg:
# if there are keys in cloud-config, use them
- for (key, val) in cfg["ssh_keys"].iteritems():
- if key in KEY_2_FILE:
- tgt_fn = KEY_2_FILE[key][0]
- tgt_perms = KEY_2_FILE[key][1]
+ for (key, val) in cfg["ssh_keys"].items():
+ if key in CONFIG_KEY_TO_FILE:
+ tgt_fn = CONFIG_KEY_TO_FILE[key][0]
+ tgt_perms = CONFIG_KEY_TO_FILE[key][1]
util.write_file(tgt_fn, val, tgt_perms)
- for (priv, pub) in PRIV_2_PUB.iteritems():
+ for (priv, pub) in PRIV_TO_PUB.items():
if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
continue
- pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0])
+ pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
try:
# TODO(harlowja): Is this guard needed?
@@ -92,18 +86,28 @@ def handle(_name, cfg, cloud, log, _args):
genkeys = util.get_cfg_option_list(cfg,
'ssh_genkeytypes',
GENERATE_KEY_NAMES)
+ lang_c = os.environ.copy()
+ lang_c['LANG'] = 'C'
for keytype in genkeys:
keyfile = KEY_FILE_TPL % (keytype)
+ if os.path.exists(keyfile):
+ continue
util.ensure_dir(os.path.dirname(keyfile))
- if not os.path.exists(keyfile):
- cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
+ cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
+
+ # TODO(harlowja): Is this guard needed?
+ with util.SeLinuxGuard("/etc/ssh", recursive=True):
try:
- # TODO(harlowja): Is this guard needed?
- with util.SeLinuxGuard("/etc/ssh", recursive=True):
- util.subp(cmd, capture=False)
- except:
- util.logexc(log, "Failed generating key type %s to "
- "file %s", keytype, keyfile)
+ out, err = util.subp(cmd, capture=True, env=lang_c)
+ sys.stdout.write(util.decode_binary(out))
+ except util.ProcessExecutionError as e:
+ err = util.decode_binary(e.stderr).lower()
+                if (e.exit_code == 1 and
+                        err.startswith("unknown key")):
+ log.debug("ssh-keygen: unknown key type '%s'", keytype)
+ else:
+ util.logexc(log, "Failed generating key type %s to "
+ "file %s", keytype, keyfile)
try:
(users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 51580633..6ce831bc 100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -32,7 +32,7 @@ from cloudinit import util
def _split_hash(bin_hash):
split_up = []
- for i in xrange(0, len(bin_hash), 2):
+ for i in range(0, len(bin_hash), 2):
split_up.append(bin_hash[i:i + 2])
return split_up
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index d3dd1f32..15703efe 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -41,10 +41,10 @@ def handle(name, cfg, cloud, log, _args):
if not tpl_fn_name:
raise RuntimeError(("No hosts template could be"
" found for distro %s") %
- (cloud.distro.osfamily))
+ (cloud.distro.osfamily))
templater.render_to_file(tpl_fn_name, '/etc/hosts',
- {'hostname': hostname, 'fqdn': fqdn})
+ {'hostname': hostname, 'fqdn': fqdn})
elif manage_hosts == "localhost":
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
@@ -57,4 +57,4 @@ def handle(name, cfg, cloud, log, _args):
cloud.distro.update_etc_hosts(hostname, fqdn)
else:
log.debug(("Configuration option 'manage_etc_hosts' is not set,"
- " not managing /etc/hosts in module %s"), name)
+ " not managing /etc/hosts in module %s"), name)
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index e396ba13..5b78afe1 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -29,7 +29,7 @@ frequency = PER_ALWAYS
def handle(name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
log.debug(("Configuration option 'preserve_hostname' is set,"
- " not updating the hostname in module %s"), name)
+ " not updating the hostname in module %s"), name)
return
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index a73d6f4e..4b03ea91 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -18,6 +18,7 @@
import base64
import os
+import six
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
@@ -25,7 +26,7 @@ from cloudinit import util
frequency = PER_INSTANCE
DEFAULT_OWNER = "root:root"
-DEFAULT_PERMS = 0644
+DEFAULT_PERMS = 0o644
UNKNOWN_ENC = 'text/plain'
@@ -79,7 +80,7 @@ def write_files(name, files, log):
def decode_perms(perm, default, log):
try:
- if isinstance(perm, (int, long, float)):
+ if isinstance(perm, six.integer_types + (float,)):
# Just 'downcast' it (if a float)
return int(perm)
else:
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 0d836f28..64fba869 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -18,9 +18,10 @@
import os
-from cloudinit import util
-
import configobj
+import six
+
+from cloudinit import util
def _canonicalize_id(repo_id):
@@ -37,7 +38,7 @@ def _format_repo_value(val):
# Can handle 'lists' in certain cases
# See: http://bit.ly/Qqrf1t
return "\n ".join([_format_repo_value(v) for v in val])
- if not isinstance(val, (basestring, str)):
+ if not isinstance(val, six.string_types):
return str(val)
return val
@@ -91,7 +92,7 @@ def handle(name, cfg, _cloud, log, _args):
for req_field in ['baseurl']:
if req_field not in repo_config:
log.warn(("Repository %s does not contain a %s"
- " configuration 'required' entry"),
+ " configuration 'required' entry"),
repo_id, req_field)
missing_required += 1
if not missing_required:
diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py
index dcf56431..83ac1a0e 100644
--- a/cloudinit/cs_utils.py
+++ b/cloudinit/cs_utils.py
@@ -83,8 +83,8 @@ class CepkoResult(object):
connection = serial.Serial(port=SERIAL_PORT,
timeout=READ_TIMEOUT,
writeTimeout=WRITE_TIMEOUT)
- connection.write(self.request)
- return connection.readline().strip('\x04\n')
+ connection.write(self.request.encode('ascii'))
+ return connection.readline().strip(b'\x04\n').decode('ascii')
def _marshal(self, raw_result):
try:
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 5eab780b..418421b9 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -21,12 +21,13 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from StringIO import StringIO
+import six
+from six import StringIO
import abc
-import itertools
import os
import re
+import stat
from cloudinit import importer
from cloudinit import log as logging
@@ -36,6 +37,7 @@ from cloudinit import util
from cloudinit.distros.parsers import hosts
+
OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
'redhat': ['fedora', 'rhel'],
@@ -73,6 +75,9 @@ class Distro(object):
# to write this blob out in a distro format
raise NotImplementedError()
+ def _write_network_config(self, settings):
+ raise NotImplementedError()
+
def _find_tz_file(self, tz):
tz_file = os.path.join(self.tz_zone_dir, str(tz))
if not os.path.isfile(tz_file):
@@ -88,6 +93,13 @@ class Distro(object):
self._write_hostname(writeable_hostname, self.hostname_conf_fn)
self._apply_hostname(writeable_hostname)
+ def uses_systemd(self):
+ try:
+ res = os.lstat('/run/systemd/system')
+ return stat.S_ISDIR(res.st_mode)
+        except OSError:
+ return False
+
@abc.abstractmethod
def package_command(self, cmd, args=None, pkgs=None):
raise NotImplementedError()
@@ -108,12 +120,11 @@ class Distro(object):
arch = self.get_primary_arch()
return _get_arch_package_mirror_info(mirror_info, arch)
- def get_package_mirror_info(self, arch=None,
- availability_zone=None):
+ def get_package_mirror_info(self, arch=None, data_source=None):
# This resolves the package_mirrors config option
# down to a single dict of {mirror_name: mirror_url}
arch_info = self._get_arch_package_mirror_info(arch)
- return _get_package_mirror_info(availability_zone=availability_zone,
+ return _get_package_mirror_info(data_source=data_source,
mirror_info=arch_info)
def apply_network(self, settings, bring_up=True):
@@ -124,6 +135,14 @@ class Distro(object):
return self._bring_up_interfaces(dev_names)
return False
+ def apply_network_config(self, netconfig, bring_up=False):
+ # Write it out
+ dev_names = self._write_network_config(netconfig)
+ # Now try to bring them up
+ if bring_up:
+ return self._bring_up_interfaces(dev_names)
+ return False
+
@abc.abstractmethod
def apply_locale(self, locale, out_fn=None):
raise NotImplementedError()
@@ -203,10 +222,19 @@ class Distro(object):
# If the system hostname is different than the previous
# one or the desired one lets update it as well
- if (not sys_hostname) or (sys_hostname == prev_hostname
- and sys_hostname != hostname):
+ if ((not sys_hostname) or (sys_hostname == prev_hostname and
+ sys_hostname != hostname)):
update_files.append(sys_fn)
+ # If something else has changed the hostname after we set it
+ # initially, we should not overwrite those changes (we should
+ # only be setting the hostname once per instance)
+ if (sys_hostname and prev_hostname and
+ sys_hostname != prev_hostname):
+ LOG.info("%s differs from %s, assuming user maintained hostname.",
+ prev_hostname_fn, sys_fn)
+ return
+
# Remove duplicates (incase the previous config filename)
# is the same as the system config filename, don't bother
# doing it twice
@@ -221,11 +249,6 @@ class Distro(object):
util.logexc(LOG, "Failed to write hostname %s to %s", hostname,
fn)
- if (sys_hostname and prev_hostname and
- sys_hostname != prev_hostname):
- LOG.debug("%s differs from %s, assuming user maintained hostname.",
- prev_hostname_fn, sys_fn)
-
# If the system hostname file name was provided set the
# non-fqdn as the transient hostname.
if sys_fn in update_files:
@@ -272,12 +295,12 @@ class Distro(object):
if header:
contents.write("%s\n" % (header))
contents.write("%s\n" % (eh))
- util.write_file(self.hosts_fn, contents.getvalue(), mode=0644)
+ util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644)
def _bring_up_interface(self, device_name):
cmd = ['ifup', device_name]
LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
+ device_name, cmd)
try:
(_out, err) = util.subp(cmd)
if len(err):
@@ -307,6 +330,11 @@ class Distro(object):
LOG.info("User %s already exists, skipping." % name)
return
+        create_groups = kwargs.pop('create_groups', True)
+
adduser_cmd = ['useradd', name]
log_adduser_cmd = ['useradd', name]
@@ -317,6 +345,7 @@ class Distro(object):
"gecos": '--comment',
"homedir": '--home',
"primary_group": '--gid',
+ "uid": '--uid',
"groups": '--groups',
"passwd": '--password',
"shell": '--shell',
@@ -333,8 +362,21 @@ class Distro(object):
redact_opts = ['passwd']
+        groups = kwargs.get('groups')
+        if groups:
+            if isinstance(groups, (list, tuple)):
+                kwargs['groups'] = ",".join(groups)
+            else:
+                groups = groups.split(",")
+
+            if create_groups:
+                for group in kwargs['groups'].split(","):
+                    if not util.is_group(group):
+                        self.create_group(group)
+                        LOG.debug("created group %s for user %s",
+                                  group, name)
+
# Check the values and create the command
- for key, val in kwargs.iteritems():
+ for key, val in kwargs.items():
if key in adduser_opts and val and isinstance(val, str):
adduser_cmd.extend([adduser_opts[key], val])
@@ -380,6 +422,10 @@ class Distro(object):
if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
self.set_passwd(name, kwargs['plain_text_passwd'])
+ # Set password if hashed password is provided and non-empty
+ if 'hashed_passwd' in kwargs and kwargs['hashed_passwd']:
+ self.set_passwd(name, kwargs['hashed_passwd'], hashed=True)
+
# Default locking down the account. 'lock_passwd' defaults to True.
# lock account unless lock_password is False.
if kwargs.get('lock_passwd', True):
@@ -393,7 +439,7 @@ class Distro(object):
if 'ssh_authorized_keys' in kwargs:
# Try to handle this in a smart manner.
keys = kwargs['ssh_authorized_keys']
- if isinstance(keys, (basestring, str)):
+ if isinstance(keys, six.string_types):
keys = [keys]
if isinstance(keys, dict):
keys = list(keys.values())
@@ -468,7 +514,7 @@ class Distro(object):
util.make_header(base="added"),
"#includedir %s" % (path), '']
sudoers_contents = "\n".join(lines)
- util.write_file(sudo_base, sudoers_contents, 0440)
+ util.write_file(sudo_base, sudoers_contents, 0o440)
else:
lines = ['', util.make_header(base="added"),
"#includedir %s" % (path), '']
@@ -478,7 +524,7 @@ class Distro(object):
except IOError as e:
util.logexc(LOG, "Failed to write %s", sudo_base)
raise e
- util.ensure_dir(path, 0750)
+ util.ensure_dir(path, 0o750)
def write_sudo_rules(self, user, rules, sudo_file=None):
if not sudo_file:
@@ -491,7 +537,7 @@ class Distro(object):
if isinstance(rules, (list, tuple)):
for rule in rules:
lines.append("%s %s" % (user, rule))
- elif isinstance(rules, (basestring, str)):
+ elif isinstance(rules, six.string_types):
lines.append("%s %s" % (user, rules))
else:
msg = "Can not create sudoers rule addition with type %r"
@@ -506,7 +552,7 @@ class Distro(object):
content,
]
try:
- util.write_file(sudo_file, "\n".join(contents), 0440)
+ util.write_file(sudo_file, "\n".join(contents), 0o440)
except IOError as e:
util.logexc(LOG, "Failed to write sudoers file %s", sudo_file)
raise e
@@ -517,8 +563,10 @@ class Distro(object):
util.logexc(LOG, "Failed to append sudoers file %s", sudo_file)
raise e
- def create_group(self, name, members):
+ def create_group(self, name, members=None):
group_add_cmd = ['groupadd', name]
+ if not members:
+ members = []
# Check if group exists, and then add it doesn't
if util.is_group(name):
@@ -528,21 +576,21 @@ class Distro(object):
util.subp(group_add_cmd)
LOG.info("Created new group %s" % name)
except Exception:
- util.logexc("Failed to create group %s", name)
+ util.logexc(LOG, "Failed to create group %s", name)
# Add members to the group, if so defined
if len(members) > 0:
for member in members:
if not util.is_user(member):
LOG.warn("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
+ "; user does not exist.", member, name)
continue
util.subp(['usermod', '-a', '-G', name, member])
LOG.info("Added user '%s' to group '%s'" % (member, name))
-def _get_package_mirror_info(mirror_info, availability_zone=None,
+def _get_package_mirror_info(mirror_info, data_source=None,
mirror_filter=util.search_for_mirror):
# given a arch specific 'mirror_info' entry (from package_mirrors)
# search through the 'search' entries, and fallback appropriately
@@ -550,21 +598,28 @@ def _get_package_mirror_info(mirror_info, availability_zone=None,
if not mirror_info:
mirror_info = {}
- ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" %
- "north|northeast|east|southeast|south|southwest|west|northwest")
+ # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b)
+ # the region is us-east-1. so region = az[0:-1]
+ directions_re = '|'.join([
+ 'central', 'east', 'north', 'northeast', 'northwest',
+ 'south', 'southeast', 'southwest', 'west'])
+ ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re)
subst = {}
- if availability_zone:
- subst['availability_zone'] = availability_zone
+ if data_source and data_source.availability_zone:
+ subst['availability_zone'] = data_source.availability_zone
+
+ if re.match(ec2_az_re, data_source.availability_zone):
+ subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1]
- if availability_zone and re.match(ec2_az_re, availability_zone):
- subst['ec2_region'] = "%s" % availability_zone[0:-1]
+ if data_source and data_source.region:
+ subst['region'] = data_source.region
results = {}
- for (name, mirror) in mirror_info.get('failsafe', {}).iteritems():
+ for (name, mirror) in mirror_info.get('failsafe', {}).items():
results[name] = mirror
- for (name, searchlist) in mirror_info.get('search', {}).iteritems():
+ for (name, searchlist) in mirror_info.get('search', {}).items():
mirrors = []
for tmpl in searchlist:
try:
@@ -604,30 +659,30 @@ def _get_arch_package_mirror_info(package_mirrors, arch):
# is the standard form used in the rest
# of cloud-init
def _normalize_groups(grp_cfg):
- if isinstance(grp_cfg, (str, basestring)):
+ if isinstance(grp_cfg, six.string_types):
grp_cfg = grp_cfg.strip().split(",")
- if isinstance(grp_cfg, (list)):
+ if isinstance(grp_cfg, list):
c_grp_cfg = {}
for i in grp_cfg:
- if isinstance(i, (dict)):
+ if isinstance(i, dict):
for k, v in i.items():
if k not in c_grp_cfg:
- if isinstance(v, (list)):
+ if isinstance(v, list):
c_grp_cfg[k] = list(v)
- elif isinstance(v, (basestring, str)):
+ elif isinstance(v, six.string_types):
c_grp_cfg[k] = [v]
else:
raise TypeError("Bad group member type %s" %
type_utils.obj_name(v))
else:
- if isinstance(v, (list)):
+ if isinstance(v, list):
c_grp_cfg[k].extend(v)
- elif isinstance(v, (basestring, str)):
+ elif isinstance(v, six.string_types):
c_grp_cfg[k].append(v)
else:
raise TypeError("Bad group member type %s" %
type_utils.obj_name(v))
- elif isinstance(i, (str, basestring)):
+ elif isinstance(i, six.string_types):
if i not in c_grp_cfg:
c_grp_cfg[i] = []
else:
@@ -635,7 +690,7 @@ def _normalize_groups(grp_cfg):
type_utils.obj_name(i))
grp_cfg = c_grp_cfg
groups = {}
- if isinstance(grp_cfg, (dict)):
+ if isinstance(grp_cfg, dict):
for (grp_name, grp_members) in grp_cfg.items():
groups[grp_name] = util.uniq_merge_sorted(grp_members)
else:
@@ -661,29 +716,29 @@ def _normalize_groups(grp_cfg):
# entry 'default' which will be marked as true
# all other users will be marked as false.
def _normalize_users(u_cfg, def_user_cfg=None):
- if isinstance(u_cfg, (dict)):
+ if isinstance(u_cfg, dict):
ad_ucfg = []
for (k, v) in u_cfg.items():
- if isinstance(v, (bool, int, basestring, str, float)):
+ if isinstance(v, (bool, int, float) + six.string_types):
if util.is_true(v):
ad_ucfg.append(str(k))
- elif isinstance(v, (dict)):
+ elif isinstance(v, dict):
v['name'] = k
ad_ucfg.append(v)
else:
raise TypeError(("Unmappable user value type %s"
" for key %s") % (type_utils.obj_name(v), k))
u_cfg = ad_ucfg
- elif isinstance(u_cfg, (str, basestring)):
+ elif isinstance(u_cfg, six.string_types):
u_cfg = util.uniq_merge_sorted(u_cfg)
users = {}
for user_config in u_cfg:
- if isinstance(user_config, (str, basestring, list)):
+ if isinstance(user_config, (list,) + six.string_types):
for u in util.uniq_merge(user_config):
if u and u not in users:
users[u] = {}
- elif isinstance(user_config, (dict)):
+ elif isinstance(user_config, dict):
if 'name' in user_config:
n = user_config.pop('name')
prev_config = users.get(n) or {}
@@ -784,11 +839,11 @@ def normalize_users_groups(cfg, distro):
old_user = cfg['user']
# Translate it into the format that is more useful
# going forward
- if isinstance(old_user, (basestring, str)):
+ if isinstance(old_user, six.string_types):
old_user = {
'name': old_user,
}
- if not isinstance(old_user, (dict)):
+ if not isinstance(old_user, dict):
LOG.warn(("Format for 'user' key must be a string or "
"dictionary and not %s"), type_utils.obj_name(old_user))
old_user = {}
@@ -813,7 +868,7 @@ def normalize_users_groups(cfg, distro):
default_user_config = util.mergemanydict([old_user, distro_user_config])
base_users = cfg.get('users', [])
- if not isinstance(base_users, (list, dict, str, basestring)):
+ if not isinstance(base_users, (list, dict) + six.string_types):
LOG.warn(("Format for 'users' key must be a comma separated string"
" or a dictionary or a list and not %s"),
type_utils.obj_name(base_users))
@@ -822,12 +877,12 @@ def normalize_users_groups(cfg, distro):
if old_user:
# Ensure that when user: is provided that this user
# always gets added (as the default user)
- if isinstance(base_users, (list)):
+ if isinstance(base_users, list):
# Just add it on at the end...
base_users.append({'name': 'default'})
- elif isinstance(base_users, (dict)):
+ elif isinstance(base_users, dict):
base_users['default'] = dict(base_users).get('default', True)
- elif isinstance(base_users, (str, basestring)):
+ elif isinstance(base_users, six.string_types):
# Just append it on to be re-parsed later
base_users += ",default"
@@ -852,11 +907,11 @@ def extract_default(users, default_name=None, default_config=None):
return config['default']
tmp_users = users.items()
- tmp_users = dict(itertools.ifilter(safe_find, tmp_users))
+ tmp_users = dict(filter(safe_find, tmp_users))
if not tmp_users:
return (default_name, default_config)
else:
- name = tmp_users.keys()[0]
+ name = list(tmp_users)[0]
config = tmp_users[name]
config.pop('default', None)
return (name, config)
@@ -866,7 +921,7 @@ def fetch(name):
locs, looked_locs = importer.find_module(name, ['', __name__], ['Distro'])
if not locs:
raise ImportError("No distribution found for distro %s (searched %s)"
- % (name, looked_locs))
+ % (name, looked_locs))
mod = importer.import_module(locs[0])
cls = getattr(mod, 'Distro')
return cls
@@ -877,5 +932,9 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
util.write_file(tz_conf, str(tz).rstrip() + "\n")
# This ensures that the correct tz will be used for the system
if tz_local and tz_file:
- util.copy(tz_file, tz_local)
+ # use a symlink if there exists a symlink or tz_local is not present
+ if os.path.islink(tz_local) or not os.path.exists(tz_local):
+ os.symlink(tz_file, tz_local)
+ else:
+ util.copy(tz_file, tz_local)
return
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 68bf1aab..93a2e008 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -66,7 +66,7 @@ class Distro(distros.Distro):
settings, entries)
dev_names = entries.keys()
# Format for netctl
- for (dev, info) in entries.iteritems():
+ for (dev, info) in entries.items():
nameservers = []
net_fn = self.network_conf_dir + dev
net_cfg = {
@@ -74,7 +74,7 @@ class Distro(distros.Distro):
'Interface': dev,
'IP': info.get('bootproto'),
'Address': "('%s/%s')" % (info.get('address'),
- info.get('netmask')),
+ info.get('netmask')),
'Gateway': info.get('gateway'),
'DNS': str(tuple(info.get('dns-nameservers'))).replace(',', '')
}
@@ -86,7 +86,7 @@ class Distro(distros.Distro):
if nameservers:
util.write_file(self.resolve_conf_fn,
- convert_resolv_conf(nameservers))
+ convert_resolv_conf(nameservers))
return dev_names
@@ -102,7 +102,7 @@ class Distro(distros.Distro):
def _bring_up_interface(self, device_name):
cmd = ['netctl', 'restart', device_name]
LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
+ device_name, cmd)
try:
(_out, err) = util.subp(cmd)
if len(err):
@@ -129,7 +129,7 @@ class Distro(distros.Distro):
if not conf:
conf = HostnameConf('')
conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), 0644)
+ util.write_file(out_fn, conf, 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index b09eb094..5d7e6cfc 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -26,6 +26,7 @@ from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
+from cloudinit import net
from cloudinit.distros.parsers.hostname import HostnameConf
@@ -45,7 +46,8 @@ APT_GET_WRAPPER = {
class Distro(distros.Distro):
hostname_conf_fn = "/etc/hostname"
locale_conf_fn = "/etc/default/locale"
- network_conf_fn = "/etc/network/interfaces"
+ network_conf_fn = "/etc/network/interfaces.d/50-cloud-init.cfg"
+ links_prefix = "/etc/systemd/network/50-cloud-init-"
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -76,6 +78,15 @@ class Distro(distros.Distro):
util.write_file(self.network_conf_fn, settings)
return ['all']
+ def _write_network_config(self, netconfig):
+ ns = net.parse_net_config_data(netconfig)
+ net.render_network_state(target="/", network_state=ns,
+ eni=self.network_conf_fn,
+ links_prefix=self.links_prefix,
+ netrules=None)
+ util.del_file("/etc/network/interfaces.d/eth0.cfg")
+ return []
+
def _bring_up_interfaces(self, device_names):
use_all = False
for d in device_names:
@@ -97,7 +108,7 @@ class Distro(distros.Distro):
if not conf:
conf = HostnameConf('')
conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), 0644)
+ util.write_file(out_fn, str(conf), 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
@@ -159,8 +170,9 @@ class Distro(distros.Distro):
# Allow the output of this to flow outwards (ie not be captured)
util.log_time(logfunc=LOG.debug,
- msg="apt-%s [%s]" % (command, ' '.join(cmd)), func=util.subp,
- args=(cmd,), kwargs={'env': e, 'capture': False})
+ msg="apt-%s [%s]" % (command, ' '.join(cmd)),
+ func=util.subp,
+ args=(cmd,), kwargs={'env': e, 'capture': False})
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index c59a074b..91bf4a4e 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -17,8 +17,8 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-
-from StringIO import StringIO
+import six
+from six import StringIO
import re
@@ -207,8 +207,9 @@ class Distro(distros.Distro):
redact_opts = ['passwd']
- for key, val in kwargs.iteritems():
- if key in adduser_opts and val and isinstance(val, basestring):
+ for key, val in kwargs.items():
+ if (key in adduser_opts and val and
+ isinstance(val, six.string_types)):
adduser_cmd.extend([adduser_opts[key], val])
# Redact certain fields from the logs
@@ -287,7 +288,7 @@ class Distro(distros.Distro):
nameservers = []
searchdomains = []
dev_names = entries.keys()
- for (device, info) in entries.iteritems():
+ for (device, info) in entries.items():
# Skip the loopback interface.
if device.startswith('lo'):
continue
@@ -339,7 +340,7 @@ class Distro(distros.Distro):
resolvconf.add_search_domain(domain)
except ValueError:
util.logexc(LOG, "Failed to add search domain %s", domain)
- util.write_file(self.resolv_conf_fn, str(resolvconf), 0644)
+ util.write_file(self.resolv_conf_fn, str(resolvconf), 0o644)
return dev_names
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index 09dd0d73..6267dd6e 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -66,7 +66,7 @@ class Distro(distros.Distro):
def _bring_up_interface(self, device_name):
cmd = ['/etc/init.d/net.%s' % device_name, 'restart']
LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
+ device_name, cmd)
try:
(_out, err) = util.subp(cmd)
if len(err):
@@ -88,7 +88,7 @@ class Distro(distros.Distro):
(_out, err) = util.subp(cmd)
if len(err):
LOG.warn("Running %s resulted in stderr output: %s", cmd,
- err)
+ err)
except util.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
@@ -108,7 +108,7 @@ class Distro(distros.Distro):
if not conf:
conf = HostnameConf('')
conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), 0644)
+ util.write_file(out_fn, conf, 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
index 8b28e2d1..cadfa6b6 100644
--- a/cloudinit/distros/net_util.py
+++ b/cloudinit/distros/net_util.py
@@ -103,7 +103,7 @@ def translate_network(settings):
consume[cmd] = args
# Check if anything left over to consume
absorb = False
- for (cmd, args) in consume.iteritems():
+ for (cmd, args) in consume.items():
if cmd == 'iface':
absorb = True
if absorb:
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
index 617b3c36..efb185d4 100644
--- a/cloudinit/distros/parsers/hostname.py
+++ b/cloudinit/distros/parsers/hostname.py
@@ -16,7 +16,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from StringIO import StringIO
+from six import StringIO
from cloudinit.distros.parsers import chop_comment
@@ -84,5 +84,5 @@ class HostnameConf(object):
hostnames_found.add(head)
if len(hostnames_found) > 1:
raise IOError("Multiple hostnames (%s) found!"
- % (hostnames_found))
+ % (hostnames_found))
return entries
diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py
index 94c97051..3c5498ee 100644
--- a/cloudinit/distros/parsers/hosts.py
+++ b/cloudinit/distros/parsers/hosts.py
@@ -16,7 +16,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from StringIO import StringIO
+from six import StringIO
from cloudinit.distros.parsers import chop_comment
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index 5733c25a..2ed13d9c 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -16,7 +16,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from StringIO import StringIO
+from six import StringIO
from cloudinit import util
@@ -132,7 +132,7 @@ class ResolvConf(object):
# Some hard limit on 256 chars total
raise ValueError(("Adding %r would go beyond the "
"256 maximum search list character limit")
- % (search_domain))
+ % (search_domain))
self._remove_option('search')
self._contents.append(('option', ['search', s_list, '']))
return flat_sds
diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py
index 20ca1871..6157cf32 100644
--- a/cloudinit/distros/parsers/sys_conf.py
+++ b/cloudinit/distros/parsers/sys_conf.py
@@ -16,7 +16,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from StringIO import StringIO
+import six
+from six import StringIO
import pipes
import re
@@ -69,15 +70,14 @@ class SysConf(configobj.ConfigObj):
return out_contents.getvalue()
def _quote(self, value, multiline=False):
- if not isinstance(value, (str, basestring)):
+ if not isinstance(value, six.string_types):
raise ValueError('Value "%s" is not a string' % (value))
if len(value) == 0:
return ''
quot_func = None
if value[0] in ['"', "'"] and value[-1] in ['"', "'"]:
if len(value) == 1:
- quot_func = (lambda x:
- self._get_single_quote(x) % x)
+ quot_func = (lambda x: self._get_single_quote(x) % x)
else:
# Quote whitespace if it isn't the start + end of a shell command
if value.strip().startswith("$(") and value.strip().endswith(")"):
@@ -90,10 +90,10 @@ class SysConf(configobj.ConfigObj):
# to use single quotes which won't get expanded...
if re.search(r"[\n\"']", value):
quot_func = (lambda x:
- self._get_triple_quote(x) % x)
+ self._get_triple_quote(x) % x)
else:
quot_func = (lambda x:
- self._get_single_quote(x) % x)
+ self._get_single_quote(x) % x)
else:
quot_func = pipes.quote
if not quot_func:
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index d9588632..812e7002 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -73,7 +73,7 @@ class Distro(distros.Distro):
searchservers = []
dev_names = entries.keys()
use_ipv6 = False
- for (dev, info) in entries.iteritems():
+ for (dev, info) in entries.items():
net_fn = self.network_script_tpl % (dev)
net_cfg = {
'DEVICE': dev,
@@ -111,13 +111,6 @@ class Distro(distros.Distro):
rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg)
return dev_names
- def uses_systemd(self):
- # Fedora 18 and RHEL 7 were the first adopters in their series
- (dist, vers) = util.system_info()['dist'][:2]
- major = (int)(vers.split('.')[0])
- return ((dist.startswith('Red Hat Enterprise Linux') and major >= 7)
- or (dist.startswith('Fedora') and major >= 18))
-
def apply_locale(self, locale, out_fn=None):
if self.uses_systemd():
if not out_fn:
@@ -132,7 +125,11 @@ class Distro(distros.Distro):
rhel_util.update_sysconfig_file(out_fn, locale_cfg)
def _write_hostname(self, hostname, out_fn):
- if self.uses_systemd():
+ # systemd will never update previous-hostname for us, so
+ # we need to do it ourselves
+ if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
+ util.write_file(out_fn, hostname)
+ elif self.uses_systemd():
util.subp(['hostnamectl', 'set-hostname', str(hostname)])
else:
host_cfg = {
@@ -155,7 +152,9 @@ class Distro(distros.Distro):
return (host_fn, self._read_hostname(host_fn))
def _read_hostname(self, filename, default=None):
- if self.uses_systemd():
+ if self.uses_systemd() and filename.endswith('/previous-hostname'):
+ return util.load_file(filename).strip()
+ elif self.uses_systemd():
(out, _err) = util.subp(['hostname'])
if len(out):
return out
diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py
index 063d536e..84aad623 100644
--- a/cloudinit/distros/rhel_util.py
+++ b/cloudinit/distros/rhel_util.py
@@ -50,7 +50,7 @@ def update_sysconfig_file(fn, adjustments, allow_empty=False):
]
if not exists:
lines.insert(0, util.make_header())
- util.write_file(fn, "\n".join(lines) + "\n", 0644)
+ util.write_file(fn, "\n".join(lines) + "\n", 0o644)
# Helper function to read a RHEL/SUSE /etc/sysconfig/* file
@@ -86,4 +86,4 @@ def update_resolve_conf_file(fn, dns_servers, search_servers):
r_conf.add_search_domain(s)
except ValueError:
util.logexc(LOG, "Failed at adding search domain %s", s)
- util.write_file(fn, str(r_conf), 0644)
+ util.write_file(fn, r_conf, 0o644)
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
index 43682a12..620c974c 100644
--- a/cloudinit/distros/sles.py
+++ b/cloudinit/distros/sles.py
@@ -62,7 +62,7 @@ class Distro(distros.Distro):
nameservers = []
searchservers = []
dev_names = entries.keys()
- for (dev, info) in entries.iteritems():
+ for (dev, info) in entries.items():
net_fn = self.network_script_tpl % (dev)
mode = info.get('auto')
if mode and mode.lower() == 'true':
@@ -113,7 +113,7 @@ class Distro(distros.Distro):
if not conf:
conf = HostnameConf('')
conf.set_hostname(hostname)
- util.write_file(out_fn, str(conf), 0644)
+ util.write_file(out_fn, str(conf), 0o644)
def _read_system_hostname(self):
host_fn = self.hostname_conf_fn
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index e69d06ff..37b92a83 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -17,7 +17,6 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import functools
-import httplib
import json
from cloudinit import log as logging
@@ -25,7 +24,7 @@ from cloudinit import url_helper
from cloudinit import util
LOG = logging.getLogger(__name__)
-SKIP_USERDATA_CODES = frozenset([httplib.NOT_FOUND])
+SKIP_USERDATA_CODES = frozenset([url_helper.NOT_FOUND])
class MetadataLeafDecoder(object):
@@ -42,6 +41,10 @@ class MetadataLeafDecoder(object):
def __call__(self, field, blob):
if not blob:
return blob
+ try:
+ blob = util.decode_binary(blob)
+ except UnicodeDecodeError:
+ return blob
if self._maybe_json_object(blob):
try:
# Assume it's json, unless it fails parsing...
@@ -70,6 +73,8 @@ class MetadataMaterializer(object):
def _parse(self, blob):
leaves = {}
children = []
+ blob = util.decode_binary(blob)
+
if not blob:
return (leaves, children)
@@ -118,12 +123,12 @@ class MetadataMaterializer(object):
child_url = url_helper.combine_url(base_url, c)
if not child_url.endswith("/"):
child_url += "/"
- child_blob = str(self._caller(child_url))
+ child_blob = self._caller(child_url)
child_contents[c] = self._materialize(child_blob, child_url)
leaf_contents = {}
for (field, resource) in leaves.items():
leaf_url = url_helper.combine_url(base_url, resource)
- leaf_blob = str(self._caller(leaf_url))
+ leaf_blob = self._caller(leaf_url)
leaf_contents[field] = self._leaf_decoder(field, leaf_blob)
joined = {}
joined.update(child_contents)
@@ -160,7 +165,7 @@ def get_instance_userdata(api_version='latest',
timeout=timeout,
retries=retries,
exception_cb=exception_cb)
- user_data = str(response)
+ user_data = response.contents
except url_helper.UrlError as e:
if e.code not in SKIP_USERDATA_CODES:
util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
@@ -181,10 +186,13 @@ def get_instance_metadata(api_version='latest',
ssl_details=ssl_details, timeout=timeout,
retries=retries)
+ def mcaller(url):
+ return caller(url).contents
+
try:
response = caller(md_url)
- materializer = MetadataMaterializer(str(response),
- md_url, caller,
+ materializer = MetadataMaterializer(response.contents,
+ md_url, mcaller,
leaf_decoder=leaf_decoder)
md = materializer.materialize()
if not isinstance(md, (dict)):
diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py
index 5bebd318..baecdac9 100644
--- a/cloudinit/filters/launch_index.py
+++ b/cloudinit/filters/launch_index.py
@@ -61,7 +61,7 @@ class Filter(object):
discarded += 1
LOG.debug(("Discarding %s multipart messages "
"which do not match launch index %s"),
- discarded, self.wanted_idx)
+ discarded, self.wanted_idx)
new_message = copy.copy(message)
new_message.set_payload(new_msgs)
new_message[ud.ATTACHMENT_FIELD] = str(len(new_msgs))
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 059d7495..53d5604a 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -22,6 +22,7 @@
import abc
import os
+import six
from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES)
@@ -147,7 +148,7 @@ def walker_handle_handler(pdata, _ctype, _filename, payload):
if not modfname.endswith(".py"):
modfname = "%s.py" % (modfname)
# TODO(harlowja): Check if path exists??
- util.write_file(modfname, payload, 0600)
+ util.write_file(modfname, payload, 0o600)
handlers = pdata['handlers']
try:
mod = fixup_handler(importer.import_module(modname))
@@ -162,26 +163,38 @@ def walker_handle_handler(pdata, _ctype, _filename, payload):
def _extract_first_or_bytes(blob, size):
- # Extract the first line upto X bytes or X bytes from more than the
- # first line if the first line does not contain enough bytes
- first_line = blob.split("\n", 1)[0]
- if len(first_line) >= size:
- start = first_line[:size]
- else:
+    # Extract the first line or up to X characters for text objects.
+    # Extract the first X bytes for binary objects.
+ try:
+ if isinstance(blob, six.string_types):
+ start = blob.split("\n", 1)[0]
+ else:
+ # We want to avoid decoding the whole blob (it might be huge)
+ # By taking 4*size bytes we guarantee to decode size utf8 chars
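+            # (assumption: UTF-8 encodes a code point in at most 4 bytes,
+            # per RFC 3629, so size decoded chars fit within 4*size bytes)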
+ start = blob[:4 * size].decode(errors='ignore').split("\n", 1)[0]
+ if len(start) >= size:
+ start = start[:size]
+ except UnicodeDecodeError:
+ # Bytes array doesn't contain text so return chunk of raw bytes
start = blob[0:size]
return start
def _escape_string(text):
try:
- return text.encode("string-escape")
- except TypeError:
+ return text.encode("string_escape")
+ except (LookupError, TypeError):
try:
- # Unicode doesn't support string-escape...
- return text.encode('unicode-escape')
+ # Unicode (and Python 3's str) doesn't support string_escape...
+ return text.encode('unicode_escape')
except TypeError:
# Give up...
pass
+ except AttributeError:
+        # We're in Python 3 and received blob as text.
+        # No escaping is needed because bytes are printed
+        # as b'\xAA\xBB' automatically in Python 3.
+ pass
return text
@@ -232,7 +245,8 @@ def walk(msg, callback, data):
headers = dict(part)
LOG.debug(headers)
headers['Content-Type'] = ctype
- callback(data, filename, part.get_payload(decode=True), headers)
+ payload = util.fully_decoded_payload(part)
+ callback(data, filename, payload, headers)
partnum = partnum + 1
@@ -249,7 +263,10 @@ def fixup_handler(mod, def_freq=PER_INSTANCE):
def type_from_starts_with(payload, default=None):
- payload_lc = payload.lower()
+ try:
+ payload_lc = util.decode_binary(payload).lower()
+ except UnicodeDecodeError:
+ return default
payload_lc = payload_lc.lstrip()
for text in INCLUSION_SRCH:
if payload_lc.startswith(text):
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
index 3a50cf87..a4ea47ac 100644
--- a/cloudinit/handlers/boot_hook.py
+++ b/cloudinit/handlers/boot_hook.py
@@ -50,7 +50,7 @@ class BootHookPartHandler(handlers.Handler):
filepath = os.path.join(self.boothook_dir, filename)
contents = util.strip_prefix_suffix(util.dos2unix(payload),
prefix=BOOTHOOK_PREFIX)
- util.write_file(filepath, contents.lstrip(), 0700)
+ util.write_file(filepath, contents.lstrip(), 0o700)
return filepath
def handle_part(self, data, ctype, filename, payload, frequency):
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index bf994e33..07b6d0e0 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -95,7 +95,7 @@ class CloudConfigPartHandler(handlers.Handler):
lines.append(util.yaml_dumps(self.cloud_buf))
else:
lines = []
- util.write_file(self.cloud_fn, "\n".join(lines), 0600)
+ util.write_file(self.cloud_fn, "\n".join(lines), 0o600)
def _extract_mergers(self, payload, headers):
merge_header_headers = ''
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index 9755ab05..b5087693 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -52,4 +52,4 @@ class ShellScriptPartHandler(handlers.Handler):
filename = util.clean_filename(filename)
payload = util.dos2unix(payload)
path = os.path.join(self.script_dir, filename)
- util.write_file(path, payload, 0700)
+ util.write_file(path, payload, 0o700)
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 50d193c4..c5bea711 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -65,7 +65,7 @@ class UpstartJobPartHandler(handlers.Handler):
payload = util.dos2unix(payload)
path = os.path.join(self.upstart_dir, filename)
- util.write_file(path, payload, 0644)
+ util.write_file(path, payload, 0o644)
if SUITABLE_UPSTART:
util.subp(["initctl", "reload-configuration"], capture=False)
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index e701126e..0cf982f3 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -23,10 +23,11 @@
from time import time
import contextlib
-import io
import os
-from ConfigParser import (NoSectionError, NoOptionError, RawConfigParser)
+import six
+from six.moves.configparser import (
+ NoSectionError, NoOptionError, RawConfigParser)
from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
CFG_ENV_NAME)
@@ -138,9 +139,10 @@ class FileSemaphores(object):
# but the item had run before we did canon_sem_name.
if cname != name and os.path.exists(self._get_path(name, freq)):
LOG.warn("%s has run without canonicalized name [%s].\n"
- "likely the migrator has not yet run. It will run next boot.\n"
- "run manually with: cloud-init single --name=migrator"
- % (name, cname))
+ "likely the migrator has not yet run. "
+ "It will run next boot.\n"
+ "run manually with: cloud-init single --name=migrator"
+ % (name, cname))
return True
return False
@@ -318,10 +320,7 @@ class ContentHandlers(object):
return self.registered[content_type]
def items(self):
- return self.registered.items()
-
- def iteritems(self):
- return self.registered.iteritems()
+ return list(self.registered.items())
class Paths(object):
@@ -337,19 +336,19 @@ class Paths(object):
template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/')
self.template_tpl = os.path.join(template_dir, '%s.tmpl')
self.lookups = {
- "handlers": "handlers",
- "scripts": "scripts",
- "vendor_scripts": "scripts/vendor",
- "sem": "sem",
- "boothooks": "boothooks",
- "userdata_raw": "user-data.txt",
- "userdata": "user-data.txt.i",
- "obj_pkl": "obj.pkl",
- "cloud_config": "cloud-config.txt",
- "vendor_cloud_config": "vendor-cloud-config.txt",
- "data": "data",
- "vendordata_raw": "vendor-data.txt",
- "vendordata": "vendor-data.txt.i",
+ "handlers": "handlers",
+ "scripts": "scripts",
+ "vendor_scripts": "scripts/vendor",
+ "sem": "sem",
+ "boothooks": "boothooks",
+ "userdata_raw": "user-data.txt",
+ "userdata": "user-data.txt.i",
+ "obj_pkl": "obj.pkl",
+ "cloud_config": "cloud-config.txt",
+ "vendor_cloud_config": "vendor-cloud-config.txt",
+ "data": "data",
+ "vendordata_raw": "vendor-data.txt",
+ "vendordata": "vendor-data.txt.i",
}
# Set when a datasource becomes active
self.datasource = ds
@@ -449,7 +448,7 @@ class DefaultingConfigParser(RawConfigParser):
def stringify(self, header=None):
contents = ''
- with io.BytesIO() as outputstream:
+ with six.StringIO() as outputstream:
self.write(outputstream)
outputstream.flush()
contents = outputstream.getvalue()
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 622c946c..3c79b9c9 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -28,7 +28,8 @@ import collections
import os
import sys
-from StringIO import StringIO
+import six
+from six import StringIO
# Logging levels for easy access
CRITICAL = logging.CRITICAL
@@ -72,13 +73,13 @@ def setupLogging(cfg=None):
log_cfgs = []
log_cfg = cfg.get('logcfg')
- if log_cfg and isinstance(log_cfg, (str, basestring)):
+ if log_cfg and isinstance(log_cfg, six.string_types):
# If there is a 'logcfg' entry in the config,
# respect it, it is the old keyname
log_cfgs.append(str(log_cfg))
elif "log_cfgs" in cfg:
for a_cfg in cfg['log_cfgs']:
- if isinstance(a_cfg, (basestring, str)):
+ if isinstance(a_cfg, six.string_types):
log_cfgs.append(a_cfg)
elif isinstance(a_cfg, (collections.Iterable)):
cfg_str = [str(c) for c in a_cfg]
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
index 03aa1ee1..e13f55ac 100644
--- a/cloudinit/mergers/__init__.py
+++ b/cloudinit/mergers/__init__.py
@@ -18,6 +18,8 @@
import re
+import six
+
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import type_utils
@@ -95,7 +97,7 @@ def dict_extract_mergers(config):
raw_mergers = config.pop('merge_type', None)
if raw_mergers is None:
return parsed_mergers
- if isinstance(raw_mergers, (str, basestring)):
+ if isinstance(raw_mergers, six.string_types):
return string_extract_mergers(raw_mergers)
for m in raw_mergers:
if isinstance(m, (dict)):
diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py
index a16141fa..87cf1a72 100644
--- a/cloudinit/mergers/m_dict.py
+++ b/cloudinit/mergers/m_dict.py
@@ -16,6 +16,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import six
+
DEF_MERGE_TYPE = 'no_replace'
MERGE_TYPES = ('replace', DEF_MERGE_TYPE,)
@@ -57,7 +59,7 @@ class Merger(object):
return new_v
if isinstance(new_v, (list, tuple)) and self._recurse_array:
return self._merger.merge(old_v, new_v)
- if isinstance(new_v, (basestring)) and self._recurse_str:
+ if isinstance(new_v, six.string_types) and self._recurse_str:
return self._merger.merge(old_v, new_v)
if isinstance(new_v, (dict)) and self._recurse_dict:
return self._merger.merge(old_v, new_v)
diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
index 3b87b0fc..81e5c580 100644
--- a/cloudinit/mergers/m_list.py
+++ b/cloudinit/mergers/m_list.py
@@ -16,6 +16,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import six
+
DEF_MERGE_TYPE = 'replace'
MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace')
@@ -73,7 +75,7 @@ class Merger(object):
return old_v
if isinstance(new_v, (list, tuple)) and self._recurse_array:
return self._merger.merge(old_v, new_v)
- if isinstance(new_v, (str, basestring)) and self._recurse_str:
+ if isinstance(new_v, six.string_types) and self._recurse_str:
return self._merger.merge(old_v, new_v)
if isinstance(new_v, (dict)) and self._recurse_dict:
return self._merger.merge(old_v, new_v)
@@ -82,6 +84,6 @@ class Merger(object):
# Ok now we are replacing same indexes
merged_list.extend(value)
common_len = min(len(merged_list), len(merge_with))
- for i in xrange(0, common_len):
+ for i in range(0, common_len):
merged_list[i] = merge_same_index(merged_list[i], merge_with[i])
return merged_list
diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py
index e22ce28a..b00c4bf3 100644
--- a/cloudinit/mergers/m_str.py
+++ b/cloudinit/mergers/m_str.py
@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import six
+
class Merger(object):
def __init__(self, _merger, opts):
@@ -34,11 +36,11 @@ class Merger(object):
# perform the following action, if appending we will
# merge them together, otherwise we will just return value.
def _on_str(self, value, merge_with):
- if not isinstance(value, (basestring)):
+ if not isinstance(value, six.string_types):
return merge_with
if not self._append:
return merge_with
- if isinstance(value, unicode):
- return value + unicode(merge_with)
+ if isinstance(value, six.text_type):
+ return value + six.text_type(merge_with)
else:
- return value + str(merge_with)
+ return value + six.binary_type(merge_with)
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
new file mode 100644
index 00000000..40929c6e
--- /dev/null
+++ b/cloudinit/net/__init__.py
@@ -0,0 +1,751 @@
+# Copyright (C) 2013-2014 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Blake Rouse <blake.rouse@canonical.com>
+#
+# Curtin is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Affero General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
+# more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
+
+import base64
+import errno
+import glob
+import gzip
+import io
+import os
+import re
+import shlex
+
+from cloudinit import log as logging
+from cloudinit import util
+from .udev import generate_udev_rule
+from . import network_state
+
+LOG = logging.getLogger(__name__)
+
+SYS_CLASS_NET = "/sys/class/net/"
+LINKS_FNAME_PREFIX = "etc/systemd/network/50-cloud-init-"
+
+NET_CONFIG_OPTIONS = [
+ "address", "netmask", "broadcast", "network", "metric", "gateway",
+ "pointtopoint", "media", "mtu", "hostname", "leasehours", "leasetime",
+ "vendor", "client", "bootfile", "server", "hwaddr", "provider", "frame",
+ "netnum", "endpoint", "local", "ttl",
+ ]
+
+NET_CONFIG_COMMANDS = [
+ "pre-up", "up", "post-up", "down", "pre-down", "post-down",
+ ]
+
+NET_CONFIG_BRIDGE_OPTIONS = [
+ "bridge_ageing", "bridge_bridgeprio", "bridge_fd", "bridge_gcinit",
+ "bridge_hello", "bridge_maxage", "bridge_maxwait", "bridge_stp",
+ ]
+
+DEFAULT_PRIMARY_INTERFACE = 'eth0'
+
+
+def sys_dev_path(devname, path=""):
+ return SYS_CLASS_NET + devname + "/" + path
+
+
+def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None):
+ try:
+ contents = ""
+ with open(sys_dev_path(devname, path), "r") as fp:
+ contents = fp.read().strip()
+ if translate is None:
+ return contents
+
+ try:
+ return translate.get(contents)
+ except KeyError:
+ LOG.debug("found unexpected value '%s' in '%s/%s'", contents,
+ devname, path)
+ if keyerror is not None:
+ return keyerror
+ raise
+ except OSError as e:
+ if e.errno == errno.ENOENT and enoent is not None:
+ return enoent
+ raise
+
+
+def is_up(devname):
+ # The linux kernel says to consider devices in 'unknown'
+ # operstate as up for the purposes of network configuration. See
+ # Documentation/networking/operstates.txt in the kernel source.
+ translate = {'up': True, 'unknown': True, 'down': False}
+ return read_sys_net(devname, "operstate", enoent=False, keyerror=False,
+ translate=translate)
+
+
+def is_wireless(devname):
+ return os.path.exists(sys_dev_path(devname, "wireless"))
+
+
+def is_connected(devname):
+    # Connectedness is not a single flag: an iflink value of 2 means
+    # 'physically connected' and 3 means 'not connected', but a wlan
+    # interface will always show 3.
+ try:
+ iflink = read_sys_net(devname, "iflink", enoent=False)
+ if iflink == "2":
+ return True
+ if not is_wireless(devname):
+ return False
+ LOG.debug("'%s' is wireless, basing 'connected' on carrier", devname)
+
+ return read_sys_net(devname, "carrier", enoent=False, keyerror=False,
+ translate={'0': False, '1': True})
+
+ except IOError as e:
+ if e.errno == errno.EINVAL:
+ return False
+ raise
+
+
+def is_physical(devname):
+ return os.path.exists(sys_dev_path(devname, "device"))
+
+
+def is_present(devname):
+ return os.path.exists(sys_dev_path(devname))
+
+
+def get_devicelist():
+ return os.listdir(SYS_CLASS_NET)
+
+
+class ParserError(Exception):
+ """Raised when parser has issue parsing the interfaces file."""
+
+
+def parse_deb_config_data(ifaces, contents, src_dir, src_path):
+ """Parses the file contents, placing result into ifaces.
+
+ '_source_path' is added to every dictionary entry to define which file
+    the configuration information came from.
+
+ :param ifaces: interface dictionary
+ :param contents: contents of interfaces file
+    :param src_dir: directory in which the interfaces file was located
+    :param src_path: file path from which `contents` was read
+ """
+ currif = None
+ for line in contents.splitlines():
+ line = line.strip()
+ if line.startswith('#'):
+ continue
+ split = line.split(' ')
+ option = split[0]
+ if option == "source-directory":
+ parsed_src_dir = split[1]
+ if not parsed_src_dir.startswith("/"):
+ parsed_src_dir = os.path.join(src_dir, parsed_src_dir)
+ for expanded_path in glob.glob(parsed_src_dir):
+ dir_contents = os.listdir(expanded_path)
+ dir_contents = [
+ os.path.join(expanded_path, path)
+ for path in dir_contents
+ if (os.path.isfile(os.path.join(expanded_path, path)) and
+ re.match("^[a-zA-Z0-9_-]+$", path) is not None)
+ ]
+ for entry in dir_contents:
+ with open(entry, "r") as fp:
+ src_data = fp.read().strip()
+ abs_entry = os.path.abspath(entry)
+ parse_deb_config_data(
+ ifaces, src_data,
+ os.path.dirname(abs_entry), abs_entry)
+ elif option == "source":
+ new_src_path = split[1]
+ if not new_src_path.startswith("/"):
+ new_src_path = os.path.join(src_dir, new_src_path)
+ for expanded_path in glob.glob(new_src_path):
+ with open(expanded_path, "r") as fp:
+ src_data = fp.read().strip()
+ abs_path = os.path.abspath(expanded_path)
+ parse_deb_config_data(
+ ifaces, src_data,
+ os.path.dirname(abs_path), abs_path)
+ elif option == "auto":
+ for iface in split[1:]:
+ if iface not in ifaces:
+ ifaces[iface] = {
+ # Include the source path this interface was found in.
+ "_source_path": src_path
+ }
+ ifaces[iface]['auto'] = True
+ elif option == "iface":
+ iface, family, method = split[1:4]
+ if iface not in ifaces:
+ ifaces[iface] = {
+ # Include the source path this interface was found in.
+ "_source_path": src_path
+ }
+ elif 'family' in ifaces[iface]:
+ raise ParserError(
+ "Interface %s can only be defined once. "
+ "Re-defined in '%s'." % (iface, src_path))
+ ifaces[iface]['family'] = family
+ ifaces[iface]['method'] = method
+ currif = iface
+ elif option == "hwaddress":
+ ifaces[currif]['hwaddress'] = split[1]
+ elif option in NET_CONFIG_OPTIONS:
+ ifaces[currif][option] = split[1]
+ elif option in NET_CONFIG_COMMANDS:
+ if option not in ifaces[currif]:
+ ifaces[currif][option] = []
+ ifaces[currif][option].append(' '.join(split[1:]))
+ elif option.startswith('dns-'):
+ if 'dns' not in ifaces[currif]:
+ ifaces[currif]['dns'] = {}
+ if option == 'dns-search':
+ ifaces[currif]['dns']['search'] = []
+ for domain in split[1:]:
+ ifaces[currif]['dns']['search'].append(domain)
+ elif option == 'dns-nameservers':
+ ifaces[currif]['dns']['nameservers'] = []
+ for server in split[1:]:
+ ifaces[currif]['dns']['nameservers'].append(server)
+ elif option.startswith('bridge_'):
+ if 'bridge' not in ifaces[currif]:
+ ifaces[currif]['bridge'] = {}
+ if option in NET_CONFIG_BRIDGE_OPTIONS:
+ bridge_option = option.replace('bridge_', '', 1)
+ ifaces[currif]['bridge'][bridge_option] = split[1]
+ elif option == "bridge_ports":
+ ifaces[currif]['bridge']['ports'] = []
+ for iface in split[1:]:
+ ifaces[currif]['bridge']['ports'].append(iface)
+ elif option == "bridge_hw" and split[1].lower() == "mac":
+ ifaces[currif]['bridge']['mac'] = split[2]
+ elif option == "bridge_pathcost":
+ if 'pathcost' not in ifaces[currif]['bridge']:
+ ifaces[currif]['bridge']['pathcost'] = {}
+ ifaces[currif]['bridge']['pathcost'][split[1]] = split[2]
+ elif option == "bridge_portprio":
+ if 'portprio' not in ifaces[currif]['bridge']:
+ ifaces[currif]['bridge']['portprio'] = {}
+ ifaces[currif]['bridge']['portprio'][split[1]] = split[2]
+ elif option.startswith('bond-'):
+ if 'bond' not in ifaces[currif]:
+ ifaces[currif]['bond'] = {}
+ bond_option = option.replace('bond-', '', 1)
+ ifaces[currif]['bond'][bond_option] = split[1]
+ for iface in ifaces.keys():
+ if 'auto' not in ifaces[iface]:
+ ifaces[iface]['auto'] = False
+
+
+def parse_deb_config(path):
+ """Parses a debian network configuration file."""
+ ifaces = {}
+ with open(path, "r") as fp:
+ contents = fp.read().strip()
+ abs_path = os.path.abspath(path)
+ parse_deb_config_data(
+ ifaces, contents,
+ os.path.dirname(abs_path), abs_path)
+ return ifaces
+
+
+def parse_net_config_data(net_config):
+ """Parses the config, returns NetworkState dictionary
+
+ :param net_config: curtin network config dict
+ """
+ state = None
+ if 'version' in net_config and 'config' in net_config:
+ ns = network_state.NetworkState(version=net_config.get('version'),
+ config=net_config.get('config'))
+ ns.parse_config()
+ state = ns.network_state
+
+ return state
+
+
+def parse_net_config(path):
+ """Parses a curtin network configuration file and
+ return network state"""
+ ns = None
+ net_config = util.read_conf(path)
+ if 'network' in net_config:
+ ns = parse_net_config_data(net_config.get('network'))
+
+ return ns
+
+
+def _load_shell_content(content, add_empty=False, empty_val=None):
+ """Given shell like syntax (key=value\nkey2=value2\n) in content
+ return the data in dictionary form. If 'add_empty' is True
+ then add entries in to the returned dictionary for 'VAR='
+ variables. Set their value to empty_val."""
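+    # For example (illustrative): "DEVICE=eth0\nPROTO=dhcp\nIPV6ADDR=\n"
+    # yields {'DEVICE': 'eth0', 'PROTO': 'dhcp'}, plus {'IPV6ADDR': empty_val}
+    # when add_empty is True.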
+ data = {}
+ for line in shlex.split(content):
+ key, value = line.split("=", 1)
+ if not value:
+ value = empty_val
+ if add_empty or value:
+ data[key] = value
+
+ return data
+
+
+def _klibc_to_config_entry(content, mac_addrs=None):
+ """Convert a klibc writtent shell content file to a 'config' entry
+ When ip= is seen on the kernel command line in debian initramfs
+ and networking is brought up, ipconfig will populate
+ /run/net-<name>.cfg.
+
+    The files use shell-style syntax; examples are in the tests
+    provided here. Unfortunately, there is no good documentation on this.
+
+ DEVICE=<name> is expected/required and PROTO should indicate if
+ this is 'static' or 'dhcp'.
+ """
+
+ if mac_addrs is None:
+ mac_addrs = {}
+
+ data = _load_shell_content(content)
+ try:
+ name = data['DEVICE']
+ except KeyError:
+ raise ValueError("no 'DEVICE' entry in data")
+
+ # ipconfig on precise does not write PROTO
+ proto = data.get('PROTO')
+ if not proto:
+ if data.get('filename'):
+ proto = 'dhcp'
+ else:
+ proto = 'static'
+
+ if proto not in ('static', 'dhcp'):
+ raise ValueError("Unexpected value for PROTO: %s" % proto)
+
+ iface = {
+ 'type': 'physical',
+ 'name': name,
+ 'subnets': [],
+ }
+
+ if name in mac_addrs:
+ iface['mac_address'] = mac_addrs[name]
+
+ # originally believed there might be IPV6* values
+ for v, pre in (('ipv4', 'IPV4'),):
+ # if no IPV4ADDR or IPV6ADDR, then go on.
+ if pre + "ADDR" not in data:
+ continue
+ subnet = {'type': proto}
+
+ # these fields go right on the subnet
+ for key in ('NETMASK', 'BROADCAST', 'GATEWAY'):
+ if pre + key in data:
+ subnet[key.lower()] = data[pre + key]
+
+ dns = []
+ # handle IPV4DNS0 or IPV6DNS0
+ for nskey in ('DNS0', 'DNS1'):
+ ns = data.get(pre + nskey)
+ # verify it has something other than 0.0.0.0 (or ipv6)
+ if ns and len(ns.strip(":.0")):
+ dns.append(data[pre + nskey])
+ if dns:
+ subnet['dns_nameservers'] = dns
+ # add search to both ipv4 and ipv6, as it has no namespace
+ search = data.get('DOMAINSEARCH')
+ if search:
+ if ',' in search:
+ subnet['dns_search'] = search.split(",")
+ else:
+ subnet['dns_search'] = search.split()
+
+ iface['subnets'].append(subnet)
+
+ return name, iface
+
+
+def config_from_klibc_net_cfg(files=None, mac_addrs=None):
+ if files is None:
+ files = glob.glob('/run/net*.conf')
+
+ entries = []
+ names = {}
+ for cfg_file in files:
+ name, entry = _klibc_to_config_entry(util.load_file(cfg_file),
+ mac_addrs=mac_addrs)
+ if name in names:
+ raise ValueError(
+ "device '%s' defined multiple times: %s and %s" % (
+ name, names[name], cfg_file))
+
+ names[name] = cfg_file
+ entries.append(entry)
+ return {'config': entries, 'version': 1}
+
+
+def render_persistent_net(network_state):
+    '''Given state, emit udev rules to map each MAC address to its ifname.'''
+ content = ""
+ interfaces = network_state.get('interfaces')
+ for iface in interfaces.values():
+ # for physical interfaces write out a persist net udev rule
+ if iface['type'] == 'physical' and \
+ 'name' in iface and iface.get('mac_address'):
+ content += generate_udev_rule(iface['name'],
+ iface['mac_address'])
+
+ return content
+
+
+# TODO: switch valid_map based on mode inet/inet6
+def iface_add_subnet(iface, subnet):
+ content = ""
+ valid_map = [
+ 'address',
+ 'netmask',
+ 'broadcast',
+ 'metric',
+ 'gateway',
+ 'pointopoint',
+ 'mtu',
+ 'scope',
+ 'dns_search',
+ 'dns_nameservers',
+ ]
+ for key, value in subnet.items():
+ if value and key in valid_map:
+            if isinstance(value, list):
+ value = " ".join(value)
+ if '_' in key:
+ key = key.replace('_', '-')
+ content += " {} {}\n".format(key, value)
+
+ return content
+
+
+# TODO: switch to valid_map for attrs
+def iface_add_attrs(iface):
+ content = ""
+ ignore_map = [
+ 'type',
+ 'name',
+ 'inet',
+ 'mode',
+ 'index',
+ 'subnets',
+ ]
+ if iface['type'] not in ['bond', 'bridge', 'vlan']:
+ ignore_map.append('mac_address')
+
+ for key, value in iface.items():
+ if value and key not in ignore_map:
+            if isinstance(value, list):
+ value = " ".join(value)
+ content += " {} {}\n".format(key, value)
+
+ return content
+
+
+def render_route(route, indent=""):
+ """ When rendering routes for an iface, in some cases applying a route
+ may result in the route command returning non-zero which produces
+ some confusing output for users manually using ifup/ifdown[1]. To
+    that end, we will optionally include an '|| true' postfix to each
+    route line, allowing users to work with ifup/ifdown without using
+    the --force option.
+
+    We may at some point not want to emit this additional postfix, and
+    add a 'strict' flag to this function. When called with strict=True,
+    we will not append the postfix.
+
+ 1. http://askubuntu.com/questions/168033/
+ how-to-set-static-routes-in-ubuntu-server
+ """
+ content = ""
+ up = indent + "post-up route add"
+ down = indent + "pre-down route del"
+ eol = " || true\n"
+ mapping = {
+ 'network': '-net',
+ 'netmask': 'netmask',
+ 'gateway': 'gw',
+ 'metric': 'metric',
+ }
+ if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0':
+ default_gw = " default gw %s" % route['gateway']
+ content += up + default_gw + eol
+ content += down + default_gw + eol
+ elif route['network'] == '::' and route['netmask'] == 0:
+ # ipv6!
+ default_gw = " -A inet6 default gw %s" % route['gateway']
+ content += up + default_gw + eol
+ content += down + default_gw + eol
+ else:
+ route_line = ""
+ for k in ['network', 'netmask', 'gateway', 'metric']:
+ if k in route:
+ route_line += " %s %s" % (mapping[k], route[k])
+ content += up + route_line + eol
+ content += down + route_line + eol
+
+ return content
+
+
+def render_interfaces(network_state):
+ ''' Given state, emit etc/network/interfaces content '''
+
+ content = ""
+ interfaces = network_state.get('interfaces')
+    # Apply a sort order to ensure that we write out the physical
+    # interfaces first; this is critical for bonding.
+ order = {
+ 'physical': 0,
+ 'bond': 1,
+ 'bridge': 2,
+ 'vlan': 3,
+ }
+ content += "auto lo\niface lo inet loopback\n"
+ for dnskey, value in network_state.get('dns', {}).items():
+ if len(value):
+ content += " dns-{} {}\n".format(dnskey, " ".join(value))
+
+ content += "\n"
+ for iface in sorted(interfaces.values(),
+ key=lambda k: (order[k['type']], k['name'])):
+ content += "auto {name}\n".format(**iface)
+
+ subnets = iface.get('subnets', {})
+ if subnets:
+            for index, subnet in enumerate(subnets):
+ iface['index'] = index
+ iface['mode'] = subnet['type']
+ if iface['mode'].endswith('6'):
+ iface['inet'] += '6'
+ elif iface['mode'] == 'static' and ":" in subnet['address']:
+ iface['inet'] += '6'
+ if iface['mode'].startswith('dhcp'):
+ iface['mode'] = 'dhcp'
+
+ if index == 0:
+ content += "iface {name} {inet} {mode}\n".format(**iface)
+ else:
+ content += "auto {name}:{index}\n".format(**iface)
+ content += \
+ "iface {name}:{index} {inet} {mode}\n".format(**iface)
+
+ content += iface_add_subnet(iface, subnet)
+ content += iface_add_attrs(iface)
+ for route in subnet.get('routes', []):
+ content += render_route(route, indent=" ")
+ content += "\n"
+ else:
+ content += "iface {name} {inet} {mode}\n".format(**iface)
+ content += iface_add_attrs(iface)
+ content += "\n"
+
+ for route in network_state.get('routes'):
+ content += render_route(route)
+
+ # global replacements until v2 format
+ content = content.replace('mac_address', 'hwaddress ether')
+ return content
+
+
+def render_network_state(target, network_state, eni="etc/network/interfaces",
+ links_prefix=LINKS_FNAME_PREFIX,
+ netrules='etc/udev/rules.d/70-persistent-net.rules'):
+
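+    # This writes up to three artifacts under 'target' (default paths
+    # shown for illustration):
+    #   <target>/etc/network/interfaces                    (eni)
+    #   <target>/etc/udev/rules.d/70-persistent-net.rules  (netrules)
+    #   <target>/etc/systemd/network/50-cloud-init-*.link  (links_prefix)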
+ fpeni = os.path.sep.join((target, eni,))
+ util.ensure_dir(os.path.dirname(fpeni))
+ with open(fpeni, 'w+') as f:
+ f.write(render_interfaces(network_state))
+
+ if netrules:
+ netrules = os.path.sep.join((target, netrules,))
+ util.ensure_dir(os.path.dirname(netrules))
+ with open(netrules, 'w+') as f:
+ f.write(render_persistent_net(network_state))
+
+ if links_prefix:
+ render_systemd_links(target, network_state, links_prefix)
+
+
+def render_systemd_links(target, network_state,
+ links_prefix=LINKS_FNAME_PREFIX):
+ fp_prefix = os.path.sep.join((target, links_prefix))
+ for f in glob.glob(fp_prefix + "*"):
+ os.unlink(f)
+
+ interfaces = network_state.get('interfaces')
+ for iface in interfaces.values():
+ if (iface['type'] == 'physical' and 'name' in iface and
+ iface.get('mac_address')):
+ fname = fp_prefix + iface['name'] + ".link"
+ with open(fname, "w") as fp:
+ fp.write("\n".join([
+ "[Match]",
+ "MACAddress=" + iface['mac_address'],
+ "",
+ "[Link]",
+ "Name=" + iface['name'],
+ ""
+ ]))
+
+
+def is_disabled_cfg(cfg):
+ if not cfg or not isinstance(cfg, dict):
+ return False
+ return cfg.get('config') == "disabled"
+
+
+def sys_netdev_info(name, field):
+ if not os.path.exists(os.path.join(SYS_CLASS_NET, name)):
+ raise OSError("%s: interface does not exist in %s" %
+ (name, SYS_CLASS_NET))
+
+ fname = os.path.join(SYS_CLASS_NET, name, field)
+ if not os.path.exists(fname):
+ raise OSError("%s: could not find sysfs entry: %s" % (name, fname))
+ data = util.load_file(fname)
+ if data[-1] == '\n':
+ data = data[:-1]
+ return data
+
+
+def generate_fallback_config():
+ """Determine which attached net dev is most likely to have a connection and
+    generate network state to run DHCP on that interface."""
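+    # The returned dict uses the v1 config shape, e.g. (illustrative):
+    #   {'version': 1, 'config': [{'type': 'physical', 'name': 'eth0',
+    #    'mac_address': '00:16:3e:00:00:01',
+    #    'subnets': [{'type': 'dhcp'}]}]}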
+ # by default use eth0 as primary interface
+ nconf = {'config': [], 'version': 1}
+
+ # get list of interfaces that could have connections
+ invalid_interfaces = set(['lo'])
+ potential_interfaces = set(get_devicelist())
+ potential_interfaces = potential_interfaces.difference(invalid_interfaces)
+ # sort into interfaces with carrier, interfaces which could have carrier,
+ # and ignore interfaces that are definitely disconnected
+ connected = []
+ possibly_connected = []
+ for interface in potential_interfaces:
+ try:
+ carrier = int(sys_netdev_info(interface, 'carrier'))
+ if carrier:
+ connected.append(interface)
+ continue
+ except OSError:
+ pass
+        # check if nic is dormant or down, as this may make a nic appear to
+ # not have a carrier even though it could acquire one when brought
+ # online by dhclient
+ try:
+ dormant = int(sys_netdev_info(interface, 'dormant'))
+ if dormant:
+ possibly_connected.append(interface)
+ continue
+ except OSError:
+ pass
+ try:
+ operstate = sys_netdev_info(interface, 'operstate')
+ if operstate in ['dormant', 'down', 'lowerlayerdown', 'unknown']:
+ possibly_connected.append(interface)
+ continue
+ except OSError:
+ pass
+
+ # don't bother with interfaces that might not be connected if there are
+ # some that definitely are
+ if connected:
+ potential_interfaces = connected
+ else:
+ potential_interfaces = possibly_connected
+ # if there are no interfaces, give up
+ if not potential_interfaces:
+ return
+    # if eth0 exists, prefer it above anything else; otherwise use the
+    # interface that sorts first by name
+ if DEFAULT_PRIMARY_INTERFACE in potential_interfaces:
+ name = DEFAULT_PRIMARY_INTERFACE
+ else:
+ name = sorted(potential_interfaces)[0]
+
+ mac = sys_netdev_info(name, 'address')
+ target_name = name
+
+ nconf['config'].append(
+ {'type': 'physical', 'name': target_name,
+ 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
+ return nconf
+
+
+def _decomp_gzip(blob, strict=True):
+    # Decompress blob; raise an exception if it is not compressed,
+    # unless strict=False.
+ with io.BytesIO(blob) as iobuf:
+ gzfp = None
+ try:
+ gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf)
+ return gzfp.read()
+ except IOError:
+ if strict:
+ raise
+ return blob
+ finally:
+ if gzfp:
+ gzfp.close()
+
+
+def _b64dgz(b64str, gzipped="try"):
+    # Decode a base64 string. If gzipped is true, transparently uncompress;
+    # if gzipped is 'try', then try gunzip, returning the original on failure.
+ try:
+ blob = base64.b64decode(b64str)
+ except TypeError:
+ raise ValueError("Invalid base64 text: %s" % b64str)
+
+ if not gzipped:
+ return blob
+
+ return _decomp_gzip(blob, strict=gzipped != "try")
+
+
+def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None):
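+    # Two kernel cmdline forms are handled (illustrative values):
+    #   network-config=<base64 of (optionally gzipped) YAML>
+    #   ip=10.0.0.5::10.0.0.1:255.255.255.0::eth0:off   (klibc/ipconfig)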
+ if cmdline is None:
+ cmdline = util.get_cmdline()
+
+ if 'network-config=' in cmdline:
+ data64 = None
+ for tok in cmdline.split():
+ if tok.startswith("network-config="):
+ data64 = tok.split("=", 1)[1]
+ if data64:
+ return util.load_yaml(_b64dgz(data64))
+
+ if 'ip=' not in cmdline:
+ return None
+
+ if mac_addrs is None:
+ mac_addrs = {k: sys_netdev_info(k, 'address')
+ for k in get_devicelist()}
+
+ return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs)
+
+
+# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
new file mode 100644
index 00000000..e32d2cdf
--- /dev/null
+++ b/cloudinit/net/network_state.py
@@ -0,0 +1,446 @@
+# Copyright (C) 2013-2014 Canonical Ltd.
+#
+# Author: Ryan Harper <ryan.harper@canonical.com>
+#
+# Curtin is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Affero General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
+# more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit.util import yaml_dumps as dump_config
+
+LOG = logging.getLogger(__name__)
+
+NETWORK_STATE_VERSION = 1
+NETWORK_STATE_REQUIRED_KEYS = {
+ 1: ['version', 'config', 'network_state'],
+}
+
+
+def from_state_file(state_file):
+ network_state = None
+ state = util.read_conf(state_file)
+ network_state = NetworkState()
+ network_state.load(state)
+
+ return network_state
+
+
+class NetworkState:
+ def __init__(self, version=NETWORK_STATE_VERSION, config=None):
+ self.version = version
+ self.config = config
+ self.network_state = {
+ 'interfaces': {},
+ 'routes': [],
+ 'dns': {
+ 'nameservers': [],
+ 'search': [],
+ }
+ }
+ self.command_handlers = self.get_command_handlers()
+
+ def get_command_handlers(self):
+ METHOD_PREFIX = 'handle_'
+ methods = filter(lambda x: callable(getattr(self, x)) and
+ x.startswith(METHOD_PREFIX), dir(self))
+ handlers = {}
+ for m in methods:
+ key = m.replace(METHOD_PREFIX, '')
+ handlers[key] = getattr(self, m)
+
+ return handlers
+
+ def dump(self):
+ state = {
+ 'version': self.version,
+ 'config': self.config,
+ 'network_state': self.network_state,
+ }
+ return dump_config(state)
+
+ def load(self, state):
+ if 'version' not in state:
+ LOG.error('Invalid state, missing version field')
+ raise Exception('Invalid state, missing version field')
+
+ required_keys = NETWORK_STATE_REQUIRED_KEYS[state['version']]
+ if not self.valid_command(state, required_keys):
+ msg = 'Invalid state, missing keys: {}'.format(required_keys)
+ LOG.error(msg)
+ raise Exception(msg)
+
+ # v1 - direct attr mapping, except version
+ for key in [k for k in required_keys if k not in ['version']]:
+ setattr(self, key, state[key])
+ self.command_handlers = self.get_command_handlers()
+
+ def dump_network_state(self):
+ return dump_config(self.network_state)
+
+ def parse_config(self):
+ # rebuild network state
+ for command in self.config:
+ handler = self.command_handlers.get(command['type'])
+ handler(command)
+
+ def valid_command(self, command, required_keys):
+ if not required_keys:
+ return False
+
+ found_keys = [key for key in command.keys() if key in required_keys]
+ return len(found_keys) == len(required_keys)
+
+ def handle_physical(self, command):
+ '''
+ command = {
+ 'type': 'physical',
+ 'mac_address': 'c0:d6:9f:2c:e8:80',
+ 'name': 'eth0',
+ 'subnets': [
+ {'type': 'dhcp4'}
+ ]
+ }
+ '''
+ required_keys = [
+ 'name',
+ ]
+ if not self.valid_command(command, required_keys):
+            LOG.warn('Skipping invalid command: {}'.format(command))
+ LOG.debug(self.dump_network_state())
+ return
+
+ interfaces = self.network_state.get('interfaces')
+ iface = interfaces.get(command['name'], {})
+ for param, val in command.get('params', {}).items():
+ iface.update({param: val})
+
+ # convert subnet ipv6 netmask to cidr as needed
+ subnets = command.get('subnets')
+ if subnets:
+ for subnet in subnets:
+ if subnet['type'] == 'static':
+ if 'netmask' in subnet and ':' in subnet['address']:
+ subnet['netmask'] = mask2cidr(subnet['netmask'])
+ for route in subnet.get('routes', []):
+ if 'netmask' in route:
+ route['netmask'] = mask2cidr(route['netmask'])
+ iface.update({
+ 'name': command.get('name'),
+ 'type': command.get('type'),
+ 'mac_address': command.get('mac_address'),
+ 'inet': 'inet',
+ 'mode': 'manual',
+ 'mtu': command.get('mtu'),
+ 'address': None,
+ 'gateway': None,
+ 'subnets': subnets,
+ })
+ self.network_state['interfaces'].update({command.get('name'): iface})
+ self.dump_network_state()
+
+ def handle_vlan(self, command):
+ '''
+ auto eth0.222
+ iface eth0.222 inet static
+ address 10.10.10.1
+ netmask 255.255.255.0
+ hwaddress ether BC:76:4E:06:96:B3
+ vlan-raw-device eth0
+ '''
+ required_keys = [
+ 'name',
+ 'vlan_link',
+ 'vlan_id',
+ ]
+ if not self.valid_command(command, required_keys):
+            LOG.warn('Skipping invalid command: {}'.format(command))
+            LOG.debug(self.dump_network_state())
+ return
+
+ interfaces = self.network_state.get('interfaces')
+ self.handle_physical(command)
+ iface = interfaces.get(command.get('name'), {})
+ iface['vlan-raw-device'] = command.get('vlan_link')
+ iface['vlan_id'] = command.get('vlan_id')
+ interfaces.update({iface['name']: iface})
+
+ def handle_bond(self, command):
+ '''
+ #/etc/network/interfaces
+ auto eth0
+ iface eth0 inet manual
+ bond-master bond0
+ bond-mode 802.3ad
+
+ auto eth1
+ iface eth1 inet manual
+ bond-master bond0
+ bond-mode 802.3ad
+
+ auto bond0
+ iface bond0 inet static
+ address 192.168.0.10
+ gateway 192.168.0.1
+ netmask 255.255.255.0
+ bond-slaves none
+ bond-mode 802.3ad
+ bond-miimon 100
+ bond-downdelay 200
+ bond-updelay 200
+ bond-lacp-rate 4
+ '''
+ required_keys = [
+ 'name',
+ 'bond_interfaces',
+ 'params',
+ ]
+ if not self.valid_command(command, required_keys):
+            LOG.warn('Skipping invalid command: {}'.format(command))
+            LOG.debug(self.dump_network_state())
+ return
+
+ self.handle_physical(command)
+ interfaces = self.network_state.get('interfaces')
+ iface = interfaces.get(command.get('name'), {})
+ for param, val in command.get('params').items():
+ iface.update({param: val})
+ iface.update({'bond-slaves': 'none'})
+ self.network_state['interfaces'].update({iface['name']: iface})
+
+ # handle bond slaves
+ for ifname in command.get('bond_interfaces'):
+ if ifname not in interfaces:
+ cmd = {
+ 'name': ifname,
+ 'type': 'bond',
+ }
+ # inject placeholder
+ self.handle_physical(cmd)
+
+ interfaces = self.network_state.get('interfaces')
+ bond_if = interfaces.get(ifname)
+ bond_if['bond-master'] = command.get('name')
+            # copy the bond config into the slave
+ for param, val in command.get('params').items():
+ bond_if.update({param: val})
+ self.network_state['interfaces'].update({ifname: bond_if})
+
+ def handle_bridge(self, command):
+ '''
+ auto br0
+ iface br0 inet static
+ address 10.10.10.1
+ netmask 255.255.255.0
+ bridge_ports eth0 eth1
+ bridge_stp off
+ bridge_fd 0
+ bridge_maxwait 0
+
+ bridge_params = [
+ "bridge_ports",
+ "bridge_ageing",
+ "bridge_bridgeprio",
+ "bridge_fd",
+ "bridge_gcint",
+ "bridge_hello",
+ "bridge_hw",
+ "bridge_maxage",
+ "bridge_maxwait",
+ "bridge_pathcost",
+ "bridge_portprio",
+ "bridge_stp",
+ "bridge_waitport",
+ ]
+ '''
+ required_keys = [
+ 'name',
+ 'bridge_interfaces',
+ 'params',
+ ]
+ if not self.valid_command(command, required_keys):
+            LOG.warn('Skipping invalid command: {}'.format(command))
+            LOG.debug(self.dump_network_state())
+ return
+
+ # find one of the bridge port ifaces to get mac_addr
+ # handle bridge_slaves
+ interfaces = self.network_state.get('interfaces')
+ for ifname in command.get('bridge_interfaces'):
+ if ifname in interfaces:
+ continue
+
+ cmd = {
+ 'name': ifname,
+ }
+ # inject placeholder
+ self.handle_physical(cmd)
+
+ interfaces = self.network_state.get('interfaces')
+ self.handle_physical(command)
+ iface = interfaces.get(command.get('name'), {})
+ iface['bridge_ports'] = command['bridge_interfaces']
+ for param, val in command.get('params').items():
+ iface.update({param: val})
+
+ interfaces.update({iface['name']: iface})
+
+ def handle_nameserver(self, command):
+ required_keys = [
+ 'address',
+ ]
+ if not self.valid_command(command, required_keys):
+            LOG.warn('Skipping invalid command: {}'.format(command))
+            LOG.debug(self.dump_network_state())
+ return
+
+ dns = self.network_state.get('dns')
+ if 'address' in command:
+ addrs = command['address']
+            if not isinstance(addrs, list):
+ addrs = [addrs]
+ for addr in addrs:
+ dns['nameservers'].append(addr)
+ if 'search' in command:
+ paths = command['search']
+ if not isinstance(paths, list):
+ paths = [paths]
+ for path in paths:
+ dns['search'].append(path)
+
+ def handle_route(self, command):
+ required_keys = [
+ 'destination',
+ ]
+ if not self.valid_command(command, required_keys):
+            LOG.warn('Skipping invalid command: {}'.format(command))
+            LOG.debug(self.dump_network_state())
+ return
+
+ routes = self.network_state.get('routes')
+ network, cidr = command['destination'].split("/")
+ netmask = cidr2mask(int(cidr))
+ route = {
+ 'network': network,
+ 'netmask': netmask,
+ 'gateway': command.get('gateway'),
+ 'metric': command.get('metric'),
+ }
+ routes.append(route)
+
+
+def cidr2mask(cidr):
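+    # e.g. cidr2mask(24) -> '255.255.255.0' (illustrative)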
+ mask = [0, 0, 0, 0]
+ for i in list(range(0, cidr)):
+ idx = int(i / 8)
+ mask[idx] = mask[idx] + (1 << (7 - i % 8))
+ return ".".join([str(x) for x in mask])
+
+
+def ipv4mask2cidr(mask):
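+    # e.g. ipv4mask2cidr('255.255.255.0') -> 24 (illustrative)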
+ if '.' not in mask:
+ return mask
+ return sum([bin(int(x)).count('1') for x in mask.split('.')])
+
+
+def ipv6mask2cidr(mask):
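+    # e.g. ipv6mask2cidr('ffff:ffff:ffff:ffff::') -> 64 (illustrative)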
+ if ':' not in mask:
+ return mask
+
+ bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00,
+ 0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc,
+ 0xfffe, 0xffff]
+ cidr = 0
+ for word in mask.split(':'):
+ if not word or int(word, 16) == 0:
+ break
+ cidr += bitCount.index(int(word, 16))
+
+ return cidr
+
+
+def mask2cidr(mask):
+ if ':' in mask:
+ return ipv6mask2cidr(mask)
+ elif '.' in mask:
+ return ipv4mask2cidr(mask)
+ else:
+ return mask
+
+
+if __name__ == '__main__':
+ import sys
+ import random
+ from cloudinit import net
+
+ def load_config(nc):
+ version = nc.get('version')
+ config = nc.get('config')
+ return (version, config)
+
+ def test_parse(network_config):
+ (version, config) = load_config(network_config)
+ ns1 = NetworkState(version=version, config=config)
+ ns1.parse_config()
+ random.shuffle(config)
+ ns2 = NetworkState(version=version, config=config)
+ ns2.parse_config()
+ print("----NS1-----")
+ print(ns1.dump_network_state())
+ print()
+ print("----NS2-----")
+ print(ns2.dump_network_state())
+ print("NS1 == NS2 ?=> {}".format(
+ ns1.network_state == ns2.network_state))
+ eni = net.render_interfaces(ns2.network_state)
+ print(eni)
+ udev_rules = net.render_persistent_net(ns2.network_state)
+ print(udev_rules)
+
+ def test_dump_and_load(network_config):
+ print("Loading network_config into NetworkState")
+ (version, config) = load_config(network_config)
+ ns1 = NetworkState(version=version, config=config)
+ ns1.parse_config()
+ print("Dumping state to file")
+ ns1_dump = ns1.dump()
+ ns1_state = "/tmp/ns1.state"
+ with open(ns1_state, "w+") as f:
+ f.write(ns1_dump)
+
+ print("Loading state from file")
+ ns2 = from_state_file(ns1_state)
+ print("NS1 == NS2 ?=> {}".format(
+ ns1.network_state == ns2.network_state))
+
+ def test_output(network_config):
+ (version, config) = load_config(network_config)
+ ns1 = NetworkState(version=version, config=config)
+ ns1.parse_config()
+ random.shuffle(config)
+ ns2 = NetworkState(version=version, config=config)
+ ns2.parse_config()
+ print("NS1 == NS2 ?=> {}".format(
+ ns1.network_state == ns2.network_state))
+ eni_1 = net.render_interfaces(ns1.network_state)
+ eni_2 = net.render_interfaces(ns2.network_state)
+ print(eni_1)
+ print(eni_2)
+ print("eni_1 == eni_2 ?=> {}".format(
+ eni_1 == eni_2))
+
+ y = util.read_conf(sys.argv[1])
+ network_config = y.get('network')
+ test_parse(network_config)
+ test_dump_and_load(network_config)
+ test_output(network_config)
diff --git a/cloudinit/net/udev.py b/cloudinit/net/udev.py
new file mode 100644
index 00000000..6435ace0
--- /dev/null
+++ b/cloudinit/net/udev.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2015 Canonical Ltd.
+#
+# Author: Ryan Harper <ryan.harper@canonical.com>
+#
+# Curtin is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Affero General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
+# more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
+
+
+def compose_udev_equality(key, value):
+ """Return a udev comparison clause, like `ACTION=="add"`."""
+ assert key == key.upper()
+ return '%s=="%s"' % (key, value)
+
+
+def compose_udev_attr_equality(attribute, value):
+ """Return a udev attribute comparison clause, like `ATTR{type}=="1"`."""
+ assert attribute == attribute.lower()
+ return 'ATTR{%s}=="%s"' % (attribute, value)
+
+
+def compose_udev_setting(key, value):
+ """Return a udev assignment clause, like `NAME="eth0"`."""
+ assert key == key.upper()
+ return '%s="%s"' % (key, value)
+
+
+def generate_udev_rule(interface, mac):
+ """Return a udev rule to set the name of network interface with `mac`.
+
+ The rule ends up as a single line looking something like:
+
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*",
+ ATTR{address}=="ff:ee:dd:cc:bb:aa", NAME="eth0"
+ """
+ rule = ', '.join([
+ compose_udev_equality('SUBSYSTEM', 'net'),
+ compose_udev_equality('ACTION', 'add'),
+ compose_udev_equality('DRIVERS', '?*'),
+ compose_udev_attr_equality('address', mac),
+ compose_udev_setting('NAME', interface),
+ ])
+ return '%s\n' % rule
+
+# vi: ts=4 expandtab syntax=python
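For reference, a sketch of what generate_udev_rule emits (MAC address and
interface name are illustrative):

    rule = generate_udev_rule('eth0', 'ff:ee:dd:cc:bb:aa')
    # SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*",
    # ATTR{address}=="ff:ee:dd:cc:bb:aa", NAME="eth0"
    assert rule.endswith('\n')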
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index fb40cc0d..e30d6fb5 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -87,7 +87,7 @@ def netdev_info(empty=""):
devs[curdev][target] = toks[i][len(field) + 1:]
if empty != "":
- for (_devname, dev) in devs.iteritems():
+ for (_devname, dev) in devs.items():
for field in dev:
if dev[field] == "":
dev[field] = empty
@@ -181,7 +181,7 @@ def netdev_pformat():
else:
fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
tbl = PrettyTable(fields)
- for (dev, d) in netdev.iteritems():
+ for (dev, d) in netdev.items():
tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
if d.get('addr6'):
tbl.add_row([dev, d["up"],
diff --git a/cloudinit/registry.py b/cloudinit/registry.py
new file mode 100644
index 00000000..04368ddf
--- /dev/null
+++ b/cloudinit/registry.py
@@ -0,0 +1,37 @@
+# Copyright 2015 Canonical Ltd.
+# This file is part of cloud-init. See LICENCE file for license information.
+#
+# vi: ts=4 expandtab
+import copy
+
+
+class DictRegistry(object):
+ """A simple registry for a mapping of objects."""
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self._items = {}
+
+ def register_item(self, key, item):
+ """Add item to the registry."""
+ if key in self._items:
+ raise ValueError(
+ 'Item already registered with key {0}'.format(key))
+ self._items[key] = item
+
+ def unregister_item(self, key, force=True):
+ """Remove item from the registry."""
+ if key in self._items:
+ del self._items[key]
+ elif not force:
+ raise KeyError("%s: key not present to unregister" % key)
+
+ @property
+ def registered_items(self):
+ """All the items that have been registered.
+
+ This cannot be used to modify the contents of the registry.
+ """
+ return copy.copy(self._items)
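A short usage sketch of the registry semantics above (names are
illustrative):

    from cloudinit.registry import DictRegistry

    registry = DictRegistry()
    registry.register_item('log', object())
    try:
        registry.register_item('log', object())  # duplicate key
    except ValueError:
        pass  # re-registration is rejected
    registry.unregister_item('missing')  # force=True (default): silent no-op
    snapshot = registry.registered_items  # shallow copy; clearing it
    snapshot.clear()                      # leaves the registry untouched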
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
new file mode 100644
index 00000000..6b41ae61
--- /dev/null
+++ b/cloudinit/reporting/__init__.py
@@ -0,0 +1,42 @@
+# Copyright 2015 Canonical Ltd.
+# This file is part of cloud-init. See LICENCE file for license information.
+#
+"""
+cloud-init reporting framework
+
+The reporting framework is intended to allow all parts of cloud-init to
+report events in a structured manner.
+"""
+
+from ..registry import DictRegistry
+from .handlers import available_handlers
+
+DEFAULT_CONFIG = {
+ 'logging': {'type': 'log'},
+}
+
+
+def update_configuration(config):
+ """Update the instanciated_handler_registry.
+
+ :param config:
+ The dictionary containing changes to apply. If a key is given
+ with a False-ish value, the registered handler matching that name
+ will be unregistered.
+ """
+ for handler_name, handler_config in config.items():
+ if not handler_config:
+ instantiated_handler_registry.unregister_item(
+ handler_name, force=True)
+ continue
+ handler_config = handler_config.copy()
+ cls = available_handlers.registered_items[handler_config.pop('type')]
+ instantiated_handler_registry.unregister_item(handler_name)
+ instance = cls(**handler_config)
+ instantiated_handler_registry.register_item(handler_name, instance)
+
+
+instantiated_handler_registry = DictRegistry()
+update_configuration(DEFAULT_CONFIG)
+
+# vi: ts=4 expandtab
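Assuming this module's API, reporting could be reconfigured at runtime like
so (handler names and settings are illustrative):

    from cloudinit import reporting

    reporting.update_configuration({
        'console': {'type': 'print'},  # register a PrintHandler
        'logging': None,               # False-ish: drop the default handler
    })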
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
new file mode 100644
index 00000000..2f767f64
--- /dev/null
+++ b/cloudinit/reporting/events.py
@@ -0,0 +1,246 @@
+# Copyright 2015 Canonical Ltd.
+# This file is part of cloud-init. See LICENCE file for license information.
+#
+"""
+events for reporting.
+
+The events here are designed to be used with reporting.
+They can be published to registered handlers with report_event.
+"""
+import base64
+import os.path
+import time
+
+from . import instantiated_handler_registry
+
+FINISH_EVENT_TYPE = 'finish'
+START_EVENT_TYPE = 'start'
+
+DEFAULT_EVENT_ORIGIN = 'cloudinit'
+
+
+class _nameset(set):
+ def __getattr__(self, name):
+ if name in self:
+ return name
+ raise AttributeError("%s not a valid value" % name)
+
+
+status = _nameset(("SUCCESS", "WARN", "FAIL"))
+
+
+class ReportingEvent(object):
+ """Encapsulation of event formatting."""
+
+ def __init__(self, event_type, name, description,
+ origin=DEFAULT_EVENT_ORIGIN, timestamp=None):
+ self.event_type = event_type
+ self.name = name
+ self.description = description
+ self.origin = origin
+ # default to the event's creation time, not module import time
+ self.timestamp = timestamp or time.time()
+
+ def as_string(self):
+ """The event represented as a string."""
+ return '{0}: {1}: {2}'.format(
+ self.event_type, self.name, self.description)
+
+ def as_dict(self):
+ """The event represented as a dictionary."""
+ return {'name': self.name, 'description': self.description,
+ 'event_type': self.event_type, 'origin': self.origin,
+ 'timestamp': self.timestamp}
+
+
+class FinishReportingEvent(ReportingEvent):
+
+ def __init__(self, name, description, result=status.SUCCESS,
+ post_files=None):
+ super(FinishReportingEvent, self).__init__(
+ FINISH_EVENT_TYPE, name, description)
+ self.result = result
+ if post_files is None:
+ post_files = []
+ self.post_files = post_files
+ if result not in status:
+ raise ValueError("Invalid result: %s" % result)
+
+ def as_string(self):
+ return '{0}: {1}: {2}: {3}'.format(
+ self.event_type, self.name, self.result, self.description)
+
+ def as_dict(self):
+ """The event represented as json friendly."""
+ data = super(FinishReportingEvent, self).as_dict()
+ data['result'] = self.result
+ if self.post_files:
+ data['files'] = _collect_file_info(self.post_files)
+ return data
+
+
+def report_event(event):
+ """Report an event to all registered event handlers.
+
+ This should generally be called via one of the other functions in
+ the reporting module.
+
+ :param event:
+ The event to report; an instance of :py:class:`ReportingEvent`
+ or one of its subclasses.
+ """
+ for _, handler in instantiated_handler_registry.registered_items.items():
+ handler.publish_event(event)
+
+
+def report_finish_event(event_name, event_description,
+ result=status.SUCCESS, post_files=None):
+ """Report a "finish" event.
+
+ See :py:func:`.report_event` for parameter details.
+ """
+ event = FinishReportingEvent(event_name, event_description, result,
+ post_files=post_files)
+ return report_event(event)
+
+
+def report_start_event(event_name, event_description):
+ """Report a "start" event.
+
+ :param event_name:
+ The name of the event; this should be a topic which events would
+ share (e.g. it will be the same for start and finish events).
+
+ :param event_description:
+ A human-readable description of the event that has occurred.
+ """
+ event = ReportingEvent(START_EVENT_TYPE, event_name, event_description)
+ return report_event(event)
+
+
+class ReportEventStack(object):
+ """Context Manager for using :py:func:`report_event`
+
+ This enables calling :py:func:`report_start_event` and
+ :py:func:`report_finish_event` through a context manager.
+
+ :param name:
+ the name of the event
+
+ :param description:
+ the event's description, passed on to :py:func:`report_start_event`
+
+ :param message:
+ the description to use for the finish event. Defaults to
+ the value of ``description``.
+
+ :param parent:
+ :type parent: :py:class:`ReportEventStack` or None
+ The parent of this event. The parent is populated with
+ results of all its children. The name used in reporting
+ is <parent.name>/<name>
+
+ :param reporting_enabled:
+ Indicates if reporting events should be generated.
+ If not provided, defaults to the parent's value, or True if no parent
+ is provided.
+
+ :param result_on_exception:
+ The result value to set if an exception is caught. default
+ value is FAIL.
+ """
+ def __init__(self, name, description, message=None, parent=None,
+ reporting_enabled=None, result_on_exception=status.FAIL,
+ post_files=None):
+ self.parent = parent
+ self.name = name
+ self.description = description
+ self.message = message
+ self.result_on_exception = result_on_exception
+ self.result = status.SUCCESS
+ if post_files is None:
+ post_files = []
+ self.post_files = post_files
+
+ # use parent's reporting value if not provided
+ if reporting_enabled is None:
+ if parent:
+ reporting_enabled = parent.reporting_enabled
+ else:
+ reporting_enabled = True
+ self.reporting_enabled = reporting_enabled
+
+ if parent:
+ self.fullname = '/'.join((parent.fullname, name,))
+ else:
+ self.fullname = self.name
+ self.children = {}
+
+ def __repr__(self):
+ return ("ReportEventStack(%s, %s, reporting_enabled=%s)" %
+ (self.name, self.description, self.reporting_enabled))
+
+ def __enter__(self):
+ self.result = status.SUCCESS
+ if self.reporting_enabled:
+ report_start_event(self.fullname, self.description)
+ if self.parent:
+ self.parent.children[self.name] = (None, None)
+ return self
+
+ def _childrens_finish_info(self):
+ for cand_result in (status.FAIL, status.WARN):
+ for name, (value, msg) in self.children.items():
+ if value == cand_result:
+ return (value, self.message)
+ return (self.result, self.message)
+
+ @property
+ def result(self):
+ return self._result
+
+ @result.setter
+ def result(self, value):
+ if value not in status:
+ raise ValueError("'%s' not a valid result" % value)
+ self._result = value
+
+ @property
+ def message(self):
+ if self._message is not None:
+ return self._message
+ return self.description
+
+ @message.setter
+ def message(self, value):
+ self._message = value
+
+ def _finish_info(self, exc):
+ # return tuple of description, and value
+ if exc:
+ return (self.result_on_exception, self.message)
+ return self._childrens_finish_info()
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ (result, msg) = self._finish_info(exc_value)
+ if self.parent:
+ self.parent.children[self.name] = (result, msg)
+ if self.reporting_enabled:
+ report_finish_event(self.fullname, msg, result,
+ post_files=self.post_files)
+
+
+def _collect_file_info(files):
+ if not files:
+ return None
+ ret = []
+ for fname in files:
+ if not os.path.isfile(fname):
+ content = None
+ else:
+ with open(fname, "rb") as fp:
+ content = base64.b64encode(fp.read()).decode()
+ ret.append({'path': fname, 'content': content,
+ 'encoding': 'base64'})
+ return ret
+
+# vi: ts=4 expandtab syntax=python
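Taken together, a hedged sketch of how these pieces nest (event names are
illustrative):

    from cloudinit.reporting import events

    with events.ReportEventStack('setup', 'configuring instance') as parent:
        # emits: start: setup: configuring instance
        with events.ReportEventStack('disk', 'formatting disks',
                                     parent=parent):
            pass  # emits start/finish events named 'setup/disk'
    # an exception inside a stack marks that stack's finish event FAIL,
    # and the worst child result bubbles into the parent's finish event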
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
new file mode 100644
index 00000000..3212d173
--- /dev/null
+++ b/cloudinit/reporting/handlers.py
@@ -0,0 +1,91 @@
+# vi: ts=4 expandtab
+
+import abc
+import json
+import six
+
+from ..registry import DictRegistry
+from .. import (url_helper, util)
+from .. import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class ReportingHandler(object):
+ """Base class for report handlers.
+
+ Implement :meth:`~publish_event` for controlling what
+ the handler does with an event.
+ """
+
+ @abc.abstractmethod
+ def publish_event(self, event):
+ """Publish an event."""
+
+
+class LogHandler(ReportingHandler):
+ """Publishes events to the cloud-init log at the ``DEBUG`` log level."""
+
+ def __init__(self, level="DEBUG"):
+ super(LogHandler, self).__init__()
+ if not isinstance(level, int):
+ input_level = level
+ try:
+ level = getattr(logging, level.upper())
+ except Exception:
+ LOG.warn("invalid level '%s', using WARN", input_level)
+ level = logging.WARN
+ self.level = level
+
+ def publish_event(self, event):
+ logger = logging.getLogger(
+ '.'.join(['cloudinit', 'reporting', event.event_type, event.name]))
+ logger.log(self.level, event.as_string())
+
+
+class PrintHandler(ReportingHandler):
+ """Print the event as a string."""
+
+ def publish_event(self, event):
+ print(event.as_string())
+
+
+class WebHookHandler(ReportingHandler):
+ def __init__(self, endpoint, consumer_key=None, token_key=None,
+ token_secret=None, consumer_secret=None, timeout=None,
+ retries=None):
+ super(WebHookHandler, self).__init__()
+
+ if any([consumer_key, token_key, token_secret, consumer_secret]):
+ self.oauth_helper = url_helper.OauthUrlHelper(
+ consumer_key=consumer_key, token_key=token_key,
+ token_secret=token_secret, consumer_secret=consumer_secret)
+ else:
+ self.oauth_helper = None
+ self.endpoint = endpoint
+ self.timeout = timeout
+ self.retries = retries
+ self.ssl_details = util.fetch_ssl_details()
+
+ def publish_event(self, event):
+ if self.oauth_helper:
+ readurl = self.oauth_helper.readurl
+ else:
+ readurl = url_helper.readurl
+ try:
+ return readurl(
+ self.endpoint, data=json.dumps(event.as_dict()),
+ timeout=self.timeout,
+ retries=self.retries, ssl_details=self.ssl_details)
+ except Exception:
+ LOG.warn("failed posting event: %s", event.as_string())
+
+
+available_handlers = DictRegistry()
+available_handlers.register_item('log', LogHandler)
+available_handlers.register_item('print', PrintHandler)
+available_handlers.register_item('webhook', WebHookHandler)
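For instance, a webhook handler could be registered through the reporting
configuration (endpoint and settings are placeholders, not values from this
patch):

    from cloudinit import reporting

    reporting.update_configuration({
        'hook': {'type': 'webhook',
                 'endpoint': 'http://example.invalid/events',
                 'timeout': 5,
                 'retries': 2},
    })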
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 5efcb0b0..8c258ea1 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -42,12 +42,13 @@ CFG_BUILTIN = {
'CloudSigma',
'CloudStack',
'SmartOS',
+ 'Bigstep',
# At the end to act as a 'catch' when none of the above work...
'None',
],
'def_log_file': '/var/log/cloud-init.log',
'log_cfgs': [],
- 'syslog_fix_perms': 'syslog:adm',
+ 'syslog_fix_perms': ['syslog:adm', 'root:adm'],
'system_info': {
'paths': {
'cloud_dir': '/var/lib/cloud',
diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py
index 40b0c94c..0d95f506 100644
--- a/cloudinit/signal_handler.py
+++ b/cloudinit/signal_handler.py
@@ -22,7 +22,7 @@ import inspect
import signal
import sys
-from StringIO import StringIO
+from six import StringIO
from cloudinit import log as logging
from cloudinit import util
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 1e913a6e..cd61df31 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -40,9 +40,8 @@ LOG = logging.getLogger(__name__)
CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
# Shell command lists
-CMD_DMI_SYSTEM = ['/usr/sbin/dmidecode', '--string', 'system-product-name']
CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
-CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5']
+CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--timeout=5']
META_DATA_NOT_SUPPORTED = {
'block-device-mapping': {},
@@ -100,11 +99,7 @@ class DataSourceAltCloud(sources.DataSource):
'''
Description:
Get the type for the cloud back end this instance is running on
- by examining the string returned by:
- dmidecode --string system-product-name
-
- On VMWare/vSphere dmidecode returns: RHEV Hypervisor
- On VMWare/vSphere dmidecode returns: VMware Virtual Platform
+ by examining the string returned by reading the dmi data.
Input:
None
@@ -117,26 +112,20 @@ class DataSourceAltCloud(sources.DataSource):
uname_arch = os.uname()[4]
if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmidecode in CMD_DMI_SYSTEM crashes kvm process
+ # Disabling because dmi data is not available on ARM processors
LOG.debug("Disabling AltCloud datasource on arm (LP: #1243287)")
return 'UNKNOWN'
- cmd = CMD_DMI_SYSTEM
- try:
- (cmd_out, _err) = util.subp(cmd)
- except ProcessExecutionError, _err:
- LOG.debug(('Failed command: %s\n%s') % \
- (' '.join(cmd), _err.message))
- return 'UNKNOWN'
- except OSError, _err:
- LOG.debug(('Failed command: %s\n%s') % \
- (' '.join(cmd), _err.message))
+ system_name = util.read_dmi_data("system-product-name")
+ if not system_name:
return 'UNKNOWN'
- if cmd_out.upper().startswith('RHEV'):
+ sys_name = system_name.upper()
+
+ if sys_name.startswith('RHEV'):
return 'RHEV'
- if cmd_out.upper().startswith('VMWARE'):
+ if sys_name.startswith('VMWARE'):
return 'VSPHERE'
return 'UNKNOWN'
@@ -211,11 +200,11 @@ class DataSourceAltCloud(sources.DataSource):
cmd = CMD_PROBE_FLOPPY
(cmd_out, _err) = util.subp(cmd)
LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
- except ProcessExecutionError, _err:
+ except ProcessExecutionError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
_err.message)
return False
- except OSError, _err:
+ except OSError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
_err.message)
return False
@@ -228,11 +217,11 @@ class DataSourceAltCloud(sources.DataSource):
cmd.append('--exit-if-exists=' + floppy_dev)
(cmd_out, _err) = util.subp(cmd)
LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
- except ProcessExecutionError, _err:
+ except ProcessExecutionError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
_err.message)
return False
- except OSError, _err:
+ except OSError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
_err.message)
return False
@@ -295,7 +284,7 @@ class DataSourceAltCloud(sources.DataSource):
# In the future 'dsmode' like behavior can be added to offer user
# the ability to run before networking.
datasources = [
- (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 09bc196d..698f4cac 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -17,26 +17,30 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
+import contextlib
import crypt
import fnmatch
import os
import os.path
import time
+import xml.etree.ElementTree as ET
+
from xml.dom import minidom
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
from cloudinit import sources
from cloudinit import util
+from cloudinit.sources.helpers.azure import get_metadata_from_fabric
LOG = logging.getLogger(__name__)
DS_NAME = 'Azure'
DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
AGENT_START = ['service', 'walinuxagent', 'start']
-BOUNCE_COMMAND = ['sh', '-xc',
+BOUNCE_COMMAND = [
+ 'sh', '-xc',
"i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]
-DATA_DIR_CLEAN_LIST = ['SharedConfig.xml']
BUILTIN_DS_CONFIG = {
'agent_command': AGENT_START,
@@ -53,9 +57,9 @@ BUILTIN_DS_CONFIG = {
BUILTIN_CLOUD_CONFIG = {
'disk_setup': {
- 'ephemeral0': {'table_type': 'mbr',
- 'layout': True,
- 'overwrite': False},
+ 'ephemeral0': {'table_type': 'gpt',
+ 'layout': [100],
+ 'overwrite': True},
},
'fs_setup': [{'filesystem': 'ext4',
'device': 'ephemeral0.1',
@@ -65,6 +69,40 @@ BUILTIN_CLOUD_CONFIG = {
DS_CFG_PATH = ['datasource', DS_NAME]
DEF_EPHEMERAL_LABEL = 'Temporary Storage'
+# The redacted password fails to meet password complexity requirements
+# so we can safely use this to mask/redact the password in the ovf-env.xml
+DEF_PASSWD_REDACTION = 'REDACTED'
+
+
+def get_hostname(hostname_command='hostname'):
+ return util.subp(hostname_command, capture=True)[0].strip()
+
+
+def set_hostname(hostname, hostname_command='hostname'):
+ util.subp([hostname_command, hostname])
+
+
+@contextlib.contextmanager
+def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
+ """
+ Set a temporary hostname, restoring the previous hostname on exit.
+
+ When used as a context manager, yields the previous hostname, or
+ None if the hostname was not changed.
+ """
+ policy = cfg['hostname_bounce']['policy']
+ previous_hostname = get_hostname(hostname_command)
+ if (not util.is_true(cfg.get('set_hostname')) or
+ util.is_false(policy) or
+ (previous_hostname == temp_hostname and policy != 'force')):
+ yield None
+ return
+ set_hostname(temp_hostname, hostname_command)
+ try:
+ yield previous_hostname
+ finally:
+ set_hostname(previous_hostname, hostname_command)
+
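A sketch of how the context manager above is meant to be driven (the cfg
shape mirrors the 'hostname_bounce' settings; values are illustrative):

    cfg = {'set_hostname': True,
           'hostname_bounce': {'policy': 'yes'}}
    with temporary_hostname('temp-host', cfg) as prev_hostname:
        if prev_hostname is not None:
            pass  # hostname is 'temp-host' here; restored on exit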
class DataSourceAzureNet(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
@@ -80,6 +118,54 @@ class DataSourceAzureNet(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
+ def get_metadata_from_agent(self):
+ temp_hostname = self.metadata.get('local-hostname')
+ hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
+ with temporary_hostname(temp_hostname, self.ds_cfg,
+ hostname_command=hostname_command) \
+ as previous_hostname:
+ if (previous_hostname is not None and
+ util.is_true(self.ds_cfg.get('set_hostname'))):
+ cfg = self.ds_cfg['hostname_bounce']
+ try:
+ perform_hostname_bounce(hostname=temp_hostname,
+ cfg=cfg,
+ prev_hostname=previous_hostname)
+ except Exception as e:
+ LOG.warn("Failed publishing hostname: %s", e)
+ util.logexc(LOG, "handling set_hostname failed")
+
+ try:
+ invoke_agent(self.ds_cfg['agent_command'])
+ except util.ProcessExecutionError:
+ # claim the datasource even if the command failed
+ util.logexc(LOG, "agent command '%s' failed.",
+ self.ds_cfg['agent_command'])
+
+ ddir = self.ds_cfg['data_dir']
+
+ fp_files = []
+ key_value = None
+ for pk in self.cfg.get('_pubkeys', []):
+ if pk.get('value', None):
+ key_value = pk['value']
+ LOG.debug("ssh authentication: using value from fabric")
+ else:
+ bname = str(pk['fingerprint'] + ".crt")
+ fp_files += [os.path.join(ddir, bname)]
+ LOG.debug("ssh authentication: "
+ "using fingerprint from fabirc")
+
+ missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+ func=wait_for_files,
+ args=(fp_files,))
+ if len(missing):
+ LOG.warn("Did not find files, but going on: %s", missing)
+
+ metadata = {}
+ metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
+ return metadata
+
def get_data(self):
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
@@ -124,77 +210,34 @@ class DataSourceAzureNet(sources.DataSource):
LOG.debug("using files cached in %s", ddir)
# azure / hyper-v provides random data here
- seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True)
+ seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
+ quiet=True, decode=False)
if seed:
self.metadata['random_seed'] = seed
# now update ds_cfg to reflect contents pass in config
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
- mycfg = self.ds_cfg
- ddir = mycfg['data_dir']
-
- if found != ddir:
- cached_ovfenv = util.load_file(
- os.path.join(ddir, 'ovf-env.xml'), quiet=True)
- if cached_ovfenv != files['ovf-env.xml']:
- # source was not walinux-agent's datadir, so we have to clean
- # up so 'wait_for_files' doesn't return early due to stale data
- cleaned = []
- for f in [os.path.join(ddir, f) for f in DATA_DIR_CLEAN_LIST]:
- if os.path.exists(f):
- util.del_file(f)
- cleaned.append(f)
- if cleaned:
- LOG.info("removed stale file(s) in '%s': %s",
- ddir, str(cleaned))
# walinux agent writes files world readable, but expects
# the directory to be protected.
- write_files(ddir, files, dirmode=0700)
-
- # handle the hostname 'publishing'
- try:
- handle_set_hostname(mycfg.get('set_hostname'),
- self.metadata.get('local-hostname'),
- mycfg['hostname_bounce'])
- except Exception as e:
- LOG.warn("Failed publishing hostname: %s", e)
- util.logexc(LOG, "handling set_hostname failed")
-
- try:
- invoke_agent(mycfg['agent_command'])
- except util.ProcessExecutionError:
- # claim the datasource even if the command failed
- util.logexc(LOG, "agent command '%s' failed.",
- mycfg['agent_command'])
-
- shcfgxml = os.path.join(ddir, "SharedConfig.xml")
- wait_for = [shcfgxml]
-
- fp_files = []
- for pk in self.cfg.get('_pubkeys', []):
- bname = str(pk['fingerprint'] + ".crt")
- fp_files += [os.path.join(ddir, bname)]
+ write_files(ddir, files, dirmode=0o700)
- missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
- func=wait_for_files,
- args=(wait_for + fp_files,))
- if len(missing):
- LOG.warn("Did not find files, but going on: %s", missing)
-
- if shcfgxml in missing:
- LOG.warn("SharedConfig.xml missing, using static instance-id")
+ if self.ds_cfg['agent_command'] == '__builtin__':
+ metadata_func = get_metadata_from_fabric
else:
- try:
- self.metadata['instance-id'] = iid_from_shared_config(shcfgxml)
- except ValueError as e:
- LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)
+ metadata_func = self.get_metadata_from_agent
+ try:
+ fabric_data = metadata_func()
+ except Exception as exc:
+ LOG.info("Error communicating with Azure fabric; assume we aren't"
+ " on Azure.", exc_info=True)
+ return False
- pubkeys = pubkeys_from_crt_files(fp_files)
- self.metadata['public-keys'] = pubkeys
+ self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
+ self.metadata.update(fabric_data)
- found_ephemeral = find_ephemeral_disk()
+ found_ephemeral = find_fabric_formatted_ephemeral_disk()
if found_ephemeral:
self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
LOG.debug("using detected ephemeral0 of %s", found_ephemeral)
@@ -211,35 +254,42 @@ class DataSourceAzureNet(sources.DataSource):
def get_config_obj(self):
return self.cfg
+ def check_instance_id(self, sys_cfg):
+ # quickly (local check only) if self.instance_id is still valid
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
def count_files(mp):
return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*'))
-def find_ephemeral_part():
+def find_fabric_formatted_ephemeral_part():
"""
- Locate the default ephmeral0.1 device. This will be the first device
- that has a LABEL of DEF_EPHEMERAL_LABEL and is a NTFS device. If Azure
- gets more ephemeral devices, this logic will only identify the first
- such device.
+ Locate the first fabric formatted ephemeral device.
"""
- c_label_devs = util.find_devs_with("LABEL=%s" % DEF_EPHEMERAL_LABEL)
- c_fstype_devs = util.find_devs_with("TYPE=ntfs")
- for dev in c_label_devs:
- if dev in c_fstype_devs:
- return dev
+ potential_locations = ['/dev/disk/cloud/azure_resource-part1',
+ '/dev/disk/azure/resource-part1']
+ device_location = None
+ for potential_location in potential_locations:
+ if os.path.exists(potential_location):
+ device_location = potential_location
+ break
+ if device_location is None:
+ return None
+ ntfs_devices = util.find_devs_with("TYPE=ntfs")
+ real_device = os.path.realpath(device_location)
+ if real_device in ntfs_devices:
+ return device_location
return None
-def find_ephemeral_disk():
+def find_fabric_formatted_ephemeral_disk():
"""
Get the ephemeral disk.
"""
- part_dev = find_ephemeral_part()
- if part_dev and str(part_dev[-1]).isdigit():
- return part_dev[:-1]
- elif part_dev:
- return part_dev
+ part_dev = find_fabric_formatted_ephemeral_part()
+ if part_dev:
+ return part_dev.split('-')[0]
return None
@@ -253,7 +303,7 @@ def support_new_ephemeral(cfg):
new ephemeral device is detected, cloud-init overrides the default
frequency for both disk-setup and mounts for the current boot only.
"""
- device = find_ephemeral_part()
+ device = find_fabric_formatted_ephemeral_part()
if not device:
LOG.debug("no default fabric formated ephemeral0.1 found")
return None
@@ -298,39 +348,15 @@ def support_new_ephemeral(cfg):
return mod_list
-def handle_set_hostname(enabled, hostname, cfg):
- if not util.is_true(enabled):
- return
-
- if not hostname:
- LOG.warn("set_hostname was true but no local-hostname")
- return
-
- apply_hostname_bounce(hostname=hostname, policy=cfg['policy'],
- interface=cfg['interface'],
- command=cfg['command'],
- hostname_command=cfg['hostname_command'])
-
-
-def apply_hostname_bounce(hostname, policy, interface, command,
- hostname_command="hostname"):
+def perform_hostname_bounce(hostname, cfg, prev_hostname):
# set the hostname to 'hostname' if it is not already set to that.
# then, if policy is not off, bounce the interface using command
- prev_hostname = util.subp(hostname_command, capture=True)[0].strip()
-
- util.subp([hostname_command, hostname])
-
- msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
- (prev_hostname, hostname, policy, interface))
-
- if util.is_false(policy):
- LOG.debug("pubhname: policy false, skipping [%s]", msg)
- return
-
- if prev_hostname == hostname and policy != "force":
- LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
- return
+ command = cfg['command']
+ interface = cfg['interface']
+ policy = cfg['policy']
+ msg = ("hostname=%s policy=%s interface=%s" %
+ (hostname, policy, interface))
env = os.environ.copy()
env['interface'] = interface
env['hostname'] = hostname
@@ -343,15 +369,16 @@ def apply_hostname_bounce(hostname, policy, interface, command,
shell = not isinstance(command, (list, tuple))
# capture=False, see comments in bug 1202758 and bug 1206164.
util.log_time(logfunc=LOG.debug, msg="publishing hostname",
- get_uptime=True, func=util.subp,
- kwargs={'args': command, 'shell': shell, 'capture': False,
- 'env': env})
+ get_uptime=True, func=util.subp,
+ kwargs={'args': command, 'shell': shell, 'capture': False,
+ 'env': env})
-def crtfile_to_pubkey(fname):
+def crtfile_to_pubkey(fname, data=None):
pipeline = ('openssl x509 -noout -pubkey < "$0" |'
'ssh-keygen -i -m PKCS8 -f /dev/stdin')
- (out, _err) = util.subp(['sh', '-c', pipeline, fname], capture=True)
+ (out, _err) = util.subp(['sh', '-c', pipeline, fname],
+ capture=True, data=data)
return out.rstrip()
@@ -383,14 +410,30 @@ def wait_for_files(flist, maxwait=60, naplen=.5):
def write_files(datadir, files, dirmode=None):
+
+ def _redact_password(cnt, fname):
+ """Azure provides the UserPassword in plain text. So we redact it"""
+ try:
+ root = ET.fromstring(cnt)
+ for elem in root.iter():
+ if ('UserPassword' in elem.tag and
+ elem.text != DEF_PASSWD_REDACTION):
+ elem.text = DEF_PASSWD_REDACTION
+ return ET.tostring(root)
+ except Exception:
+ LOG.critical("failed to redact userpassword in %s", fname)
+ return cnt
+
if not datadir:
return
if not files:
files = {}
util.ensure_dir(datadir, dirmode)
for (name, content) in files.items():
- util.write_file(filename=os.path.join(datadir, name),
- content=content, mode=0600)
+ fname = os.path.join(datadir, name)
+ if 'ovf-env.xml' in name:
+ content = _redact_password(content, fname)
+ util.write_file(filename=fname, content=content, mode=0o600)
def invoke_agent(cmd):
@@ -441,7 +484,8 @@ def load_azure_ovf_pubkeys(sshnode):
for pk_node in pubkeys:
if not pk_node.hasChildNodes():
continue
- cur = {'fingerprint': "", 'path': ""}
+
+ cur = {'fingerprint': "", 'path': "", 'value': ""}
for child in pk_node.childNodes:
if child.nodeType == text_node or not child.localName:
continue
@@ -461,20 +505,6 @@ def load_azure_ovf_pubkeys(sshnode):
return found
-def single_node_at_path(node, pathlist):
- curnode = node
- for tok in pathlist:
- results = find_child(curnode, lambda n: n.localName == tok)
- if len(results) == 0:
- raise ValueError("missing %s token in %s" % (tok, str(pathlist)))
- if len(results) > 1:
- raise ValueError("found %s nodes of type %s looking for %s" %
- (len(results), tok, str(pathlist)))
- curnode = results[0]
-
- return curnode
-
-
def read_azure_ovf(contents):
try:
dom = minidom.parseString(contents)
@@ -482,7 +512,7 @@ def read_azure_ovf(contents):
raise BrokenAzureDataSource("invalid xml: %s" % e)
results = find_child(dom.documentElement,
- lambda n: n.localName == "ProvisioningSection")
+ lambda n: n.localName == "ProvisioningSection")
if len(results) == 0:
raise NonAzureDataSource("No ProvisioningSection")
@@ -492,7 +522,8 @@ def read_azure_ovf(contents):
provSection = results[0]
lpcs_nodes = find_child(provSection,
- lambda n: n.localName == "LinuxProvisioningConfigurationSet")
+ lambda n:
+ n.localName == "LinuxProvisioningConfigurationSet")
if len(results) == 0:
raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
@@ -559,7 +590,7 @@ def read_azure_ovf(contents):
defuser = {}
if username:
defuser['name'] = username
- if password:
+ if password and DEF_PASSWD_REDACTION != password:
defuser['passwd'] = encrypt_pass(password)
defuser['lock_passwd'] = False
@@ -592,32 +623,13 @@ def load_azure_ds_dir(source_dir):
if not os.path.isfile(ovf_file):
raise NonAzureDataSource("No ovf-env file found")
- with open(ovf_file, "r") as fp:
+ with open(ovf_file, "rb") as fp:
contents = fp.read()
md, ud, cfg = read_azure_ovf(contents)
return (md, ud, cfg, {'ovf-env.xml': contents})
-def iid_from_shared_config(path):
- with open(path, "rb") as fp:
- content = fp.read()
- return iid_from_shared_config_content(content)
-
-
-def iid_from_shared_config_content(content):
- """
- find INSTANCE_ID in:
- <?xml version="1.0" encoding="utf-8"?>
- <SharedConfig version="1.0.0.0" goalStateIncarnation="1">
- <Deployment name="INSTANCE_ID" guid="{...}" incarnation="0">
- <Service name="..." guid="{00000000-0000-0000-0000-000000000000}" />
- """
- dom = minidom.parseString(content)
- depnode = single_node_at_path(dom, ["SharedConfig", "Deployment"])
- return depnode.attributes.get('name').value
-
-
class BrokenAzureDataSource(Exception):
pass
@@ -628,7 +640,7 @@ class NonAzureDataSource(Exception):
# Used to match classes to dependencies
datasources = [
- (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
new file mode 100644
index 00000000..b5ee4129
--- /dev/null
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -0,0 +1,57 @@
+#
+# Copyright (C) 2015-2016 Bigstep Cloud Ltd.
+#
+# Author: Alexandru Sirbu <alexandru.sirbu@bigstep.com>
+#
+
+import json
+import errno
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit import url_helper
+
+LOG = logging.getLogger(__name__)
+
+
+class DataSourceBigstep(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.metadata = {}
+ self.vendordata_raw = ""
+ self.userdata_raw = ""
+
+ def get_data(self, apply_filter=False):
+ url = get_url_from_file()
+ if url is None:
+ return False
+ response = url_helper.readurl(url)
+ decoded = json.loads(response.contents)
+ self.metadata = decoded["metadata"]
+ self.vendordata_raw = decoded["vendordata_raw"]
+ self.userdata_raw = decoded["userdata_raw"]
+ return True
+
+
+def get_url_from_file():
+ try:
+ content = util.load_file("/var/lib/cloud/data/seed/bigstep/url")
+ except IOError as e:
+ # If the file doesn't exist, then the server probably isn't a Bigstep
+ # instance; otherwise, another problem exists which needs investigation
+ if e.errno == errno.ENOENT:
+ return None
+ else:
+ raise
+ return content
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceBigstep, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 707cd0ce..f8f94759 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -44,27 +44,25 @@ class DataSourceCloudSigma(sources.DataSource):
def is_running_in_cloudsigma(self):
"""
- Uses dmidecode to detect if this instance of cloud-init is running
+ Uses dmi data to detect if this instance of cloud-init is running
in the CloudSigma's infrastructure.
"""
uname_arch = os.uname()[4]
if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmidecode in CMD_DMI_SYSTEM crashes kvm process
+ # Disabling because dmi data is not available on ARM processors
LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)")
return False
- dmidecode_path = util.which('dmidecode')
- if not dmidecode_path:
+ LOG.debug("determining hypervisor product name via dmi data")
+ sys_product_name = util.read_dmi_data("system-product-name")
+ if not sys_product_name:
+ LOG.warn("failed to get hypervisor product name via dmi data")
return False
+ else:
+ LOG.debug("detected hypervisor as %s", sys_product_name)
+ return 'cloudsigma' in sys_product_name.lower()
- LOG.debug("Determining hypervisor product name via dmidecode")
- try:
- cmd = [dmidecode_path, "--string", "system-product-name"]
- system_product_name, _ = util.subp(cmd)
- return 'cloudsigma' in system_product_name.lower()
- except:
- LOG.warn("Failed to get hypervisor product name via dmidecode")
-
+ LOG.warn("failed to query dmi data for system product name")
return False
def get_data(self):
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 1bbeca59..64595020 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -26,18 +26,54 @@
import os
import time
+from socket import inet_ntoa
+from struct import pack
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
-from cloudinit import sources
from cloudinit import url_helper as uhelp
-from cloudinit import util
-from socket import inet_ntoa
-from struct import pack
+from cloudinit import sources, util
LOG = logging.getLogger(__name__)
+class CloudStackPasswordServerClient(object):
+ """
+ Implements password fetching from the CloudStack password server.
+
+ http://cloudstack-administration.readthedocs.org/
+ en/latest/templates.html#adding-password-management-to-your-templates
+ has documentation about the system. This implementation is following that
+ found at
+ https://github.com/shankerbalan/cloudstack-scripts/
+ blob/master/cloud-set-guest-password-debian
+ """
+
+ def __init__(self, virtual_router_address):
+ self.virtual_router_address = virtual_router_address
+
+ def _do_request(self, domu_request):
+ # The password server was, in the past, a broken HTTP server, but is now
+ # fixed. wget handles this seamlessly, so it's easier to shell out to
+ # that rather than write our own handling code.
+ output, _ = util.subp([
+ 'wget', '--quiet', '--tries', '3', '--timeout', '20',
+ '--output-document', '-', '--header',
+ 'DomU_Request: {0}'.format(domu_request),
+ '{0}:8080'.format(self.virtual_router_address)
+ ])
+ return output.strip()
+
+ def get_password(self):
+ password = self._do_request('send_my_password')
+ if password in ['', 'saved_password']:
+ return None
+ if password == 'bad_request':
+ raise RuntimeError('Error when attempting to fetch root password.')
+ self._do_request('saved_password')
+ return password
+
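A hedged sketch of how the client above would be used (the router address is
illustrative):

    client = CloudStackPasswordServerClient('10.1.1.1')
    password = client.get_password()  # None if unset or already consumed
    if password:
        pass  # applied via a chpasswd-style config, as get_data() does below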
+
class DataSourceCloudStack(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -45,10 +81,11 @@ class DataSourceCloudStack(sources.DataSource):
# Cloudstack has its metadata/userdata URLs located at
# http://<virtual-router-ip>/latest/
self.api_ver = 'latest'
- vr_addr = get_vr_address()
- if not vr_addr:
+ self.vr_addr = get_vr_address()
+ if not self.vr_addr:
raise RuntimeError("No virtual router found!")
- self.metadata_address = "http://%s/" % (vr_addr)
+ self.metadata_address = "http://%s/" % (self.vr_addr,)
+ self.cfg = {}
def _get_url_settings(self):
mcfg = self.ds_cfg
@@ -82,17 +119,20 @@ class DataSourceCloudStack(sources.DataSource):
'latest/meta-data/instance-id')]
start_time = time.time()
url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
+ timeout=timeout, status_cb=LOG.warn)
if url:
LOG.debug("Using metadata source: '%s'", url)
else:
LOG.critical(("Giving up on waiting for the metadata from %s"
" after %s seconds"),
- urls, int(time.time() - start_time))
+ urls, int(time.time() - start_time))
return bool(url)
+ def get_config_obj(self):
+ return self.cfg
+
def get_data(self):
seed_ret = {}
if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
@@ -104,12 +144,28 @@ class DataSourceCloudStack(sources.DataSource):
if not self.wait_for_metadata_service():
return False
start_time = time.time()
- self.userdata_raw = ec2.get_instance_userdata(self.api_ver,
- self.metadata_address)
+ self.userdata_raw = ec2.get_instance_userdata(
+ self.api_ver, self.metadata_address)
self.metadata = ec2.get_instance_metadata(self.api_ver,
self.metadata_address)
LOG.debug("Crawl of metadata service took %s seconds",
int(time.time() - start_time))
+ password_client = CloudStackPasswordServerClient(self.vr_addr)
+ try:
+ set_password = password_client.get_password()
+ except Exception:
+ util.logexc(LOG,
+ 'Failed to fetch password from virtual router %s',
+ self.vr_addr)
+ else:
+ if set_password:
+ self.cfg = {
+ 'ssh_pwauth': True,
+ 'password': set_password,
+ 'chpasswd': {
+ 'expire': False,
+ },
+ }
return True
except Exception:
util.logexc(LOG, 'Failed fetching from metadata service %s',
@@ -192,7 +248,7 @@ def get_vr_address():
# Used to match classes to dependencies
datasources = [
- (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 15244a0d..3fa62ef3 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -18,6 +18,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import copy
import os
from cloudinit import log as logging
@@ -39,7 +40,7 @@ FS_TYPES = ('vfat', 'iso9660')
LABEL_TYPES = ('config-2',)
POSSIBLE_MOUNTS = ('sr', 'cd')
OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
- for i in range(0, 2)))
+ for i in range(0, 2)))
class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
@@ -50,6 +51,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
self.version = None
self.ec2_metadata = None
+ self._network_config = None
+ self.network_json = None
self.files = {}
def __str__(self):
@@ -144,8 +147,25 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
LOG.warn("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
+ try:
+ self.network_json = results.get('networkdata')
+ except ValueError as e:
+ LOG.warn("Invalid content in network-data: %s", e)
+ self.network_json = None
+
return True
+ def check_instance_id(self):
+ # quickly (local check only) if self.instance_id is still valid
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
+ @property
+ def network_config(self):
+ if self._network_config is None:
+ if self.network_json is not None:
+ self._network_config = convert_network_data(self.network_json)
+ return self._network_config
+
class DataSourceConfigDriveNet(DataSourceConfigDrive):
def __init__(self, sys_cfg, distro, paths):
@@ -216,11 +236,11 @@ def on_first_boot(data, distro=None):
files = data.get('files', {})
if files:
LOG.debug("Writing %s injected files", len(files))
- for (filename, content) in files.iteritems():
+ for (filename, content) in files.items():
if not filename.startswith(os.sep):
filename = os.sep + filename
try:
- util.write_file(filename, content, mode=0660)
+ util.write_file(filename, content, mode=0o660)
except IOError:
util.logexc(LOG, "Failed writing file: %s", filename)
@@ -283,3 +303,122 @@ datasources = [
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
+
+# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
+def convert_network_data(network_json=None):
+ """Return a dictionary of network_config by parsing provided
+ OpenStack ConfigDrive NetworkData json format
+
+ OpenStack network_data.json provides a 3-element dictionary:
+ - "links" (links are network devices, physical or virtual)
+ - "networks" (networks are ip network configurations for one or more
+ links)
+ - "services" (non-ip services, like dns)
+
+ networks and links are combined via network items referencing specific
+ links via a 'link_id' which maps to a link's 'id' field.
+
+ To convert this format to network_config yaml, we first iterate over the
+ links and then walk the network list to determine if any of the networks
+ utilize the current link; if so we generate a subnet entry for the device
+
+ We also need to map network_data.json fields to network_config fields. For
+ example, the network_data link's 'id' field is equivalent to the network_config
+ 'name' field for devices. We apply more of this mapping to the various
+ link types that we encounter.
+
+ There are additional fields that are populated in the network_data.json
+ from OpenStack that are not relevant to network_config yaml, so we
+ enumerate a dictionary of valid keys for network_yaml and apply filtering
+ to drop these superfluous keys from the network_config yaml.
+ """
+ if network_json is None:
+ return None
+
+ # dict of network_config key for filtering network_json
+ valid_keys = {
+ 'physical': [
+ 'name',
+ 'type',
+ 'mac_address',
+ 'subnets',
+ 'params',
+ ],
+ 'subnet': [
+ 'type',
+ 'address',
+ 'netmask',
+ 'broadcast',
+ 'metric',
+ 'gateway',
+ 'pointopoint',
+ 'mtu',
+ 'scope',
+ 'dns_nameservers',
+ 'dns_search',
+ 'routes',
+ ],
+ }
+
+ links = network_json.get('links', [])
+ networks = network_json.get('networks', [])
+ services = network_json.get('services', [])
+
+ config = []
+ for link in links:
+ subnets = []
+ cfg = {k: v for k, v in link.items()
+ if k in valid_keys['physical']}
+ cfg.update({'name': link['id']})
+ for network in [net for net in networks
+ if net['link'] == link['id']]:
+ subnet = {k: v for k, v in network.items()
+ if k in valid_keys['subnet']}
+ if 'dhcp' in network['type']:
+ t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
+ subnet.update({
+ 'type': t,
+ })
+ else:
+ subnet.update({
+ 'type': 'static',
+ 'address': network.get('ip_address'),
+ })
+ subnets.append(subnet)
+ cfg.update({'subnets': subnets})
+ if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']:
+ cfg.update({
+ 'type': 'physical',
+ 'mac_address': link['ethernet_mac_address']})
+ elif link['type'] in ['bond']:
+ params = {}
+ for k, v in link.items():
+ if k == 'bond_links':
+ continue
+ elif k.startswith('bond'):
+ params.update({k: v})
+ cfg.update({
+ 'bond_interfaces': copy.deepcopy(link['bond_links']),
+ 'params': params,
+ })
+ elif link['type'] in ['vlan']:
+ cfg.update({
+ 'name': "%s.%s" % (link['vlan_link'],
+ link['vlan_id']),
+ 'vlan_link': link['vlan_link'],
+ 'vlan_id': link['vlan_id'],
+ 'mac_address': link['vlan_mac_address'],
+ })
+ else:
+ raise ValueError(
+ 'Unknown network_data link type: %s' % link['type'])
+
+ config.append(cfg)
+
+ for service in services:
+ cfg = service
+ cfg.update({'type': 'nameserver'})
+ config.append(cfg)
+
+ return {'version': 1, 'config': config}
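To illustrate the conversion, a small network_data.json fragment and the
shape it produces (addresses and ids are made up):

    network_json = {
        'links': [{'id': 'tap0', 'type': 'ethernet',
                   'ethernet_mac_address': 'fa:16:3e:aa:bb:cc'}],
        'networks': [{'link': 'tap0', 'type': 'ipv4',
                      'ip_address': '10.0.0.5',
                      'netmask': '255.255.255.0'}],
        'services': [{'type': 'dns', 'address': '8.8.8.8'}],
    }
    net_cfg = convert_network_data(network_json)
    # {'version': 1, 'config': [
    #    {'name': 'tap0', 'type': 'physical',
    #     'mac_address': 'fa:16:3e:aa:bb:cc',
    #     'subnets': [{'type': 'static', 'address': '10.0.0.5',
    #                  'netmask': '255.255.255.0'}]},
    #    {'type': 'nameserver', 'address': '8.8.8.8'}]}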
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 8f27ee89..12e863d2 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -18,7 +18,7 @@ from cloudinit import log as logging
from cloudinit import util
from cloudinit import sources
from cloudinit import ec2_utils
-from types import StringType
+
import functools
@@ -54,9 +54,13 @@ class DataSourceDigitalOcean(sources.DataSource):
def get_data(self):
caller = functools.partial(util.read_file_or_url,
timeout=self.timeout, retries=self.retries)
- md = ec2_utils.MetadataMaterializer(str(caller(self.metadata_address)),
+
+ def mcaller(url):
+ return caller(url).contents
+
+ md = ec2_utils.MetadataMaterializer(mcaller(self.metadata_address),
base_url=self.metadata_address,
- caller=caller)
+ caller=mcaller)
self.metadata = md.materialize()
@@ -72,10 +76,11 @@ class DataSourceDigitalOcean(sources.DataSource):
return "\n".join(self.metadata['vendor-data'])
def get_public_ssh_keys(self):
- if type(self.metadata['public-keys']) is StringType:
- return [self.metadata['public-keys']]
+ public_keys = self.metadata['public-keys']
+ if isinstance(public_keys, list):
+ return public_keys
else:
- return self.metadata['public-keys']
+ return [public_keys]
@property
def availability_zone(self):
@@ -84,7 +89,7 @@ class DataSourceDigitalOcean(sources.DataSource):
def get_instance_id(self):
return self.metadata['id']
- def get_hostname(self, fqdn=False):
+ def get_hostname(self, fqdn=False, resolve_ip=False):
return self.metadata['hostname']
def get_package_mirror_info(self):
@@ -96,8 +101,8 @@ class DataSourceDigitalOcean(sources.DataSource):
# Used to match classes to dependencies
datasources = [
- (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
- ]
+ (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
# Return a list of data sources that match this set of dependencies
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 1b20ecf3..3ef2c6af 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -61,12 +61,12 @@ class DataSourceEc2(sources.DataSource):
if not self.wait_for_metadata_service():
return False
start_time = time.time()
- self.userdata_raw = ec2.get_instance_userdata(self.api_ver,
- self.metadata_address)
+ self.userdata_raw = \
+ ec2.get_instance_userdata(self.api_ver, self.metadata_address)
self.metadata = ec2.get_instance_metadata(self.api_ver,
self.metadata_address)
LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
+ int(time.time() - start_time))
return True
except Exception:
util.logexc(LOG, "Failed reading from metadata address %s",
@@ -132,13 +132,13 @@ class DataSourceEc2(sources.DataSource):
start_time = time.time()
url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
+ timeout=timeout, status_cb=LOG.warn)
if url:
LOG.debug("Using metadata source: '%s'", url2base[url])
else:
LOG.critical("Giving up on md from %s after %s seconds",
- urls, int(time.time() - start_time))
+ urls, int(time.time() - start_time))
self.metadata_address = url2base.get(url)
return bool(url)
@@ -156,8 +156,8 @@ class DataSourceEc2(sources.DataSource):
# 'ephemeral0': '/dev/sdb',
# 'root': '/dev/sda1'}
found = None
- bdm_items = self.metadata['block-device-mapping'].iteritems()
- for (entname, device) in bdm_items:
+ bdm = self.metadata['block-device-mapping']
+ for (entname, device) in bdm.items():
if entname == name:
found = device
break
@@ -197,9 +197,16 @@ class DataSourceEc2(sources.DataSource):
except KeyError:
return None
+ @property
+ def region(self):
+ az = self.availability_zone
+ if az is not None:
+ return az[:-1]
+ return None
+
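The region is simply the availability zone with its trailing zone letter
removed; e.g. (illustrative value):

    # an availability zone of 'us-east-1a' yields region 'us-east-1'
    assert 'us-east-1a'[:-1] == 'us-east-1'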
# Used to match classes to dependencies
datasources = [
- (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 2cf8fdcd..7e7fc033 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -30,6 +30,31 @@ BUILTIN_DS_CONFIG = {
REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
+class GoogleMetadataFetcher(object):
+ headers = {'X-Google-Metadata-Request': True}
+
+ def __init__(self, metadata_address):
+ self.metadata_address = metadata_address
+
+ def get_value(self, path, is_text):
+ value = None
+ try:
+ resp = url_helper.readurl(url=self.metadata_address + path,
+ headers=self.headers)
+ except url_helper.UrlError as exc:
+ msg = "url %s raised exception %s"
+ LOG.debug(msg, path, exc)
+ else:
+ if resp.code == 200:
+ if is_text:
+ value = util.decode_binary(resp.contents)
+ else:
+ value = resp.contents
+ else:
+ LOG.debug("url %s returned code %s", path, resp.code)
+ return value
+
+
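A sketch of the fetcher in isolation (metadata address and path are
illustrative):

    fetcher = GoogleMetadataFetcher(
        'http://metadata.google.internal/computeMetadata/v1/')
    instance_id = fetcher.get_value('instance/id', is_text=True)
    # returns None (and logs) on any URL error or non-200 response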
class DataSourceGCE(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -50,18 +75,16 @@ class DataSourceGCE(sources.DataSource):
return public_key
def get_data(self):
- # GCE metadata server requires a custom header since v1
- headers = {'X-Google-Metadata-Request': True}
-
- # url_map: (our-key, path, required)
+ # url_map: (our-key, path, required, is_text)
url_map = [
- ('instance-id', 'instance/id', True),
- ('availability-zone', 'instance/zone', True),
- ('local-hostname', 'instance/hostname', True),
- ('public-keys', 'project/attributes/sshKeys', False),
- ('user-data', 'instance/attributes/user-data', False),
- ('user-data-encoding', 'instance/attributes/user-data-encoding',
- False),
+ ('instance-id', ('instance/id',), True, True),
+ ('availability-zone', ('instance/zone',), True, True),
+ ('local-hostname', ('instance/hostname',), True, True),
+ ('public-keys', ('project/attributes/sshKeys',
+ 'instance/attributes/sshKeys'), False, True),
+ ('user-data', ('instance/attributes/user-data',), False, False),
+ ('user-data-encoding', ('instance/attributes/user-data-encoding',),
+ False, True),
]
# if we cannot resolve the metadata server, then no point in trying
@@ -69,42 +92,34 @@ class DataSourceGCE(sources.DataSource):
LOG.debug("%s is not resolvable", self.metadata_address)
return False
+ metadata_fetcher = GoogleMetadataFetcher(self.metadata_address)
# iterate over url_map keys to get metadata items
- found = False
- for (mkey, path, required) in url_map:
- try:
- resp = url_helper.readurl(url=self.metadata_address + path,
- headers=headers)
- if resp.code == 200:
- found = True
- self.metadata[mkey] = resp.contents
+ running_on_gce = False
+ for (mkey, paths, required, is_text) in url_map:
+ value = None
+ for path in paths:
+ new_value = metadata_fetcher.get_value(path, is_text)
+ if new_value is not None:
+ value = new_value
+ if value:
+ running_on_gce = True
+ if required and value is None:
+ msg = "required key %s returned nothing. not GCE"
+ if not running_on_gce:
+ LOG.debug(msg, mkey)
else:
- if required:
- msg = "required url %s returned code %s. not GCE"
- if not found:
- LOG.debug(msg, path, resp.code)
- else:
- LOG.warn(msg, path, resp.code)
- return False
- else:
- self.metadata[mkey] = None
- except url_helper.UrlError as e:
- if required:
- msg = "required url %s raised exception %s. not GCE"
- if not found:
- LOG.debug(msg, path, e)
- else:
- LOG.warn(msg, path, e)
- return False
- msg = "Failed to get %s metadata item: %s."
- LOG.debug(msg, path, e)
-
- self.metadata[mkey] = None
+ LOG.warn(msg, mkey)
+ return False
+ self.metadata[mkey] = value
if self.metadata['public-keys']:
lines = self.metadata['public-keys'].splitlines()
self.metadata['public-keys'] = [self._trim_key(k) for k in lines]
+ if self.metadata['availability-zone']:
+ self.metadata['availability-zone'] = self.metadata[
+ 'availability-zone'].split('/')[-1]
+
encoding = self.metadata.get('user-data-encoding')
if encoding:
if encoding == 'base64':
@@ -113,7 +128,7 @@ class DataSourceGCE(sources.DataSource):
else:
LOG.warn('unknown user-data-encoding: %s, ignoring', encoding)
- return found
+ return running_on_gce
@property
def launch_index(self):
@@ -126,7 +141,7 @@ class DataSourceGCE(sources.DataSource):
def get_public_ssh_keys(self):
return self.metadata['public-keys']
- def get_hostname(self, fqdn=False, _resolve_ip=False):
+ def get_hostname(self, fqdn=False, resolve_ip=False):
# GCE has long FQDNs and has asked for short hostnames
return self.metadata['local-hostname'].split('.')[0]
@@ -137,6 +152,10 @@ class DataSourceGCE(sources.DataSource):
def availability_zone(self):
return self.metadata['availability-zone']
+ @property
+ def region(self):
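+ # an availability zone such as 'us-central1-a' maps to the
+ # region 'us-central1'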
+ return self.availability_zone.rsplit('-', 1)[0]
+
# Used to match classes to dependencies
datasources = [
(DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index dfe90bc6..d828f078 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -18,12 +18,11 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from email.utils import parsedate
+from __future__ import print_function
+
import errno
-import oauth.oauth as oauth
import os
import time
-import urllib2
from cloudinit import log as logging
from cloudinit import sources
@@ -33,6 +32,8 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
MD_VERSION = "2012-03-01"
+BINARY_FIELDS = ('user-data',)
+
class DataSourceMAAS(sources.DataSource):
"""
@@ -47,7 +48,20 @@ class DataSourceMAAS(sources.DataSource):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.base_url = None
self.seed_dir = os.path.join(paths.seed_dir, 'maas')
- self.oauth_clockskew = None
+ self.oauth_helper = self._get_helper()
+
+ def _get_helper(self):
+ mcfg = self.ds_cfg
+ # If we are missing token_key, token_secret or consumer_key
+ # then just do non-authed requests
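+ # (for illustration, an oauth-enabled config with hypothetical
+ #  values: datasource: {MAAS: {consumer_key: Xh234sdkljf,
+ #  token_key: kjfhgb3n, token_secret: 24uysdfx1w4}})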
+ for required in ('token_key', 'token_secret', 'consumer_key'):
+ if required not in mcfg:
+ return url_helper.OauthUrlHelper()
+
+ return url_helper.OauthUrlHelper(
+ consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'],
+ token_secret=mcfg['token_secret'],
+ consumer_secret=mcfg.get('consumer_secret'))
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -74,14 +88,18 @@ class DataSourceMAAS(sources.DataSource):
return False
try:
+ # doing this here actually has a side effect of
+ # getting the oauth time-fix in place. Nowhere else would
+ # retry by default, so even if we could fix the timestamp
+ # we would not.
if not self.wait_for_metadata_service(url):
return False
self.base_url = url
- (userdata, metadata) = read_maas_seed_url(self.base_url,
- self._md_headers,
- paths=self.paths)
+ (userdata, metadata) = read_maas_seed_url(
+ self.base_url, read_file_or_url=self.oauth_helper.readurl,
+ paths=self.paths, retries=1)
self.userdata_raw = userdata
self.metadata = metadata
return True
@@ -89,31 +107,8 @@ class DataSourceMAAS(sources.DataSource):
util.logexc(LOG, "Failed fetching metadata from url %s", url)
return False
- def _md_headers(self, url):
- mcfg = self.ds_cfg
-
- # If we are missing token_key, token_secret or consumer_key
- # then just do non-authed requests
- for required in ('token_key', 'token_secret', 'consumer_key'):
- if required not in mcfg:
- return {}
-
- consumer_secret = mcfg.get('consumer_secret', "")
-
- timestamp = None
- if self.oauth_clockskew:
- timestamp = int(time.time()) + self.oauth_clockskew
-
- return oauth_headers(url=url,
- consumer_key=mcfg['consumer_key'],
- token_key=mcfg['token_key'],
- token_secret=mcfg['token_secret'],
- consumer_secret=consumer_secret,
- timestamp=timestamp)
-
def wait_for_metadata_service(self, url):
mcfg = self.ds_cfg
-
max_wait = 120
try:
max_wait = int(mcfg.get("max_wait", max_wait))
@@ -133,10 +128,8 @@ class DataSourceMAAS(sources.DataSource):
starttime = time.time()
check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
urls = [check_url]
- url = url_helper.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout,
- exception_cb=self._except_cb,
- headers_cb=self._md_headers)
+ url = self.oauth_helper.wait_for_url(
+ urls=urls, max_wait=max_wait, timeout=timeout)
if url:
LOG.debug("Using metadata source: '%s'", url)
@@ -146,26 +139,6 @@ class DataSourceMAAS(sources.DataSource):
return bool(url)
- def _except_cb(self, msg, exception):
- if not (isinstance(exception, url_helper.UrlError) and
- (exception.code == 403 or exception.code == 401)):
- return
-
- if 'date' not in exception.headers:
- LOG.warn("Missing header 'date' in %s response", exception.code)
- return
-
- date = exception.headers['date']
- try:
- ret_time = time.mktime(parsedate(date))
- except Exception as e:
- LOG.warn("Failed to convert datetime '%s': %s", date, e)
- return
-
- self.oauth_clockskew = int(ret_time - time.time())
- LOG.warn("Setting oauth clockskew to %d", self.oauth_clockskew)
- return
-
def read_maas_seed_dir(seed_d):
"""
@@ -182,7 +155,8 @@ def read_maas_seed_dir(seed_d):
md = {}
for fname in files:
try:
- md[fname] = util.load_file(os.path.join(seed_d, fname))
+ md[fname] = util.load_file(os.path.join(seed_d, fname),
+ decode=fname not in BINARY_FIELDS)
except IOError as e:
if e.errno != errno.ENOENT:
raise
@@ -190,12 +164,12 @@ def read_maas_seed_dir(seed_d):
return check_seed_contents(md, seed_d)
-def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
- version=MD_VERSION, paths=None):
+def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
+ version=MD_VERSION, paths=None, retries=None):
"""
Read the maas datasource at seed_url.
- - header_cb is a method that should return a headers dictionary for
- a given url
+ read_file_or_url is a method that should provide an interface
+ like util.read_file_or_url
Expected format of seed_url is the following files:
* <seed_url>/<version>/meta-data/instance-id
@@ -215,27 +189,27 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'),
'user-data': "%s/%s" % (base_url, 'user-data'),
}
+
+ if read_file_or_url is None:
+ read_file_or_url = util.read_file_or_url
+
md = {}
for name in file_order:
url = files.get(name)
- if not header_cb:
- def _cb(url):
- return {}
- header_cb = _cb
-
if name == 'user-data':
- retries = 0
+ item_retries = 0
else:
- retries = None
+ item_retries = retries
try:
ssl_details = util.fetch_ssl_details(paths)
- resp = util.read_file_or_url(url, retries=retries,
- headers_cb=header_cb,
- timeout=timeout,
- ssl_details=ssl_details)
+ resp = read_file_or_url(url, retries=item_retries,
+ timeout=timeout, ssl_details=ssl_details)
if resp.ok():
- md[name] = str(resp)
+ if name in BINARY_FIELDS:
+ md[name] = resp.contents
+ else:
+ md[name] = util.decode_binary(resp.contents)
else:
LOG.warn(("Fetching from %s resulted in"
" an invalid http code %s"), url, resp.code)
@@ -260,9 +234,9 @@ def check_seed_contents(content, seed):
if len(missing):
raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
- userdata = content.get('user-data', "")
+ userdata = content.get('user-data', b"")
md = {}
- for (key, val) in content.iteritems():
+ for (key, val) in content.items():
if key == 'user-data':
continue
md[key] = val
@@ -270,29 +244,6 @@ def check_seed_contents(content, seed):
return (userdata, md)
-def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
- timestamp=None):
- consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
- token = oauth.OAuthToken(token_key, token_secret)
-
- if timestamp is None:
- ts = int(time.time())
- else:
- ts = timestamp
-
- params = {
- 'oauth_version': "1.0",
- 'oauth_nonce': oauth.generate_nonce(),
- 'oauth_timestamp': ts,
- 'oauth_token': token.key,
- 'oauth_consumer_key': consumer.key,
- }
- req = oauth.OAuthRequest(http_url=url, parameters=params)
- req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(),
- consumer, token)
- return req.to_header()
-
-
class MAASSeedDirNone(Exception):
pass
@@ -303,7 +254,7 @@ class MAASSeedDirMalformed(Exception):
# Used to match classes to dependencies
datasources = [
- (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
@@ -324,17 +275,18 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Interact with MAAS DS')
parser.add_argument("--config", metavar="file",
- help="specify DS config file", default=None)
+ help="specify DS config file", default=None)
parser.add_argument("--ckey", metavar="key",
- help="the consumer key to auth with", default=None)
+ help="the consumer key to auth with", default=None)
parser.add_argument("--tkey", metavar="key",
- help="the token key to auth with", default=None)
+ help="the token key to auth with", default=None)
parser.add_argument("--csec", metavar="secret",
- help="the consumer secret (likely '')", default="")
+ help="the consumer secret (likely '')", default="")
parser.add_argument("--tsec", metavar="secret",
- help="the token secret to auth with", default=None)
+ help="the token secret to auth with", default=None)
parser.add_argument("--apiver", metavar="version",
- help="the apiver to use ("" can be used)", default=MD_VERSION)
+ help="the apiver to use ("" can be used)",
+ default=MD_VERSION)
subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
subcmds.add_parser('crawl', help="crawl the datasource")
@@ -346,7 +298,7 @@ if __name__ == "__main__":
args = parser.parse_args()
creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
- 'token_secret': args.tsec, 'consumer_secret': args.csec}
+ 'token_secret': args.tsec, 'consumer_secret': args.csec}
if args.config:
cfg = util.read_conf(args.config)
@@ -356,47 +308,46 @@ if __name__ == "__main__":
if key in cfg and creds[key] is None:
creds[key] = cfg[key]
- def geturl(url, headers_cb):
- req = urllib2.Request(url, data=None, headers=headers_cb(url))
- return (urllib2.urlopen(req).read())
+ oauth_helper = url_helper.OauthUrlHelper(**creds)
+
+ def geturl(url):
+ # the retry is to ensure that oauth timestamp gets fixed
+ return oauth_helper.readurl(url, retries=1).contents
- def printurl(url, headers_cb):
- print "== %s ==\n%s\n" % (url, geturl(url, headers_cb))
+ def printurl(url):
+ print("== %s ==\n%s\n" % (url, geturl(url).decode()))
- def crawl(url, headers_cb=None):
+ def crawl(url):
if url.endswith("/"):
- for line in geturl(url, headers_cb).splitlines():
+ for line in geturl(url).decode().splitlines():
if line.endswith("/"):
- crawl("%s%s" % (url, line), headers_cb)
+ crawl("%s%s" % (url, line))
+ elif line == "meta-data":
+ # meta-data is a dir, it *should* end in a /
+ crawl("%s%s" % (url, "meta-data/"))
else:
- printurl("%s%s" % (url, line), headers_cb)
+ printurl("%s%s" % (url, line))
else:
- printurl(url, headers_cb)
-
- def my_headers(url):
- headers = {}
- if creds.get('consumer_key', None) is not None:
- headers = oauth_headers(url, **creds)
- return headers
+ printurl(url)
if args.subcmd == "check-seed":
- if args.url.startswith("http"):
- (userdata, metadata) = read_maas_seed_url(args.url,
- header_cb=my_headers,
- version=args.apiver)
- else:
- (userdata, metadata) = read_maas_seed_url(args.url)
- print "=== userdata ==="
- print userdata
- print "=== metadata ==="
+ readurl = oauth_helper.readurl
+ if args.url[0] == "/" or args.url.startswith("file://"):
+ readurl = None
+ (userdata, metadata) = read_maas_seed_url(
+ args.url, version=args.apiver, read_file_or_url=readurl,
+ retries=2)
+ print("=== userdata ===")
+ print(userdata.decode())
+ print("=== metadata ===")
pprint.pprint(metadata)
elif args.subcmd == "get":
- printurl(args.url, my_headers)
+ printurl(args.url)
elif args.subcmd == "crawl":
if not args.url.endswith("/"):
args.url = "%s/" % args.url
- crawl(args.url, my_headers)
+ crawl(args.url)
main()
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index c26a645c..c2fba4d2 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -36,7 +36,9 @@ class DataSourceNoCloud(sources.DataSource):
self.dsmode = 'local'
self.seed = None
self.cmdline_id = "ds=nocloud"
- self.seed_dir = os.path.join(paths.seed_dir, 'nocloud')
+ self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'),
+ os.path.join(paths.seed_dir, 'nocloud-net')]
+ self.seed_dir = None
self.supported_seed_starts = ("/", "file://")
def __str__(self):
@@ -50,31 +52,32 @@ class DataSourceNoCloud(sources.DataSource):
}
found = []
- mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""}
+ mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
+ 'network-config': {}}
try:
# Parse the kernel command line, getting data passed in
md = {}
if parse_cmdline_data(self.cmdline_id, md):
found.append("cmdline")
- mydata['meta-data'].update(md)
+ mydata = _merge_new_seed(mydata, {'meta-data': md})
except:
util.logexc(LOG, "Unable to parse command line data")
return False
# Check to see if the seed dir has data.
pp2d_kwargs = {'required': ['user-data', 'meta-data'],
- 'optional': ['vendor-data']}
-
- try:
- seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs)
- found.append(self.seed_dir)
- LOG.debug("Using seeded data from %s", self.seed_dir)
- except ValueError as e:
- pass
-
- if self.seed_dir in found:
- mydata = _merge_new_seed(mydata, seeded)
+ 'optional': ['vendor-data', 'network-config']}
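+ # i.e. a seed dir (e.g. /var/lib/cloud/seed/nocloud/, path
+ # assumed) must contain user-data and meta-data files and may
+ # also provide vendor-data and network-config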
+
+ for path in self.seed_dirs:
+ try:
+ seeded = util.pathprefix2dict(path, **pp2d_kwargs)
+ found.append(path)
+ LOG.debug("Using seeded data from %s", path)
+ mydata = _merge_new_seed(mydata, seeded)
+ break
+ except ValueError as e:
+ pass
# If the datasource config had a 'seedfrom' entry, then that takes
# precedence over a 'seedfrom' that was found in a filesystem
@@ -124,7 +127,7 @@ class DataSourceNoCloud(sources.DataSource):
# that is more likely to be what is desired. If they want
# dsmode of local, then they must specify that.
if 'dsmode' not in mydata['meta-data']:
- mydata['dsmode'] = "net"
+ mydata['meta-data']['dsmode'] = "net"
LOG.debug("Using data from %s", dev)
found.append(dev)
@@ -141,8 +144,7 @@ class DataSourceNoCloud(sources.DataSource):
if len(found) == 0:
return False
- seeded_interfaces = None
-
+ seeded_network = None
# The special argument "seedfrom" indicates we should
# attempt to seed the userdata / metadata from its value
# its primarily value is in allowing the user to type less
@@ -158,8 +160,9 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Seed from %s not supported by %s", seedfrom, self)
return False
- if 'network-interfaces' in mydata['meta-data']:
- seeded_interfaces = self.dsmode
+ if (mydata['meta-data'].get('network-interfaces') or
+ mydata.get('network-config')):
+ seeded_network = self.dsmode
# This could throw errors, but the user told us to do it
# so if errors are raised, let them raise
@@ -176,26 +179,75 @@ class DataSourceNoCloud(sources.DataSource):
mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
defaults])
- # Update the network-interfaces if metadata had 'network-interfaces'
- # entry and this is the local datasource, or 'seedfrom' was used
- # and the source of the seed was self.dsmode
- # ('local' for NoCloud, 'net' for NoCloudNet')
- if ('network-interfaces' in mydata['meta-data'] and
- (self.dsmode in ("local", seeded_interfaces))):
- LOG.debug("Updating network interfaces from %s", self)
- self.distro.apply_network(
- mydata['meta-data']['network-interfaces'])
+ netdata = {'format': None, 'data': None}
+ if mydata['meta-data'].get('network-interfaces'):
+ netdata['format'] = 'interfaces'
+ netdata['data'] = mydata['meta-data']['network-interfaces']
+ elif mydata.get('network-config'):
+ netdata['format'] = 'network-config'
+ netdata['data'] = mydata['network-config']
+
+ # if this is the local datasource or 'seedfrom' was used
+ # and the source of the seed was self.dsmode.
+ # Then see if there is network config to apply.
+ # note this is obsolete network-interfaces style seeding.
+ if self.dsmode in ("local", seeded_network):
+ if mydata['meta-data'].get('network-interfaces'):
+ LOG.debug("Updating network interfaces from %s", self)
+ self.distro.apply_network(
+ mydata['meta-data']['network-interfaces'])
if mydata['meta-data']['dsmode'] == self.dsmode:
self.seed = ",".join(found)
self.metadata = mydata['meta-data']
self.userdata_raw = mydata['user-data']
- self.vendordata = mydata['vendor-data']
+ self.vendordata_raw = mydata['vendor-data']
+ self._network_config = mydata['network-config']
return True
- LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
+ LOG.debug("%s: not claiming datasource, dsmode=%s", self,
+ mydata['meta-data']['dsmode'])
return False
+ def check_instance_id(self, sys_cfg):
+ # quickly check (local only) whether self.instance_id is still
+ # valid; we check the kernel command line or seed files.
+ current = self.get_instance_id()
+ if not current:
+ return None
+
+ quick_id = _quick_read_instance_id(cmdline_id=self.cmdline_id,
+ dirs=self.seed_dirs)
+ if not quick_id:
+ return None
+ return quick_id == current
+
+ @property
+ def network_config(self):
+ return self._network_config
+
+
+def _quick_read_instance_id(cmdline_id, dirs=None):
+ if dirs is None:
+ dirs = []
+
+ iid_key = 'instance-id'
+ if cmdline_id is not None:
+ fill = {}
+ if parse_cmdline_data(cmdline_id, fill) and iid_key in fill:
+ return fill[iid_key]
+
+ for d in dirs:
+ try:
+ data = util.pathprefix2dict(d, required=['meta-data'])
+ md = util.load_yaml(data['meta-data'])
+ if iid_key in md:
+ return md[iid_key]
+ except ValueError:
+ pass
+
+ return None
+
# Returns true or false indicating if cmdline indicated
# that this module should be used
@@ -243,9 +295,17 @@ def parse_cmdline_data(ds_id, fill, cmdline=None):
def _merge_new_seed(cur, seeded):
ret = cur.copy()
- ret['meta-data'] = util.mergemanydict([cur['meta-data'],
- util.load_yaml(seeded['meta-data'])])
- ret['user-data'] = seeded['user-data']
+
+ newmd = seeded.get('meta-data', {})
+ if not isinstance(newmd, dict):
+ newmd = util.load_yaml(newmd)
+ ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd])
+
+ if seeded.get('network-config'):
+ ret['network-config'] = util.load_yaml(seeded['network-config'])
+
+ if 'user-data' in seeded:
+ ret['user-data'] = seeded['user-data']
if 'vendor-data' in seeded:
ret['vendor-data'] = seeded['vendor-data']
return ret
@@ -256,14 +316,13 @@ class DataSourceNoCloudNet(DataSourceNoCloud):
DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
self.cmdline_id = "ds=nocloud-net"
self.supported_seed_starts = ("http://", "https://", "ftp://")
- self.seed_dir = os.path.join(paths.seed_dir, 'nocloud-net')
self.dsmode = "net"
# Used to match classes to dependencies
datasources = [
- (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
- (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
+ (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
index 12a8a992..d1a62b2a 100644
--- a/cloudinit/sources/DataSourceNone.py
+++ b/cloudinit/sources/DataSourceNone.py
@@ -47,8 +47,8 @@ class DataSourceNone(sources.DataSource):
# Used to match classes to dependencies
datasources = [
- (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
- (DataSourceNone, []),
+ (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceNone, []),
]
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 7ba60735..2a6cd050 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -25,10 +25,22 @@ from xml.dom import minidom
import base64
import os
import re
+import time
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
+from .helpers.vmware.imc.config import Config
+from .helpers.vmware.imc.config_file import ConfigFile
+from .helpers.vmware.imc.config_nic import NicConfigurator
+from .helpers.vmware.imc.guestcust_event import GuestCustEventEnum
+from .helpers.vmware.imc.guestcust_state import GuestCustStateEnum
+from .helpers.vmware.imc.guestcust_error import GuestCustErrorEnum
+from .helpers.vmware.imc.guestcust_util import (
+ set_customization_status,
+ get_nics_to_enable,
+ enable_nics
+)
LOG = logging.getLogger(__name__)
@@ -50,23 +62,95 @@ class DataSourceOVF(sources.DataSource):
found = []
md = {}
ud = ""
+ vmwarePlatformFound = False
+ vmwareImcConfigFilePath = ''
defaults = {
"instance-id": "iid-dsovf",
}
(seedfile, contents) = get_ovf_env(self.paths.seed_dir)
+
+ system_type = util.read_dmi_data("system-product-name")
+ if system_type is None:
+ LOG.debug("No system-product-name found")
+
if seedfile:
# Found a seed dir
seed = os.path.join(self.paths.seed_dir, seedfile)
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
found.append(seed)
+ elif system_type and 'vmware' in system_type.lower():
+ LOG.debug("VMware Virtualization Platform found")
+ if not util.get_cfg_option_bool(
+ self.sys_cfg, "disable_vmware_customization", True):
+ deployPkgPluginPath = search_file("/usr/lib/vmware-tools",
+ "libdeployPkgPlugin.so")
+ if not deployPkgPluginPath:
+ deployPkgPluginPath = search_file("/usr/lib/open-vm-tools",
+ "libdeployPkgPlugin.so")
+ if deployPkgPluginPath:
+ # When the VM is powered on, the "VMware Tools" daemon
+ # copies the customization specification file to
+ # /var/run/vmware-imc directory. cloud-init code needs
+ # to search for the file in that directory.
+ vmwareImcConfigFilePath = util.log_time(
+ logfunc=LOG.debug,
+ msg="waiting for configuration file",
+ func=wait_for_imc_cfg_file,
+ args=("/var/run/vmware-imc", "cust.cfg"))
+
+ if vmwareImcConfigFilePath:
+ LOG.debug("Found VMware DeployPkg Config File at %s" %
+ vmwareImcConfigFilePath)
+ else:
+ LOG.debug("Did not find VMware DeployPkg Config File Path")
+ else:
+ LOG.debug("Customization for VMware platform is disabled.")
+
+ if vmwareImcConfigFilePath:
+ nics = ""
+ try:
+ cf = ConfigFile(vmwareImcConfigFilePath)
+ conf = Config(cf)
+ (md, ud, cfg) = read_vmware_imc(conf)
+ dirpath = os.path.dirname(vmwareImcConfigFilePath)
+ nics = get_nics_to_enable(dirpath)
+ except Exception as e:
+ LOG.debug("Error parsing the customization Config File")
+ LOG.exception(e)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
+ enable_nics(nics)
+ return False
+ finally:
+ util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
+
+ try:
+ LOG.debug("Applying the Network customization")
+ nicConfigurator = NicConfigurator(conf.nics)
+ nicConfigurator.configure()
+ except Exception as e:
+ LOG.debug("Error applying the Network Configuration")
+ LOG.exception(e)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED)
+ enable_nics(nics)
+ return False
+
+ vmwarePlatformFound = True
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_DONE,
+ GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
+ enable_nics(nics)
else:
np = {'iso': transport_iso9660,
'vmware-guestd': transport_vmware_guestd, }
name = None
- for (name, transfunc) in np.iteritems():
+ for (name, transfunc) in np.items():
(contents, _dev, _fname) = transfunc()
if contents:
break
@@ -76,7 +160,7 @@ class DataSourceOVF(sources.DataSource):
found.append(name)
# There was no OVF transports found
- if len(found) == 0:
+ if len(found) == 0 and not vmwarePlatformFound:
return False
if 'seedfrom' in md and md['seedfrom']:
@@ -129,6 +213,36 @@ class DataSourceOVFNet(DataSourceOVF):
self.supported_seed_starts = ("http://", "https://", "ftp://")
+def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
+ waited = 0
+
+ while waited < maxwait:
+ fileFullPath = search_file(dirpath, filename)
+ if fileFullPath:
+ return fileFullPath
+ time.sleep(naplen)
+ waited += naplen
+ return None
+
+
+# This will return a tuple with some content:
+# meta-data, user-data, some config
+def read_vmware_imc(config):
+ md = {}
+ cfg = {}
+ ud = ""
+ if config.host_name:
+ if config.domain_name:
+ md['local-hostname'] = config.host_name + "." + config.domain_name
+ else:
+ md['local-hostname'] = config.host_name
+
+ if config.timezone:
+ cfg['timezone'] = config.timezone
+
+ return (md, ud, cfg)
+
+
# This will return a tuple with some content:
# meta-data, user-data, some config
def read_ovf_environment(contents):
@@ -138,7 +252,7 @@ def read_ovf_environment(contents):
ud = ""
cfg_props = ['password']
md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
- for (prop, val) in props.iteritems():
+ for (prop, val) in props.items():
if prop == 'hostname':
prop = "local-hostname"
if prop in md_props:
@@ -183,7 +297,7 @@ def transport_iso9660(require_iso=True):
# Go through mounts to see if it was already mounted
mounts = util.mounts()
- for (dev, info) in mounts.iteritems():
+ for (dev, info) in mounts.items():
fstype = info['fstype']
if fstype != "iso9660" and require_iso:
continue
@@ -264,14 +378,14 @@ def get_properties(contents):
# could also check here that elem.namespaceURI ==
# "http://schemas.dmtf.org/ovf/environment/1"
propSections = find_child(dom.documentElement,
- lambda n: n.localName == "PropertySection")
+ lambda n: n.localName == "PropertySection")
if len(propSections) == 0:
raise XmlError("No 'PropertySection's")
props = {}
propElems = find_child(propSections[0],
- (lambda n: n.localName == "Property"))
+ (lambda n: n.localName == "Property"))
for elem in propElems:
key = elem.attributes.getNamedItemNS(envNsURI, "key").value
@@ -281,14 +395,25 @@ def get_properties(contents):
return props
+def search_file(dirpath, filename):
+ if not dirpath or not filename:
+ return None
+
+ for root, dirs, files in os.walk(dirpath):
+ if filename in files:
+ return os.path.join(root, filename)
+
+ return None
+
+
class XmlError(Exception):
pass
# Used to match classes to dependencies
datasources = (
- (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
- (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index e2469f6e..681f3a96 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -24,7 +24,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import base64
import os
import pwd
import re
@@ -34,6 +33,7 @@ from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
+
LOG = logging.getLogger(__name__)
DEFAULT_IID = "iid-dsopennebula"
@@ -149,8 +149,8 @@ class BrokenContextDiskDir(Exception):
class OpenNebulaNetwork(object):
REG_DEV_MAC = re.compile(
- r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
- re.MULTILINE | re.DOTALL)
+ r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
+ re.MULTILINE | re.DOTALL)
def __init__(self, ip, context):
self.ip = ip
@@ -280,7 +280,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
# allvars expands to all existing variables by using '${!x*}' notation
# where x is lower or upper case letters or '_'
- allvars = ["${!%s*}" % x for x in string.letters + "_"]
+ allvars = ["${!%s*}" % x for x in string.ascii_letters + "_"]
keylist_in = keylist
if keylist is None:
@@ -379,9 +379,8 @@ def read_context_disk_dir(source_dir, asuser=None):
raise BrokenContextDiskDir("configured user '%s' "
"does not exist", asuser)
try:
- with open(os.path.join(source_dir, 'context.sh'), 'r') as f:
- content = f.read().strip()
-
+ path = os.path.join(source_dir, 'context.sh')
+ content = util.load_file(path)
context = parse_shell_config(content, asuser=asuser)
except util.ProcessExecutionError as e:
raise BrokenContextDiskDir("Error processing context.sh: %s" % (e))
@@ -405,7 +404,8 @@ def read_context_disk_dir(source_dir, asuser=None):
if ssh_key_var:
lines = context.get(ssh_key_var).splitlines()
results['metadata']['public-keys'] = [l for l in lines
- if len(l) and not l.startswith("#")]
+ if len(l) and not
+ l.startswith("#")]
# custom hostname -- try hostname or leave cloud-init
# itself create hostname from IP address later
@@ -426,14 +426,14 @@ def read_context_disk_dir(source_dir, asuser=None):
context.get('USER_DATA_ENCODING'))
if encoding == "base64":
try:
- results['userdata'] = base64.b64decode(results['userdata'])
+ results['userdata'] = util.b64d(results['userdata'])
except TypeError:
LOG.warn("Failed base64 decoding of userdata")
# generate static /etc/network/interfaces
# only if there are any required context variables
# http://opennebula.org/documentation:rel3.8:cong#network_configuration
- for k in context.keys():
+ for k in context:
if re.match(r'^ETH\d+_IP$', k):
(out, _) = util.subp(['/sbin/ip', 'link'])
net = OpenNebulaNetwork(out, context)
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 469c2e2a..f7f4590b 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -150,6 +150,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
return True
+ def check_instance_id(self, sys_cfg):
+ # quickly check (local only) whether self.instance_id is still valid
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
def read_metadata_service(base_url, ssl_details=None):
reader = openstack.MetadataReader(base_url, ssl_details=ssl_details)
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 2733a2f6..5edab152 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -20,28 +20,38 @@
# Datasource for provisioning on SmartOS. This works on Joyent
# and public/private Clouds using SmartOS.
#
-# SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests.
+# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests
# The meta-data is transmitted via key/value pairs made by
# requests on the console. For example, to get the hostname, you
# would send "GET hostname" on /dev/ttyS1.
+# For Linux Guests running in LX-Brand Zones on SmartOS hosts
+# a socket (/native/.zonecontrol/metadata.sock) is used instead
+# of a serial console.
#
# Certain behavior is defined by the DataDictionary
# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
# Comments with "@datadictionary" are snippets of the definition
-import base64
+import binascii
+import contextlib
+import os
+import random
+import re
+import socket
+import stat
+
+import serial
+
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
-import os
-import os.path
-import serial
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
# Cloud-init Key : (SmartOS Key, Strip line endings)
+ 'instance-id': ('sdc:uuid', True),
'local-hostname': ('hostname', True),
'public-keys': ('root_authorized_keys', True),
'user-script': ('user-script', False),
@@ -72,6 +82,7 @@ DS_CFG_PATH = ['datasource', DS_NAME]
#
BUILTIN_DS_CONFIG = {
'serial_device': '/dev/ttyS1',
+ 'metadata_sockfile': '/native/.zonecontrol/metadata.sock',
'seed_timeout': 60,
'no_base64_decode': ['root_authorized_keys',
'motd_sys_info',
@@ -79,7 +90,7 @@ BUILTIN_DS_CONFIG = {
'user-data',
'user-script',
'sdc:datacenter_name',
- ],
+ 'sdc:uuid'],
'base64_keys': [],
'base64_all': False,
'disk_aliases': {'ephemeral0': '/dev/vdb'},
@@ -90,7 +101,7 @@ BUILTIN_CLOUD_CONFIG = {
'ephemeral0': {'table_type': 'mbr',
'layout': False,
'overwrite': False}
- },
+ },
'fs_setup': [{'label': 'ephemeral0',
'filesystem': 'ext3',
'device': 'ephemeral0'}],
@@ -146,17 +157,27 @@ class DataSourceSmartOS(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.is_smartdc = None
-
self.ds_cfg = util.mergemanydict([
self.ds_cfg,
util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
BUILTIN_DS_CONFIG])
self.metadata = {}
- self.cfg = BUILTIN_CLOUD_CONFIG
- self.seed = self.ds_cfg.get("serial_device")
- self.seed_timeout = self.ds_cfg.get("serial_timeout")
+ # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
+ # report 'BrandZ virtual linux' as the kernel version
+ if os.uname()[3].lower() == 'brandz virtual linux':
+ LOG.debug("Host is SmartOS, guest in Zone")
+ self.is_smartdc = True
+ self.smartos_type = 'lx-brand'
+ self.cfg = {}
+ self.seed = self.ds_cfg.get("metadata_sockfile")
+ else:
+ self.is_smartdc = True
+ self.smartos_type = 'kvm'
+ self.seed = self.ds_cfg.get("serial_device")
+ self.cfg = BUILTIN_CLOUD_CONFIG
+ self.seed_timeout = self.ds_cfg.get("serial_timeout")
self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
self.b64_keys = self.ds_cfg.get('base64_keys')
self.b64_all = self.ds_cfg.get('base64_all')
@@ -166,12 +187,49 @@ class DataSourceSmartOS(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
+ def _get_seed_file_object(self):
+ if not self.seed:
+ raise AttributeError("seed device is not set")
+
+ if self.smartos_type == 'lx-brand':
+ if not stat.S_ISSOCK(os.stat(self.seed).st_mode):
+ LOG.debug("Seed %s is not a socket", self.seed)
+ return None
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.connect(self.seed)
+ return sock.makefile('rwb')
+ else:
+ if not stat.S_ISCHR(os.stat(self.seed).st_mode):
+ LOG.debug("Seed %s is not a character device")
+ return None
+ ser = serial.Serial(self.seed, timeout=self.seed_timeout)
+ if not ser.isOpen():
+ raise SystemError("Unable to open %s" % self.seed)
+ return ser
+ return None
+
+ def _set_provisioned(self):
+ '''Mark the instance provisioning state as successful.
+
+ When run in a zone, the host OS will look for /var/svc/provisioning
+ to be renamed as /var/svc/provision_success. This should be done
+ after meta-data is successfully retrieved and from this point
+ the host considers the provision of the zone to be a success and
+ keeps the zone running.
+ '''
+
+ LOG.debug('Instance provisioning state set as successful')
+ svc_path = '/var/svc'
+ if os.path.exists('/'.join([svc_path, 'provisioning'])):
+ os.rename('/'.join([svc_path, 'provisioning']),
+ '/'.join([svc_path, 'provision_success']))
+
def get_data(self):
md = {}
ud = ""
if not device_exists(self.seed):
- LOG.debug("No serial device '%s' found for SmartOS datasource",
+ LOG.debug("No metadata device '%s' found for SmartOS datasource",
self.seed)
return False
@@ -181,29 +239,36 @@ class DataSourceSmartOS(sources.DataSource):
LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)")
return False
- dmi_info = dmi_data()
- if dmi_info is False:
- LOG.debug("No dmidata utility found")
- return False
-
- system_uuid, system_type = tuple(dmi_info)
- if 'smartdc' not in system_type.lower():
- LOG.debug("Host is not on SmartOS. system_type=%s", system_type)
+ # SDC KVM instances will provide dmi data, LX-brand does not
+ if self.smartos_type == 'kvm':
+ dmi_info = dmi_data()
+ if dmi_info is False:
+ LOG.debug("No dmidata utility found")
+ return False
+
+ system_type = dmi_info
+ if 'smartdc' not in system_type.lower():
+ LOG.debug("Host is not on SmartOS. system_type=%s",
+ system_type)
+ return False
+ LOG.debug("Host is SmartOS, guest in KVM")
+
+ seed_obj = self._get_seed_file_object()
+ if seed_obj is None:
+ LOG.debug('Seed file object not found.')
return False
- self.is_smartdc = True
- md['instance-id'] = system_uuid
-
- b64_keys = self.query('base64_keys', strip=True, b64=False)
- if b64_keys is not None:
- self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
+ with contextlib.closing(seed_obj) as seed:
+ b64_keys = self.query('base64_keys', seed, strip=True, b64=False)
+ if b64_keys is not None:
+ self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
- b64_all = self.query('base64_all', strip=True, b64=False)
- if b64_all is not None:
- self.b64_all = util.is_true(b64_all)
+ b64_all = self.query('base64_all', seed, strip=True, b64=False)
+ if b64_all is not None:
+ self.b64_all = util.is_true(b64_all)
- for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems():
- smartos_noun, strip = attribute
- md[ci_noun] = self.query(smartos_noun, strip=strip)
+ for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
+ smartos_noun, strip = attribute
+ md[ci_noun] = self.query(smartos_noun, seed, strip=strip)
# @datadictionary: This key may contain a program that is written
# to a file in the filesystem of the guest on each boot and then
@@ -218,11 +283,12 @@ class DataSourceSmartOS(sources.DataSource):
user_script = os.path.join(data_d, 'user-script')
u_script_l = "%s/user-script" % LEGACY_USER_D
write_boot_content(md.get('user-script'), content_f=user_script,
- link=u_script_l, shebang=True, mode=0700)
+ link=u_script_l, shebang=True, mode=0o700)
operator_script = os.path.join(data_d, 'operator-script')
write_boot_content(md.get('operator-script'),
- content_f=operator_script, shebang=False, mode=0700)
+ content_f=operator_script, shebang=False,
+ mode=0o700)
# @datadictionary: This key has no defined format, but its value
# is written to the file /var/db/mdata-user-data on each boot prior
@@ -235,7 +301,7 @@ class DataSourceSmartOS(sources.DataSource):
# Handle the cloud-init regular meta
if not md['local-hostname']:
- md['local-hostname'] = system_uuid
+ md['local-hostname'] = md['instance-id']
ud = None
if md['user-data']:
@@ -252,6 +318,8 @@ class DataSourceSmartOS(sources.DataSource):
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
self.vendordata_raw = md['vendor-data']
+
+ self._set_provisioned()
return True
def device_name_to_device(self, name):
@@ -263,125 +331,146 @@ class DataSourceSmartOS(sources.DataSource):
def get_instance_id(self):
return self.metadata['instance-id']
- def query(self, noun, strip=False, default=None, b64=None):
+ def query(self, noun, seed_file, strip=False, default=None, b64=None):
if b64 is None:
if noun in self.smartos_no_base64:
b64 = False
elif self.b64_all or noun in self.b64_keys:
b64 = True
- return query_data(noun=noun, strip=strip, seed_device=self.seed,
- seed_timeout=self.seed_timeout, default=default,
- b64=b64)
+ return self._query_data(noun, seed_file, strip=strip,
+ default=default, b64=b64)
+ def _query_data(self, noun, seed_file, strip=False,
+ default=None, b64=None):
+ """Makes a request via "GET <NOUN>"
-def device_exists(device):
- """Symplistic method to determine if the device exists or not"""
- return os.path.exists(device)
+ In the response, the first line is the status, while subsequent
+ lines are is the value. A blank line with a "." is used to
+ indicate end of response.
+ If the response is expected to be base64 encoded, then set
+ b64encoded to true. Unfortunately, there is no way to know if
+ something is 100% encoded, so this method relies on being told
+ if the data is base64 or not.
+ """
-def get_serial(seed_device, seed_timeout):
- """This is replaced in unit testing, allowing us to replace
- serial.Serial with a mocked class.
-
- The timeout value of 60 seconds should never be hit. The value
- is taken from SmartOS own provisioning tools. Since we are reading
- each line individually up until the single ".", the transfer is
- usually very fast (i.e. microseconds) to get the response.
- """
- if not seed_device:
- raise AttributeError("seed_device value is not set")
-
- ser = serial.Serial(seed_device, timeout=seed_timeout)
- if not ser.isOpen():
- raise SystemError("Unable to open %s" % seed_device)
-
- return ser
+ if not noun:
+ return False
+ response = JoyentMetadataClient(seed_file).get_metadata(noun)
-def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
- b64=None):
- """Makes a request to via the serial console via "GET <NOUN>"
+ if response is None:
+ return default
- In the response, the first line is the status, while subsequent lines
- are is the value. A blank line with a "." is used to indicate end of
- response.
+ if b64 is None:
+ b64 = self._query_data('b64-%s' % noun, seed_file, b64=False,
+ default=False, strip=True)
+ b64 = util.is_true(b64)
- If the response is expected to be base64 encoded, then set b64encoded
- to true. Unfortantely, there is no way to know if something is 100%
- encoded, so this method relies on being told if the data is base64 or
- not.
- """
+ resp = None
+ if b64 or strip:
+ resp = "".join(response).rstrip()
+ else:
+ resp = "".join(response)
- if not noun:
- return False
+ if b64:
+ try:
+ return util.b64d(resp)
+ # Bogus input produces different errors in Python 2 and 3;
+ # catch both.
+ except (TypeError, binascii.Error):
+ LOG.warn("Failed base64 decoding key '%s'", noun)
+ return resp
- ser = get_serial(seed_device, seed_timeout)
- ser.write("GET %s\n" % noun.rstrip())
- status = str(ser.readline()).rstrip()
- response = []
- eom_found = False
+ return resp
- if 'SUCCESS' not in status:
- ser.close()
- return default
- while not eom_found:
- m = ser.readline()
- if m.rstrip() == ".":
- eom_found = True
- else:
- response.append(m)
+def device_exists(device):
+ """Symplistic method to determine if the device exists or not"""
+ return os.path.exists(device)
- ser.close()
- if b64 is None:
- b64 = query_data('b64-%s' % noun, seed_device=seed_device,
- seed_timeout=seed_timeout, b64=False,
- default=False, strip=True)
- b64 = util.is_true(b64)
+class JoyentMetadataFetchException(Exception):
+ pass
- resp = None
- if b64 or strip:
- resp = "".join(response).rstrip()
- else:
- resp = "".join(response)
- if b64:
- try:
- return base64.b64decode(resp)
- except TypeError:
- LOG.warn("Failed base64 decoding key '%s'", noun)
- return resp
+class JoyentMetadataClient(object):
+ """
+ A client implementing v2 of the Joyent Metadata Protocol Specification.
- return resp
+ The full specification can be found at
+ http://eng.joyent.com/mdata/protocol.html
+ """
+ line_regex = re.compile(
+ r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
+ r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
+ r'( (?P<payload>.+))?)')
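+ # An example response frame this regex accepts (request id and
+ # checksum values hypothetical):
+ #   'V2 25 89f131b8 5d2a8f11 SUCCESS bXlob3N0'
+ # i.e. body length, crc32 of the body, request id, status, and
+ # an optional base64 payload ('bXlob3N0' decodes to 'myhost')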
+
+ def __init__(self, metasource):
+ self.metasource = metasource
+
+ def _checksum(self, body):
+ return '{0:08x}'.format(
+ binascii.crc32(body.encode('utf-8')) & 0xffffffff)
+
+ def _get_value_from_frame(self, expected_request_id, frame):
+ frame_data = self.line_regex.match(frame).groupdict()
+ if int(frame_data['length']) != len(frame_data['body']):
+ raise JoyentMetadataFetchException(
+ 'Incorrect frame length given ({0} != {1}).'.format(
+ frame_data['length'], len(frame_data['body'])))
+ expected_checksum = self._checksum(frame_data['body'])
+ if frame_data['checksum'] != expected_checksum:
+ raise JoyentMetadataFetchException(
+ 'Invalid checksum (expected: {0}; got {1}).'.format(
+ expected_checksum, frame_data['checksum']))
+ if frame_data['request_id'] != expected_request_id:
+ raise JoyentMetadataFetchException(
+ 'Request ID mismatch (expected: {0}; got {1}).'.format(
+ expected_request_id, frame_data['request_id']))
+ if not frame_data.get('payload', None):
+ LOG.debug('No value found.')
+ return None
+ value = util.b64d(frame_data['payload'])
+ LOG.debug('Value "%s" found.', value)
+ return value
+
+ def get_metadata(self, metadata_key):
+ LOG.debug('Fetching metadata key "%s"...', metadata_key)
+ request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
+ message_body = '{0} GET {1}'.format(request_id,
+ util.b64e(metadata_key))
+ msg = 'V2 {0} {1} {2}\n'.format(
+ len(message_body), self._checksum(message_body), message_body)
+ LOG.debug('Writing "%s" to metadata transport.', msg)
+ self.metasource.write(msg.encode('ascii'))
+ self.metasource.flush()
+
+ response = bytearray()
+ response.extend(self.metasource.read(1))
+ while response[-1:] != b'\n':
+ response.extend(self.metasource.read(1))
+ response = response.rstrip().decode('ascii')
+ LOG.debug('Read "%s" from metadata transport.', response)
+
+ if 'SUCCESS' not in response:
+ return None
+
+ return self._get_value_from_frame(request_id, response)
def dmi_data():
- sys_uuid, sys_type = None, None
- dmidecode_path = util.which('dmidecode')
- if not dmidecode_path:
- return False
-
- sys_uuid_cmd = [dmidecode_path, "-s", "system-uuid"]
- try:
- LOG.debug("Getting hostname from dmidecode")
- (sys_uuid, _err) = util.subp(sys_uuid_cmd)
- except Exception as e:
- util.logexc(LOG, "Failed to get system UUID", e)
+ sys_type = util.read_dmi_data("system-product-name")
- sys_type_cmd = [dmidecode_path, "-s", "system-product-name"]
- try:
- LOG.debug("Determining hypervisor product name via dmidecode")
- (sys_type, _err) = util.subp(sys_type_cmd)
- except Exception as e:
- util.logexc(LOG, "Failed to get system UUID", e)
+ if not sys_type:
+ return None
- return (sys_uuid.lower().strip(), sys_type.strip())
+ return sys_type
def write_boot_content(content, content_f, link=None, shebang=False,
- mode=0400):
+ mode=0o400):
"""
Write the content to content_f. Under the following rules:
1. If no content, remove the file
@@ -423,7 +512,7 @@ def write_boot_content(content, content_f, link=None, shebang=False,
except Exception as e:
util.logexc(LOG, ("Failed to identify script type for %s" %
- content_f, e))
+ content_f, e))
if link:
try:
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 7c7ef9ab..82cd3553 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -23,6 +23,8 @@
import abc
import os
+import six
+
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import type_utils
@@ -30,6 +32,7 @@ from cloudinit import user_data as ud
from cloudinit import util
from cloudinit.filters import launch_index
+from cloudinit.reporting import events
DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
@@ -130,7 +133,7 @@ class DataSource(object):
# we want to return the correct value for what will actually
# exist in this instance
mappings = {"sd": ("vd", "xvd", "vtb")}
- for (nfrom, tlist) in mappings.iteritems():
+ for (nfrom, tlist) in mappings.items():
if not short_name.startswith(nfrom):
continue
for nto in tlist:
@@ -155,6 +158,10 @@ class DataSource(object):
return self.metadata.get('availability-zone',
self.metadata.get('availability_zone'))
+ @property
+ def region(self):
+ return self.metadata.get('region')
+
def get_instance_id(self):
if not self.metadata or 'instance-id' not in self.metadata:
# Return a magic not really instance id string
@@ -208,8 +215,15 @@ class DataSource(object):
return hostname
def get_package_mirror_info(self):
- return self.distro.get_package_mirror_info(
- availability_zone=self.availability_zone)
+ return self.distro.get_package_mirror_info(data_source=self)
+
+ def check_instance_id(self, sys_cfg):
+ # quickly check (local only) whether self.instance_id is still valid
+ return False
+
+ @property
+ def network_config(self):
+ return None
def normalize_pubkey_data(pubkey_data):
@@ -218,18 +232,18 @@ def normalize_pubkey_data(pubkey_data):
if not pubkey_data:
return keys
- if isinstance(pubkey_data, (basestring, str)):
+ if isinstance(pubkey_data, six.string_types):
return str(pubkey_data).splitlines()
if isinstance(pubkey_data, (list, set)):
return list(pubkey_data)
if isinstance(pubkey_data, (dict)):
- for (_keyname, klist) in pubkey_data.iteritems():
+ for (_keyname, klist) in pubkey_data.items():
# lp:506332 uec metadata service responds with
# data that makes boto populate a string for 'klist' rather
# than a list.
- if isinstance(klist, (str, basestring)):
+ if isinstance(klist, six.string_types):
klist = [klist]
if isinstance(klist, (list, set)):
for pkey in klist:
@@ -241,17 +255,25 @@ def normalize_pubkey_data(pubkey_data):
return keys
-def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
+def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
ds_list = list_sources(cfg_list, ds_deps, pkg_list)
ds_names = [type_utils.obj_name(f) for f in ds_list]
- LOG.debug("Searching for data source in: %s", ds_names)
-
- for cls in ds_list:
+ mode = "network" if DEP_NETWORK in ds_deps else "local"
+ LOG.debug("Searching for %s data source in: %s", mode, ds_names)
+
+ for name, cls in zip(ds_names, ds_list):
+ myrep = events.ReportEventStack(
+ name="search-%s" % name.replace("DataSource", ""),
+ description="searching for %s data from %s" % (mode, name),
+ message="no %s data found from %s" % (mode, name),
+ parent=reporter)
try:
- LOG.debug("Seeing if we can get any data from %s", cls)
- s = cls(sys_cfg, distro, paths)
- if s.get_data():
- return (s, type_utils.obj_name(cls))
+ with myrep:
+ LOG.debug("Seeing if we can get any data from %s", cls)
+ s = cls(sys_cfg, distro, paths)
+ if s.get_data():
+ myrep.message = "found %s data from %s" % (mode, name)
+ return (s, type_utils.obj_name(cls))
except Exception:
util.logexc(LOG, "Getting data from %s failed", cls)
@@ -285,6 +307,18 @@ def list_sources(cfg_list, depends, pkg_list):
return src_list
+def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
+ # quickly check (local only) whether instance_id is still valid,
+ # by comparing it against the system's DMI data.
+ if not instance_id:
+ return False
+
+ dmi_value = util.read_dmi_data(field)
+ if not dmi_value:
+ return False
+ return instance_id.lower() == dmi_value.lower()
+
+
# 'depends' is a list of dependencies (DEP_FILESYSTEM)
# ds_list is a list of 2 item lists
# ds_list = [
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
new file mode 100644
index 00000000..018cac6d
--- /dev/null
+++ b/cloudinit/sources/helpers/azure.py
@@ -0,0 +1,278 @@
+import logging
+import os
+import re
+import socket
+import struct
+import tempfile
+import time
+from contextlib import contextmanager
+from xml.etree import ElementTree
+
+from cloudinit import util
+
+
+LOG = logging.getLogger(__name__)
+
+
+@contextmanager
+def cd(newdir):
+ prevdir = os.getcwd()
+ os.chdir(os.path.expanduser(newdir))
+ try:
+ yield
+ finally:
+ os.chdir(prevdir)
+
+
+class AzureEndpointHttpClient(object):
+
+ headers = {
+ 'x-ms-agent-name': 'WALinuxAgent',
+ 'x-ms-version': '2012-11-30',
+ }
+
+ def __init__(self, certificate):
+ self.extra_secure_headers = {
+ "x-ms-cipher-name": "DES_EDE3_CBC",
+ "x-ms-guest-agent-public-x509-cert": certificate,
+ }
+
+ def get(self, url, secure=False):
+ headers = self.headers
+ if secure:
+ headers = self.headers.copy()
+ headers.update(self.extra_secure_headers)
+ return util.read_file_or_url(url, headers=headers)
+
+ def post(self, url, data=None, extra_headers=None):
+ headers = self.headers
+ if extra_headers is not None:
+ headers = self.headers.copy()
+ headers.update(extra_headers)
+ return util.read_file_or_url(url, data=data, headers=headers)
+
+
+class GoalState(object):
+
+ def __init__(self, xml, http_client):
+ self.http_client = http_client
+ self.root = ElementTree.fromstring(xml)
+ self._certificates_xml = None
+
+ def _text_from_xpath(self, xpath):
+ element = self.root.find(xpath)
+ if element is not None:
+ return element.text
+ return None
+
+ @property
+ def container_id(self):
+ return self._text_from_xpath('./Container/ContainerId')
+
+ @property
+ def incarnation(self):
+ return self._text_from_xpath('./Incarnation')
+
+ @property
+ def instance_id(self):
+ return self._text_from_xpath(
+ './Container/RoleInstanceList/RoleInstance/InstanceId')
+
+ @property
+ def certificates_xml(self):
+ if self._certificates_xml is None:
+ url = self._text_from_xpath(
+ './Container/RoleInstanceList/RoleInstance'
+ '/Configuration/Certificates')
+ if url is not None:
+ self._certificates_xml = self.http_client.get(
+ url, secure=True).contents
+ return self._certificates_xml
+
+
+class OpenSSLManager(object):
+
+ certificate_names = {
+ 'private_key': 'TransportPrivate.pem',
+ 'certificate': 'TransportCert.pem',
+ }
+
+ def __init__(self):
+ self.tmpdir = tempfile.mkdtemp()
+ self.certificate = None
+ self.generate_certificate()
+
+ def clean_up(self):
+ util.del_dir(self.tmpdir)
+
+ def generate_certificate(self):
+ LOG.debug('Generating certificate for communication with fabric...')
+ if self.certificate is not None:
+ LOG.debug('Certificate already generated.')
+ return
+ with cd(self.tmpdir):
+ util.subp([
+ 'openssl', 'req', '-x509', '-nodes', '-subj',
+ '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
+ '-keyout', self.certificate_names['private_key'],
+ '-out', self.certificate_names['certificate'],
+ ])
+ certificate = ''
+ for line in open(self.certificate_names['certificate']):
+ if "CERTIFICATE" not in line:
+ certificate += line.rstrip()
+ self.certificate = certificate
+ LOG.debug('New certificate generated.')
+
+ def parse_certificates(self, certificates_xml):
+ tag = ElementTree.fromstring(certificates_xml).find(
+ './/Data')
+ certificates_content = tag.text
+ lines = [
+ b'MIME-Version: 1.0',
+ b'Content-Disposition: attachment; filename="Certificates.p7m"',
+ b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
+ b'Content-Transfer-Encoding: base64',
+ b'',
+ certificates_content.encode('utf-8'),
+ ]
+ with cd(self.tmpdir):
+ with open('Certificates.p7m', 'wb') as f:
+ f.write(b'\n'.join(lines))
+ out, _ = util.subp(
+ 'openssl cms -decrypt -in Certificates.p7m -inkey'
+ ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
+ ' -password pass:'.format(**self.certificate_names),
+ shell=True)
+ private_keys, certificates = [], []
+ current = []
+ for line in out.splitlines():
+ current.append(line)
+ if re.match(r'[-]+END .*?KEY[-]+$', line):
+ private_keys.append('\n'.join(current))
+ current = []
+ elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
+ certificates.append('\n'.join(current))
+ current = []
+ keys = []
+ for certificate in certificates:
+ with cd(self.tmpdir):
+ public_key, _ = util.subp(
+ 'openssl x509 -noout -pubkey |'
+ 'ssh-keygen -i -m PKCS8 -f /dev/stdin',
+ data=certificate,
+ shell=True)
+ keys.append(public_key)
+ return keys
+
+
+class WALinuxAgentShim(object):
+
+ REPORT_READY_XML_TEMPLATE = '\n'.join([
+ '<?xml version="1.0" encoding="utf-8"?>',
+ '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
+ ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
+ ' <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
+ ' <Container>',
+ ' <ContainerId>{container_id}</ContainerId>',
+ ' <RoleInstanceList>',
+ ' <Role>',
+ ' <InstanceId>{instance_id}</InstanceId>',
+ ' <Health>',
+ ' <State>Ready</State>',
+ ' </Health>',
+ ' </Role>',
+ ' </RoleInstanceList>',
+ ' </Container>',
+ '</Health>'])
+
+ def __init__(self):
+ LOG.debug('WALinuxAgentShim instantiated...')
+ self.endpoint = self.find_endpoint()
+ self.openssl_manager = None
+ self.values = {}
+
+ def clean_up(self):
+ if self.openssl_manager is not None:
+ self.openssl_manager.clean_up()
+
+ @staticmethod
+ def get_ip_from_lease_value(lease_value):
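+ # dhclient escapes the raw option bytes; e.g. a lease value
+ # of 'a8:3f:81:10' unpacks to the endpoint 168.63.129.16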
+ unescaped_value = lease_value.replace('\\', '')
+ if len(unescaped_value) > 4:
+ hex_string = ''
+ for hex_pair in unescaped_value.split(':'):
+ if len(hex_pair) == 1:
+ hex_pair = '0' + hex_pair
+ hex_string += hex_pair
+ packed_bytes = struct.pack(
+ '>L', int(hex_string.replace(':', ''), 16))
+ else:
+ packed_bytes = unescaped_value.encode('utf-8')
+ return socket.inet_ntoa(packed_bytes)
+
+ @staticmethod
+ def find_endpoint():
+ LOG.debug('Finding Azure endpoint...')
+ content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
+ value = None
+ for line in content.splitlines():
+ if 'unknown-245' in line:
+ value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
+ if value is None:
+ raise Exception('No endpoint found in DHCP config.')
+ endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
+ LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
+ return endpoint_ip_address
+
+ def register_with_azure_and_fetch_data(self):
+ self.openssl_manager = OpenSSLManager()
+ http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
+ LOG.info('Registering with Azure...')
+ attempts = 0
+ while True:
+ try:
+ response = http_client.get(
+ 'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
+ except Exception:
+ if attempts < 10:
+ time.sleep(attempts + 1)
+ else:
+ raise
+ else:
+ break
+ attempts += 1
+ LOG.debug('Successfully fetched GoalState XML.')
+ goal_state = GoalState(response.contents, http_client)
+ public_keys = []
+ if goal_state.certificates_xml is not None:
+ LOG.debug('Certificate XML found; parsing out public keys.')
+ public_keys = self.openssl_manager.parse_certificates(
+ goal_state.certificates_xml)
+ data = {
+ 'public-keys': public_keys,
+ }
+ self._report_ready(goal_state, http_client)
+ return data
+
+ def _report_ready(self, goal_state, http_client):
+ LOG.debug('Reporting ready to Azure fabric.')
+ document = self.REPORT_READY_XML_TEMPLATE.format(
+ incarnation=goal_state.incarnation,
+ container_id=goal_state.container_id,
+ instance_id=goal_state.instance_id,
+ )
+ http_client.post(
+ "http://{0}/machine?comp=health".format(self.endpoint),
+ data=document,
+ extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
+ )
+ LOG.info('Reported ready to Azure fabric.')
+
+
+def get_metadata_from_fabric():
+ shim = WALinuxAgentShim()
+ try:
+ return shim.register_with_azure_and_fetch_data()
+ finally:
+ shim.clean_up()
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index b7e19314..1aa6bbae 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -24,6 +24,8 @@ import copy
import functools
import os
+import six
+
from cloudinit import ec2_utils
from cloudinit import log as logging
from cloudinit import sources
@@ -49,11 +51,13 @@ OS_LATEST = 'latest'
OS_FOLSOM = '2012-08-10'
OS_GRIZZLY = '2013-04-04'
OS_HAVANA = '2013-10-17'
+OS_LIBERTY = '2015-10-15'
# keep this in chronological order. new supported versions go at the end.
OS_VERSIONS = (
OS_FOLSOM,
OS_GRIZZLY,
OS_HAVANA,
+ OS_LIBERTY,
)
@@ -205,7 +209,7 @@ class BaseReader(object):
"""
load_json_anytype = functools.partial(
- util.load_json, root_types=(dict, basestring, list))
+ util.load_json, root_types=(dict, list) + six.string_types)
def datafiles(version):
files = {}
@@ -227,6 +231,11 @@ class BaseReader(object):
False,
load_json_anytype,
)
+ files['networkdata'] = (
+ self._path_join("openstack", version, 'network_data.json'),
+ False,
+ load_json_anytype,
+ )
return files
results = {
@@ -234,7 +243,7 @@ class BaseReader(object):
'version': 2,
}
data = datafiles(self._find_working_version())
- for (name, (path, required, translator)) in data.iteritems():
+ for (name, (path, required, translator)) in data.items():
path = self._path_join(self.base_path, path)
data = None
found = False
@@ -325,14 +334,14 @@ class ConfigDriveReader(BaseReader):
return os.path.join(*components)
def _path_read(self, path):
- return util.load_file(path)
+ return util.load_file(path, decode=False)
def _fetch_available_versions(self):
if self._versions is None:
path = self._path_join(self.base_path, 'openstack')
found = [d for d in os.listdir(path)
if os.path.isdir(os.path.join(path))]
- self._versions = found
+ self._versions = sorted(found)
return self._versions
def _read_ec2_metadata(self):
@@ -364,7 +373,7 @@ class ConfigDriveReader(BaseReader):
raise NonReadable("%s: no files found" % (self.base_path))
md = {}
- for (name, (key, translator, default)) in FILES_V1.iteritems():
+ for (name, (key, translator, default)) in FILES_V1.items():
if name in found:
path = found[name]
try:
@@ -478,7 +487,7 @@ def convert_vendordata_json(data, recurse=True):
"""
if not data:
return None
- if isinstance(data, (str, unicode, basestring)):
+ if isinstance(data, six.string_types):
return data
if isinstance(data, list):
return copy.deepcopy(data)
diff --git a/cloudinit/sources/helpers/vmware/__init__.py b/cloudinit/sources/helpers/vmware/__init__.py
new file mode 100644
index 00000000..386225d5
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/__init__.py
@@ -0,0 +1,13 @@
+# vi: ts=4 expandtab
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/vmware/imc/__init__.py b/cloudinit/sources/helpers/vmware/imc/__init__.py
new file mode 100644
index 00000000..386225d5
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/__init__.py
@@ -0,0 +1,13 @@
+# vi: ts=4 expandtab
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
new file mode 100644
index 00000000..faba5887
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
@@ -0,0 +1,25 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class BootProtoEnum:
+ """Specifies the NIC Boot Settings."""
+
+ DHCP = 'dhcp'
+ STATIC = 'static'
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
new file mode 100644
index 00000000..aebc12a0
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -0,0 +1,95 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from .nic import Nic
+
+
+class Config:
+ """
+ Stores the Contents specified in the Customization
+ Specification file.
+ """
+
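+    # Flattened '<CATEGORY>|<KEY>' lookup keys, as produced by ConfigFile
+    # from the INI-style customization specification.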
+ DNS = 'DNS|NAMESERVER|'
+ SUFFIX = 'DNS|SUFFIX|'
+ PASS = 'PASSWORD|-PASS'
+ TIMEZONE = 'DATETIME|TIMEZONE'
+ UTC = 'DATETIME|UTC'
+ HOSTNAME = 'NETWORK|HOSTNAME'
+ DOMAINNAME = 'NETWORK|DOMAINNAME'
+
+ def __init__(self, configFile):
+ self._configFile = configFile
+
+ @property
+ def host_name(self):
+ """Return the hostname."""
+ return self._configFile.get(Config.HOSTNAME, None)
+
+ @property
+ def domain_name(self):
+ """Return the domain name."""
+ return self._configFile.get(Config.DOMAINNAME, None)
+
+ @property
+ def timezone(self):
+ """Return the timezone."""
+ return self._configFile.get(Config.TIMEZONE, None)
+
+ @property
+ def utc(self):
+ """Retrieves whether to set time to UTC or Local."""
+ return self._configFile.get(Config.UTC, None)
+
+ @property
+ def admin_password(self):
+ """Return the root password to be set."""
+ return self._configFile.get(Config.PASS, None)
+
+ @property
+ def name_servers(self):
+ """Return the list of DNS servers."""
+ res = []
+ cnt = self._configFile.get_count_with_prefix(Config.DNS)
+ for i in range(1, cnt + 1):
+ key = Config.DNS + str(i)
+ res.append(self._configFile[key])
+
+ return res
+
+ @property
+ def dns_suffixes(self):
+ """Return the list of DNS Suffixes."""
+ res = []
+ cnt = self._configFile.get_count_with_prefix(Config.SUFFIX)
+ for i in range(1, cnt + 1):
+ key = Config.SUFFIX + str(i)
+ res.append(self._configFile[key])
+
+ return res
+
+ @property
+ def nics(self):
+ """Return the list of associated NICs."""
+ res = []
+ nics = self._configFile['NIC-CONFIG|NICS']
+ for nic in nics.split(','):
+ res.append(Nic(nic, self._configFile))
+
+ return res
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
new file mode 100644
index 00000000..bb9fb7dc
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -0,0 +1,129 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
+
+from .config_source import ConfigSource
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigFile(ConfigSource, dict):
+ """ConfigFile module to load the content from a specified source."""
+
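+    # Keys are flattened to '<CATEGORY>|<key>'; e.g. a hypothetical
+    # cust.cfg containing
+    #     [NETWORK]
+    #     HOSTNAME = myhost
+    # is exposed as self['NETWORK|HOSTNAME'] == 'myhost'.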
+ def __init__(self, filename):
+ self._loadConfigFile(filename)
+
+ def _insertKey(self, key, val):
+ """
+ Inserts a Key Value pair.
+
+ Keyword arguments:
+ key -- The key to insert
+ val -- The value to insert for the key
+
+ """
+ key = key.strip()
+ val = val.strip()
+
+ if key.startswith('-') or '|-' in key:
+ canLog = False
+ else:
+ canLog = True
+
+ # "sensitive" settings shall not be logged
+ if canLog:
+ logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val))
+ else:
+ logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key)
+
+ self[key] = val
+
+ def _loadConfigFile(self, filename):
+ """
+ Parses properties from the specified config file.
+
+ Any previously available properties will be removed.
+        Sensitive data will not be logged when the key starts
+        with '-'.
+
+ Keyword arguments:
+ filename - The full path to the config file.
+ """
+        logger.info('Parsing the config file %s.', filename)
+
+ config = configparser.ConfigParser()
+ config.optionxform = str
+ config.read(filename)
+
+ self.clear()
+
+ for category in config.sections():
+ logger.debug("FOUND CATEGORY = '%s'" % category)
+
+ for (key, value) in config.items(category):
+ self._insertKey(category + '|' + key, value)
+
+ def should_keep_current_value(self, key):
+ """
+ Determines whether a value for a property must be kept.
+
+        If the property is missing, it is treated as if it should not be
+        changed by the engine.
+
+ Keyword arguments:
+ key -- The key to search for.
+ """
+ # helps to distinguish from "empty" value which is used to indicate
+ # "removal"
+ return key not in self
+
+ def should_remove_current_value(self, key):
+ """
+ Determines whether a value for the property must be removed.
+
+        If the specified key is empty, it is treated as if it should be
+        removed by the engine.
+
+ Return true if the value can be removed, false otherwise.
+
+ Keyword arguments:
+ key -- The key to search for.
+ """
+ # helps to distinguish from "missing" value which is used to indicate
+ # "keeping unchanged"
+ if key in self:
+ return not bool(self[key])
+ else:
+ return False
+
+ def get_count_with_prefix(self, prefix):
+ """
+ Return the total count of keys that start with the specified prefix.
+
+ Keyword arguments:
+ prefix -- prefix of the key
+ """
+ return len([key for key in self if key.startswith(prefix)])
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
new file mode 100644
index 00000000..7266b699
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -0,0 +1,25 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from .config_source import ConfigSource
+
+
+class ConfigNamespace(ConfigSource):
+ """Specifies the Config Namespace."""
+ pass
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
new file mode 100644
index 00000000..77098a05
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -0,0 +1,247 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2016 VMware INC.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+import os
+import re
+
+from cloudinit import util
+
+logger = logging.getLogger(__name__)
+
+
+class NicConfigurator:
+ def __init__(self, nics):
+ """
+ Initialize the Nic Configurator
+ @param nics (list) an array of nics to configure
+ """
+ self.nics = nics
+ self.mac2Name = {}
+ self.ipv4PrimaryGateway = None
+ self.ipv6PrimaryGateway = None
+ self.find_devices()
+ self._primaryNic = self.get_primary_nic()
+
+ def get_primary_nic(self):
+ """
+ Retrieve the primary nic if it exists
+        @return (NicBase): the primary nic if it exists, None otherwise
+ """
+ primary_nics = [nic for nic in self.nics if nic.primary]
+ if not primary_nics:
+ return None
+ elif len(primary_nics) > 1:
+ raise Exception('There can only be one primary nic',
+ [nic.mac for nic in primary_nics])
+ else:
+ return primary_nics[0]
+
+ def find_devices(self):
+ """
+ Create the mac2Name dictionary
+        The MAC addresses are stored in lower case
+ """
+ cmd = ['ip', 'addr', 'show']
+ (output, err) = util.subp(cmd)
+ sections = re.split(r'\n\d+: ', '\n' + output)[1:]
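+        # each section is now one "N: <name>: ..." stanza of the
+        # 'ip addr show' output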
+
+ macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
+ for section in sections:
+ match = re.search(macPat, section)
+ if not match: # Only keep info about nics
+ continue
+ mac = match.group(1).lower()
+ name = section.split(':', 1)[0]
+ self.mac2Name[mac] = name
+
+ def gen_one_nic(self, nic):
+ """
+ Return the lines needed to configure a nic
+ @return (str list): the string list to configure the nic
+ @param nic (NicBase): the nic to configure
+ """
+ lines = []
+ name = self.mac2Name.get(nic.mac.lower())
+ if not name:
+ raise ValueError('No known device has MACADDR: %s' % nic.mac)
+
+ if nic.onboot:
+ lines.append('auto %s' % name)
+
+ # Customize IPv4
+ lines.extend(self.gen_ipv4(name, nic))
+
+ # Customize IPv6
+ lines.extend(self.gen_ipv6(name, nic))
+
+ lines.append('')
+
+ return lines
+
+ def gen_ipv4(self, name, nic):
+ """
+ Return the lines needed to configure the IPv4 setting of a nic
+        @return (str list): the string list to configure the IPv4 settings
+ @param name (str): name of the nic
+ @param nic (NicBase): the nic to configure
+ """
+ lines = []
+
+ bootproto = nic.bootProto.lower()
+ if nic.ipv4_mode.lower() == 'disabled':
+ bootproto = 'manual'
+ lines.append('iface %s inet %s' % (name, bootproto))
+
+ if bootproto != 'static':
+ return lines
+
+ # Static Ipv4
+ v4 = nic.staticIpv4
+ if v4.ip:
+ lines.append(' address %s' % v4.ip)
+ if v4.netmask:
+ lines.append(' netmask %s' % v4.netmask)
+
+ # Add the primary gateway
+ if nic.primary and v4.gateways:
+ self.ipv4PrimaryGateway = v4.gateways[0]
+ lines.append(' gateway %s metric 0' % self.ipv4PrimaryGateway)
+ return lines
+
+ # Add routes if there is no primary nic
+ if not self._primaryNic:
+ lines.extend(self.gen_ipv4_route(nic, v4.gateways))
+
+ return lines
+
+ def gen_ipv4_route(self, nic, gateways):
+ """
+ Return the lines needed to configure additional Ipv4 route
+ @return (str list): the string list to configure the gateways
+ @param nic (NicBase): the nic to configure
+ @param gateways (str list): the list of gateways
+ """
+ lines = []
+
+ for gateway in gateways:
+ lines.append(' up route add default gw %s metric 10000' %
+ gateway)
+
+ return lines
+
+ def gen_ipv6(self, name, nic):
+ """
+        Return the lines needed to configure the IPv6 settings of a nic
+        @return (str list): the string list to configure the IPv6 settings
+ @param name (str): name of the nic
+ @param nic (NicBase): the nic to configure
+ """
+ lines = []
+
+ if not nic.staticIpv6:
+ return lines
+
+ # Static Ipv6
+ addrs = nic.staticIpv6
+ lines.append('iface %s inet6 static' % name)
+ lines.append(' address %s' % addrs[0].ip)
+ lines.append(' netmask %s' % addrs[0].netmask)
+
+ for addr in addrs[1:]:
+ lines.append(' up ifconfig %s inet6 add %s/%s' % (name, addr.ip,
+ addr.netmask))
+ # Add the primary gateway
+ if nic.primary:
+ for addr in addrs:
+ if addr.gateway:
+ self.ipv6PrimaryGateway = addr.gateway
+ lines.append(' gateway %s' % self.ipv6PrimaryGateway)
+ return lines
+
+ # Add routes if there is no primary nic
+ if not self._primaryNic:
+ lines.extend(self._genIpv6Route(name, nic, addrs))
+
+ return lines
+
+ def _genIpv6Route(self, name, nic, addrs):
+ lines = []
+
+ for addr in addrs:
+ lines.append(' up route -A inet6 add default gw '
+ '%s metric 10000' % addr.gateway)
+
+ return lines
+
+ def generate(self):
+ """Return the lines that is needed to configure the nics"""
+ lines = []
+ lines.append('iface lo inet loopback')
+ lines.append('auto lo')
+ lines.append('')
+
+ for nic in self.nics:
+ lines.extend(self.gen_one_nic(nic))
+
+ return lines
+
+ def clear_dhcp(self):
+ logger.info('Clearing DHCP leases')
+
+ # Ignore the return code 1.
+ util.subp(["pkill", "dhclient"], rcs=[0, 1])
+ util.subp(["rm", "-f", "/var/lib/dhcp/*"])
+
+ def if_down_up(self):
+ names = []
+ for nic in self.nics:
+ name = self.mac2Name.get(nic.mac.lower())
+ names.append(name)
+
+        for name in names:
+            logger.info('Bringing down interface %s', name)
+            util.subp(["ifdown", name])
+
+        self.clear_dhcp()
+
+        for name in names:
+            logger.info('Bringing up interface %s', name)
+            util.subp(["ifup", name])
+
+ def configure(self):
+ """
+        Configure /etc/network/interfaces
+        Make a backup of the original
+ """
+ containingDir = '/etc/network'
+
+ interfaceFile = os.path.join(containingDir, 'interfaces')
+ originalFile = os.path.join(containingDir,
+ 'interfaces.before_vmware_customization')
+
+ if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
+ os.rename(interfaceFile, originalFile)
+
+ lines = self.generate()
+ with open(interfaceFile, 'w') as fp:
+ for line in lines:
+ fp.write('%s\n' % line)
+
+ self.if_down_up()
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
new file mode 100644
index 00000000..a367e476
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -0,0 +1,23 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ConfigSource:
+ """Specifies a source for the Config Content."""
+ pass
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
new file mode 100644
index 00000000..1b04161f
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
@@ -0,0 +1,24 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class GuestCustErrorEnum:
+ """Specifies different errors of Guest Customization engine"""
+
+ GUESTCUST_ERROR_SUCCESS = 0
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
new file mode 100644
index 00000000..fc22568f
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
@@ -0,0 +1,27 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class GuestCustEventEnum:
+ """Specifies different types of Guest Customization Events"""
+
+ GUESTCUST_EVENT_CUSTOMIZE_FAILED = 100
+ GUESTCUST_EVENT_NETWORK_SETUP_FAILED = 101
+ GUESTCUST_EVENT_ENABLE_NICS = 103
+ GUESTCUST_EVENT_QUERY_NICS = 104
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
new file mode 100644
index 00000000..f255be5f
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
@@ -0,0 +1,25 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class GuestCustStateEnum:
+ """Specifies different states of Guest Customization engine"""
+
+ GUESTCUST_STATE_RUNNING = 4
+ GUESTCUST_STATE_DONE = 5
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
new file mode 100644
index 00000000..d39f0a65
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -0,0 +1,128 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+import os
+import time
+
+from cloudinit import util
+
+from .guestcust_state import GuestCustStateEnum
+from .guestcust_event import GuestCustEventEnum
+
+logger = logging.getLogger(__name__)
+
+
+CLOUDINIT_LOG_FILE = "/var/log/cloud-init.log"
+QUERY_NICS_SUPPORTED = "queryNicsSupported"
+NICS_STATUS_CONNECTED = "connected"
+
+
+# This will send an RPC command to the underlying
+# VMware Virtualization Platform.
+def send_rpc(rpc):
+ if not rpc:
+ return None
+
+ out = ""
+ err = "Error sending the RPC command"
+
+ try:
+ logger.debug("Sending RPC command: %s", rpc)
+ (out, err) = util.subp(["vmware-rpctool", rpc], rcs=[0])
+ # Remove the trailing newline in the output.
+ if out:
+ out = out.rstrip()
+ except Exception as e:
+ logger.debug("Failed to send RPC command")
+ logger.exception(e)
+
+ return (out, err)
+
+
+# This will send the customization status to the
+# underlying VMware Virtualization Platform.
+def set_customization_status(custstate, custerror, errormessage=None):
+ message = ""
+
+ if errormessage:
+ message = CLOUDINIT_LOG_FILE + "@" + errormessage
+ else:
+ message = CLOUDINIT_LOG_FILE
+
+ rpc = "deployPkg.update.state %d %d %s" % (custstate, custerror, message)
+ (out, err) = send_rpc(rpc)
+ return (out, err)
+
+
+# This will read the file nics.txt in the specified directory
+# and return the content
+def get_nics_to_enable(dirpath):
+ if not dirpath:
+ return None
+
+ NICS_SIZE = 1024
+ nicsfilepath = os.path.join(dirpath, "nics.txt")
+ if not os.path.exists(nicsfilepath):
+ return None
+
+ with open(nicsfilepath, 'r') as fp:
+ nics = fp.read(NICS_SIZE)
+
+ return nics
+
+
+# This will send an RPC command to the underlying VMware Virtualization platform
+# and enable nics.
+def enable_nics(nics):
+ if not nics:
+ logger.warning("No Nics found")
+ return
+
+ enableNicsWaitRetries = 5
+ enableNicsWaitCount = 5
+ enableNicsWaitSeconds = 1
+
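+    # Ask the platform to enable the NICs; if it answers
+    # 'queryNicsSupported', poll until it reports 'connected' or the
+    # wait budget runs out.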
+ for attempt in range(0, enableNicsWaitRetries):
+ logger.debug("Trying to connect interfaces, attempt %d", attempt)
+ (out, err) = set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,
+ nics)
+ if not out:
+ time.sleep(enableNicsWaitCount * enableNicsWaitSeconds)
+ continue
+
+ if out != QUERY_NICS_SUPPORTED:
+ logger.warning("NICS connection status query is not supported")
+ return
+
+ for count in range(0, enableNicsWaitCount):
+ (out, err) = set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS,
+ nics)
+ if out and out == NICS_STATUS_CONNECTED:
+ logger.info("NICS are connected on %d second", count)
+ return
+
+ time.sleep(enableNicsWaitSeconds)
+
+ logger.warning("Can't connect network interfaces after %d attempts",
+ enableNicsWaitRetries)
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
new file mode 100644
index 00000000..33f88726
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
@@ -0,0 +1,45 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class Ipv4ModeEnum:
+ """
+ The IPv4 configuration mode which directly represents the user's goal.
+
+ This mode effectively acts as a contract of the in-guest customization
+ engine. It must be set based on what the user has requested and should
+ not be changed by those layers. It's up to the in-guest engine to
+ interpret and materialize the user's request.
+ """
+
+ # The legacy mode which only allows dhcp/static based on whether IPv4
+ # addresses list is empty or not
+ IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'
+
+ # IPv4 must use static address. Reserved for future use
+ IPV4_MODE_STATIC = 'STATIC'
+
+ # IPv4 must use DHCPv4. Reserved for future use
+ IPV4_MODE_DHCP = 'DHCP'
+
+ # IPv4 must be disabled
+ IPV4_MODE_DISABLED = 'DISABLED'
+
+ # IPv4 settings should be left untouched. Reserved for future use
+ IPV4_MODE_AS_IS = 'AS_IS'
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
new file mode 100644
index 00000000..b5d704ea
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -0,0 +1,147 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from .boot_proto import BootProtoEnum
+from .nic_base import NicBase, StaticIpv4Base, StaticIpv6Base
+
+
+class Nic(NicBase):
+ """
+ Holds the information about each NIC specified
+ in the customization specification file
+ """
+
+ def __init__(self, name, configFile):
+ self._name = name
+ self._configFile = configFile
+
+ def _get(self, what):
+ return self._configFile.get(self.name + '|' + what, None)
+
+ def _get_count_with_prefix(self, prefix):
+ return self._configFile.get_count_with_prefix(self.name + prefix)
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def mac(self):
+ return self._get('MACADDR').lower()
+
+ @property
+ def primary(self):
+ value = self._get('PRIMARY')
+ if value:
+ value = value.lower()
+ return value == 'yes' or value == 'true'
+ else:
+ return False
+
+ @property
+ def onboot(self):
+ value = self._get('ONBOOT')
+ if value:
+ value = value.lower()
+ return value == 'yes' or value == 'true'
+ else:
+ return False
+
+ @property
+ def bootProto(self):
+ value = self._get('BOOTPROTO')
+ if value:
+ return value.lower()
+ else:
+ return ""
+
+ @property
+ def ipv4_mode(self):
+ value = self._get('IPv4_MODE')
+ if value:
+ return value.lower()
+ else:
+ return ""
+
+ @property
+ def staticIpv4(self):
+ """
+        Checks the BOOTPROTO property and returns a StaticIpv4Addr
+        configuration object if STATIC configuration is set.
+ """
+ if self.bootProto == BootProtoEnum.STATIC:
+ return [StaticIpv4Addr(self)]
+ else:
+ return None
+
+ @property
+ def staticIpv6(self):
+ cnt = self._get_count_with_prefix('|IPv6ADDR|')
+
+ if not cnt:
+ return None
+
+ result = []
+ for index in range(1, cnt + 1):
+ result.append(StaticIpv6Addr(self, index))
+
+ return result
+
+
+class StaticIpv4Addr(StaticIpv4Base):
+ """Static IPV4 Setting."""
+
+ def __init__(self, nic):
+ self._nic = nic
+
+ @property
+ def ip(self):
+ return self._nic._get('IPADDR')
+
+ @property
+ def netmask(self):
+ return self._nic._get('NETMASK')
+
+ @property
+ def gateways(self):
+ value = self._nic._get('GATEWAY')
+ if value:
+ return [x.strip() for x in value.split(',')]
+ else:
+ return None
+
+
+class StaticIpv6Addr(StaticIpv6Base):
+ """Static IPV6 Address."""
+
+ def __init__(self, nic, index):
+ self._nic = nic
+ self._index = index
+
+ @property
+ def ip(self):
+ return self._nic._get('IPv6ADDR|' + str(self._index))
+
+ @property
+ def netmask(self):
+ return self._nic._get('IPv6NETMASK|' + str(self._index))
+
+ @property
+ def gateway(self):
+ return self._nic._get('IPv6GATEWAY|' + str(self._index))
diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py
new file mode 100644
index 00000000..030ba311
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/nic_base.py
@@ -0,0 +1,154 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class NicBase:
+ """
+    Define what is expected of each nic.
+ The following properties should be provided in an implementation class.
+ """
+
+ @property
+ def mac(self):
+ """
+ Retrieves the mac address of the nic
+ @return (str) : the MACADDR setting
+ """
+ raise NotImplementedError('MACADDR')
+
+ @property
+ def primary(self):
+ """
+ Retrieves whether the nic is the primary nic
+        Indicates whether the NIC will be used to define the default gateway.
+        If none of the NICs is configured to be primary, the default gateway
+        won't be set.
+ @return (bool): the PRIMARY setting
+ """
+ raise NotImplementedError('PRIMARY')
+
+ @property
+ def onboot(self):
+ """
+ Retrieves whether the nic should be up at the boot time
+ @return (bool) : the ONBOOT setting
+ """
+ raise NotImplementedError('ONBOOT')
+
+ @property
+ def bootProto(self):
+ """
+ Retrieves the boot protocol of the nic
+ @return (str): the BOOTPROTO setting, valid values: dhcp and static.
+ """
+ raise NotImplementedError('BOOTPROTO')
+
+ @property
+ def ipv4_mode(self):
+ """
+ Retrieves the IPv4_MODE
+ @return (str): the IPv4_MODE setting, valid values:
+ backwards_compatible, static, dhcp, disabled, as_is
+ """
+ raise NotImplementedError('IPv4_MODE')
+
+ @property
+ def staticIpv4(self):
+ """
+ Retrieves the static IPv4 configuration of the nic
+ @return (StaticIpv4Base list): the static ipv4 setting
+ """
+ raise NotImplementedError('Static IPv4')
+
+ @property
+ def staticIpv6(self):
+ """
+ Retrieves the IPv6 configuration of the nic
+ @return (StaticIpv6Base list): the static ipv6 setting
+ """
+ raise NotImplementedError('Static Ipv6')
+
+ def validate(self):
+ """
+ Validate the object
+ For example, the staticIpv4 property is required and should not be
+ empty when ipv4Mode is STATIC
+ """
+ raise NotImplementedError('Check constraints on properties')
+
+
+class StaticIpv4Base:
+ """
+    Define what is expected of a static IPv4 setting
+ The following properties should be provided in an implementation class.
+ """
+
+ @property
+ def ip(self):
+ """
+ Retrieves the Ipv4 address
+ @return (str): the IPADDR setting
+ """
+ raise NotImplementedError('Ipv4 Address')
+
+ @property
+ def netmask(self):
+ """
+ Retrieves the Ipv4 NETMASK setting
+ @return (str): the NETMASK setting
+ """
+ raise NotImplementedError('Ipv4 NETMASK')
+
+ @property
+ def gateways(self):
+ """
+ Retrieves the gateways on this Ipv4 subnet
+ @return (str list): the GATEWAY setting
+ """
+ raise NotImplementedError('Ipv4 GATEWAY')
+
+
+class StaticIpv6Base:
+ """Define what are expected of a static IPv6 setting
+ The following properties should be provided in an implementation class.
+ """
+
+ @property
+ def ip(self):
+ """
+ Retrieves the Ipv6 address
+ @return (str): the IPv6ADDR setting
+ """
+ raise NotImplementedError('Ipv6 Address')
+
+ @property
+ def netmask(self):
+ """
+ Retrieves the Ipv6 NETMASK setting
+ @return (str): the IPv6NETMASK setting
+ """
+ raise NotImplementedError('Ipv6 NETMASK')
+
+ @property
+ def gateway(self):
+ """
+ Retrieves the Ipv6 GATEWAY setting
+ @return (str): the IPv6GATEWAY setting
+ """
+ raise NotImplementedError('Ipv6 GATEWAY')
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 14d0cb0f..c74a7ae2 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -31,7 +31,8 @@ LOG = logging.getLogger(__name__)
DEF_SSHD_CFG = "/etc/ssh/sshd_config"
# taken from openssh source key.c/key_type_from_name
-VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa",
+VALID_KEY_TYPES = (
+ "rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa",
"ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
"ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
"ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com",
@@ -239,7 +240,7 @@ def setup_user_keys(keys, username, options=None):
# Make sure the users .ssh dir is setup accordingly
(ssh_dir, pwent) = users_ssh_info(username)
if not os.path.isdir(ssh_dir):
- util.ensure_dir(ssh_dir, mode=0700)
+ util.ensure_dir(ssh_dir, mode=0o700)
util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
# Turn the 'update' keys given into actual entries
@@ -252,8 +253,8 @@ def setup_user_keys(keys, username, options=None):
(auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
with util.SeLinuxGuard(ssh_dir, recursive=True):
content = update_authorized_keys(auth_key_entries, key_entries)
- util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700)
- util.write_file(auth_key_fn, content, mode=0600)
+ util.ensure_dir(os.path.dirname(auth_key_fn), mode=0o700)
+ util.write_file(auth_key_fn, content, mode=0o600)
util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 67f467f7..3fbb4443 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -20,12 +20,13 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cPickle as pickle
-
import copy
import os
import sys
+import six
+from six.moves import cPickle as pickle
+
from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG)
from cloudinit import handlers
@@ -42,9 +43,11 @@ from cloudinit import distros
from cloudinit import helpers
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import net
from cloudinit import sources
from cloudinit import type_utils
from cloudinit import util
+from cloudinit.reporting import events
LOG = logging.getLogger(__name__)
@@ -52,7 +55,7 @@ NULL_DATA_SOURCE = None
class Init(object):
- def __init__(self, ds_deps=None):
+ def __init__(self, ds_deps=None, reporter=None):
if ds_deps is not None:
self.ds_deps = ds_deps
else:
@@ -64,6 +67,12 @@ class Init(object):
# Changed only when a fetch occurs
self.datasource = NULL_DATA_SOURCE
+ if reporter is None:
+ reporter = events.ReportEventStack(
+ name="init-reporter", description="init-desc",
+ reporting_enabled=False)
+ self.reporter = reporter
+
def _reset(self, reset_ds=False):
# Recreated on access
self._cfg = None
@@ -132,7 +141,7 @@ class Init(object):
]
return initial_dirs
- def purge_cache(self, rm_instance_lnk=True):
+ def purge_cache(self, rm_instance_lnk=False):
rm_list = []
rm_list.append(self.paths.boot_finished)
if rm_instance_lnk:
@@ -147,16 +156,25 @@ class Init(object):
def _initialize_filesystem(self):
util.ensure_dirs(self._initial_subdirs())
log_file = util.get_cfg_option_str(self.cfg, 'def_log_file')
- perms = util.get_cfg_option_str(self.cfg, 'syslog_fix_perms')
if log_file:
util.ensure_file(log_file)
- if perms:
- u, g = util.extract_usergroup(perms)
+ perms = self.cfg.get('syslog_fix_perms')
+ if not perms:
+ perms = {}
+ if not isinstance(perms, list):
+ perms = [perms]
+
+ error = None
+ for perm in perms:
+ u, g = util.extract_usergroup(perm)
try:
util.chownbyname(log_file, u, g)
- except OSError:
- util.logexc(LOG, "Unable to change the ownership of %s to "
- "user %s, group %s", log_file, u, g)
+ return
+ except OSError as e:
+ error = e
+
+ LOG.warn("Failed changing perms on '%s'. tried: %s. %s",
+ log_file, ','.join(perms), error)
def read_cfg(self, extra_fns=None):
# None check so that we don't keep on re-loading if empty
@@ -176,37 +194,12 @@ class Init(object):
# We try to restore from a current link and static path
# by using the instance link, if purge_cache was called
# the file wont exist.
- pickled_fn = self.paths.get_ipath_cur('obj_pkl')
- pickle_contents = None
- try:
- pickle_contents = util.load_file(pickled_fn)
- except Exception:
- pass
- # This is expected so just return nothing
- # successfully loaded...
- if not pickle_contents:
- return None
- try:
- return pickle.loads(pickle_contents)
- except Exception:
- util.logexc(LOG, "Failed loading pickled blob from %s", pickled_fn)
- return None
+ return _pkl_load(self.paths.get_ipath_cur('obj_pkl'))
def _write_to_cache(self):
if self.datasource is NULL_DATA_SOURCE:
return False
- pickled_fn = self.paths.get_ipath_cur("obj_pkl")
- try:
- pk_contents = pickle.dumps(self.datasource)
- except Exception:
- util.logexc(LOG, "Failed pickling datasource %s", self.datasource)
- return False
- try:
- util.write_file(pickled_fn, pk_contents, mode=0400)
- except Exception:
- util.logexc(LOG, "Failed pickling datasource to %s", pickled_fn)
- return False
- return True
+ return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl"))
def _get_datasources(self):
# Any config provided???
@@ -218,13 +211,30 @@ class Init(object):
cfg_list = self.cfg.get('datasource_list') or []
return (cfg_list, pkg_list)
- def _get_data_source(self):
+ def _get_data_source(self, existing):
if self.datasource is not NULL_DATA_SOURCE:
return self.datasource
- ds = self._restore_from_cache()
- if ds:
- LOG.debug("Restored from cache, datasource: %s", ds)
+
+ with events.ReportEventStack(
+ name="check-cache",
+ description="attempting to read from cache [%s]" % existing,
+ parent=self.reporter) as myrep:
+ ds = self._restore_from_cache()
+ if ds and existing == "trust":
+ myrep.description = "restored from cache: %s" % ds
+ elif ds and existing == "check":
+ if (hasattr(ds, 'check_instance_id') and
+ ds.check_instance_id(self.cfg)):
+ myrep.description = "restored from checked cache: %s" % ds
+ else:
+ myrep.description = "cache invalid in datasource: %s" % ds
+ ds = None
+ else:
+ myrep.description = "no cache found"
+ LOG.debug(myrep.description)
+
if not ds:
+ util.del_file(self.paths.instance_link)
(cfg_list, pkg_list) = self._get_datasources()
# Deep copy so that user-data handlers can not modify
# (which will affect user-data handlers down the line...)
@@ -233,7 +243,7 @@ class Init(object):
self.paths,
copy.deepcopy(self.ds_deps),
cfg_list,
- pkg_list)
+ pkg_list, self.reporter)
LOG.info("Loaded datasource %s - %s", dsname, ds)
self.datasource = ds
# Ensure we adjust our path members datasource
@@ -304,8 +314,8 @@ class Init(object):
self._reset()
return iid
- def fetch(self):
- return self._get_data_source()
+ def fetch(self, existing="check"):
+ return self._get_data_source(existing=existing)
def instancify(self):
return self._reflect_cur_instance()
@@ -314,7 +324,8 @@ class Init(object):
# Form the needed options to cloudify our members
return cloud.Cloud(self.datasource,
self.paths, self.cfg,
- self.distro, helpers.Runners(self.paths))
+ self.distro, helpers.Runners(self.paths),
+ reporter=self.reporter)
def update(self):
if not self._write_to_cache():
@@ -323,16 +334,27 @@ class Init(object):
self._store_vendordata()
def _store_userdata(self):
- raw_ud = "%s" % (self.datasource.get_userdata_raw())
- util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0600)
- processed_ud = "%s" % (self.datasource.get_userdata())
- util.write_file(self._get_ipath('userdata'), processed_ud, 0600)
+ raw_ud = self.datasource.get_userdata_raw()
+ if raw_ud is None:
+ raw_ud = b''
+ util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0o600)
+        # processed userdata is a Mime message, so write it as string.
+        processed_ud = self.datasource.get_userdata()
+        if processed_ud is None:
+            processed_ud = ''
+ util.write_file(self._get_ipath('userdata'), str(processed_ud), 0o600)
def _store_vendordata(self):
- raw_vd = "%s" % (self.datasource.get_vendordata_raw())
- util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0600)
- processed_vd = "%s" % (self.datasource.get_vendordata())
- util.write_file(self._get_ipath('vendordata'), processed_vd, 0600)
+ raw_vd = self.datasource.get_vendordata_raw()
+ if raw_vd is None:
+ raw_vd = b''
+ util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0o600)
+        # processed vendor data is a Mime message, so write it as string.
+        processed_vd = self.datasource.get_vendordata()
+        if processed_vd is None:
+            processed_vd = ''
+ util.write_file(self._get_ipath('vendordata'), str(processed_vd),
+ 0o600)
def _default_handlers(self, opts=None):
if opts is None:
@@ -384,7 +406,7 @@ class Init(object):
if not path or not os.path.isdir(path):
return
potential_handlers = util.find_modules(path)
- for (fname, mod_name) in potential_handlers.iteritems():
+ for (fname, mod_name) in potential_handlers.items():
try:
mod_locs, looked_locs = importer.find_module(
mod_name, [''], ['list_types', 'handle_part'])
@@ -422,7 +444,7 @@ class Init(object):
def init_handlers():
# Init the handlers first
- for (_ctype, mod) in c_handlers.iteritems():
+ for (_ctype, mod) in c_handlers.items():
if mod in c_handlers.initialized:
# Avoid initing the same module twice (if said module
# is registered to more than one content-type).
@@ -449,7 +471,7 @@ class Init(object):
def finalize_handlers():
# Give callbacks opportunity to finalize
- for (_ctype, mod) in c_handlers.iteritems():
+ for (_ctype, mod) in c_handlers.items():
if mod not in c_handlers.initialized:
# Said module was never inited in the first place, so lets
# not attempt to finalize those that never got called.
@@ -469,8 +491,14 @@ class Init(object):
def consume_data(self, frequency=PER_INSTANCE):
# Consume the userdata first, because we need want to let the part
# handlers run first (for merging stuff)
- self._consume_userdata(frequency)
- self._consume_vendordata(frequency)
+ with events.ReportEventStack("consume-user-data",
+ "reading and applying user-data",
+ parent=self.reporter):
+ self._consume_userdata(frequency)
+ with events.ReportEventStack("consume-vendor-data",
+ "reading and applying vendor-data",
+ parent=self.reporter):
+ self._consume_vendordata(frequency)
# Perform post-consumption adjustments so that
# modules that run during the init stage reflect
@@ -541,13 +569,53 @@ class Init(object):
# Run the handlers
self._do_handlers(user_data_msg, c_handlers_list, frequency)
+ def _find_networking_config(self):
+ disable_file = os.path.join(
+ self.paths.get_cpath('data'), 'upgraded-network')
+ if os.path.exists(disable_file):
+ return (None, disable_file)
+
+ cmdline_cfg = ('cmdline', net.read_kernel_cmdline_config())
+ dscfg = ('ds', None)
+ if self.datasource and hasattr(self.datasource, 'network_config'):
+ dscfg = ('ds', self.datasource.network_config)
+ sys_cfg = ('system_cfg', self.cfg.get('network'))
+
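+        # precedence: kernel cmdline, then datasource, then system
+        # config; fall back to a generated config if none provide one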
+ for loc, ncfg in (cmdline_cfg, dscfg, sys_cfg):
+ if net.is_disabled_cfg(ncfg):
+ LOG.debug("network config disabled by %s", loc)
+ return (None, loc)
+ if ncfg:
+ return (ncfg, loc)
+ return (net.generate_fallback_config(), "fallback")
+
+ def apply_network_config(self):
+ netcfg, src = self._find_networking_config()
+ if netcfg is None:
+ LOG.info("network config is disabled by %s", src)
+ return
+
+ LOG.info("Applying network configuration from %s: %s", src, netcfg)
+ try:
+ return self.distro.apply_network_config(netcfg)
+ except NotImplementedError:
+ LOG.warn("distro '%s' does not implement apply_network_config. "
+ "networking may not be configured properly." %
+ self.distro)
+ return
+
class Modules(object):
- def __init__(self, init, cfg_files=None):
+ def __init__(self, init, cfg_files=None, reporter=None):
self.init = init
self.cfg_files = cfg_files
# Created on first use
self._cached_cfg = None
+ if reporter is None:
+ reporter = events.ReportEventStack(
+ name="module-reporter", description="module-desc",
+ reporting_enabled=False)
+ self.reporter = reporter
@property
def cfg(self):
@@ -574,7 +642,7 @@ class Modules(object):
for item in cfg_mods:
if not item:
continue
- if isinstance(item, (str, basestring)):
+ if isinstance(item, six.string_types):
module_list.append({
'mod': item.strip(),
})
@@ -604,7 +672,7 @@ class Modules(object):
else:
raise TypeError(("Failed to read '%s' item in config,"
" unknown type %s") %
- (item, type_utils.obj_name(item)))
+ (item, type_utils.obj_name(item)))
return module_list
def _fixup_modules(self, raw_mods):
@@ -657,7 +725,19 @@ class Modules(object):
which_ran.append(name)
# This name will affect the semaphore name created
run_name = "config-%s" % (name)
- cc.run(run_name, mod.handle, func_args, freq=freq)
+
+ desc = "running %s with frequency %s" % (run_name, freq)
+ myrep = events.ReportEventStack(
+ name=run_name, description=desc, parent=self.reporter)
+
+ with myrep:
+ ran, _r = cc.run(run_name, mod.handle, func_args,
+ freq=freq)
+ if ran:
+ myrep.message = "%s ran successfully" % run_name
+ else:
+ myrep.message = "%s previously ran" % run_name
+
except Exception as e:
util.logexc(LOG, "Running module %s (%s) failed", name, mod)
failures.append((name, e))
@@ -699,8 +779,8 @@ class Modules(object):
if skipped:
LOG.info("Skipping modules %s because they are not verified "
- "on distro '%s'. To run anyway, add them to "
- "'unverified_modules' in config.", skipped, d_name)
+ "on distro '%s'. To run anyway, add them to "
+ "'unverified_modules' in config.", skipped, d_name)
if forced:
LOG.info("running unverified_modules: %s", forced)
@@ -725,3 +805,36 @@ def fetch_base_config():
base_cfgs.append(default_cfg)
return util.mergemanydict(base_cfgs)
+
+
+def _pkl_store(obj, fname):
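+    # Serialize the datasource and write it read-only; failures are
+    # logged rather than raised so boot can continue without a cache.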
+ try:
+ pk_contents = pickle.dumps(obj)
+ except Exception:
+ util.logexc(LOG, "Failed pickling datasource %s", obj)
+ return False
+ try:
+ util.write_file(fname, pk_contents, omode="wb", mode=0o400)
+ except Exception:
+ util.logexc(LOG, "Failed pickling datasource to %s", fname)
+ return False
+ return True
+
+
+def _pkl_load(fname):
+ pickle_contents = None
+ try:
+ pickle_contents = util.load_file(fname, decode=False)
+ except Exception as e:
+ if os.path.isfile(fname):
+ LOG.warn("failed loading pickle in %s: %s" % (fname, e))
+ pass
+
+    # an empty or missing pickle is expected; just return None
+ if not pickle_contents:
+ return None
+ try:
+ return pickle.loads(pickle_contents)
+ except Exception:
+ util.logexc(LOG, "Failed loading pickled blob from %s", fname)
+ return None
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index 4cd3f13d..a9231482 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -137,7 +137,7 @@ def render_from_file(fn, params):
return renderer(content, params)
-def render_to_file(fn, outfn, params, mode=0644):
+def render_to_file(fn, outfn, params, mode=0o644):
contents = render_from_file(fn, params)
util.write_file(outfn, contents, mode=mode)
diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py
index cc3d9495..b93efd6a 100644
--- a/cloudinit/type_utils.py
+++ b/cloudinit/type_utils.py
@@ -22,11 +22,31 @@
import types
+import six
+
+
+if six.PY3:
+ _NAME_TYPES = (
+ types.ModuleType,
+ types.FunctionType,
+ types.LambdaType,
+ type,
+ )
+else:
+ _NAME_TYPES = (
+ types.TypeType,
+ types.ModuleType,
+ types.FunctionType,
+ types.LambdaType,
+ types.ClassType,
+ )
+
def obj_name(obj):
- if isinstance(obj, (types.TypeType,
- types.ModuleType,
- types.FunctionType,
- types.LambdaType)):
- return str(obj.__name__)
- return obj_name(obj.__class__)
+ if isinstance(obj, _NAME_TYPES):
+ return six.text_type(obj.__name__)
+ else:
+ if not hasattr(obj, '__class__'):
+ return repr(obj)
+ else:
+ return obj_name(obj.__class__)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 3074dd08..936f7da5 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -20,21 +20,33 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import httplib
+import json
+import os
+import requests
+import six
import time
-import urllib
-import requests
+from email.utils import parsedate
+from functools import partial
from requests import exceptions
+import oauthlib.oauth1 as oauth1
-from urlparse import (urlparse, urlunparse)
+from six.moves.urllib.parse import (
+ urlparse, urlunparse,
+ quote as urlquote)
from cloudinit import log as logging
from cloudinit import version
LOG = logging.getLogger(__name__)
-NOT_FOUND = httplib.NOT_FOUND
+if six.PY2:
+ import httplib
+ NOT_FOUND = httplib.NOT_FOUND
+else:
+ import http.client
+ NOT_FOUND = http.client.NOT_FOUND
+
# Check if requests has ssl support (added in requests >= 0.8.8)
SSL_ENABLED = False
@@ -70,7 +82,7 @@ def combine_url(base, *add_ons):
path = url_parsed[2]
if path and not path.endswith("/"):
path += "/"
- path += urllib.quote(str(add_on), safe="/:")
+ path += urlquote(str(add_on), safe="/:")
url_parsed[2] = path
return urlunparse(url_parsed)
@@ -135,17 +147,18 @@ class UrlResponse(object):
return self._response.status_code
def __str__(self):
- return self.contents
+ return self._response.text
class UrlError(IOError):
- def __init__(self, cause, code=None, headers=None):
+ def __init__(self, cause, code=None, headers=None, url=None):
IOError.__init__(self, str(cause))
self.cause = cause
self.code = code
self.headers = headers
if self.headers is None:
self.headers = {}
+ self.url = url
def _get_ssl_args(url, ssl_details):
@@ -198,10 +211,14 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
manual_tries = 1
if retries:
manual_tries = max(int(retries) + 1, 1)
- if not headers:
- headers = {
- 'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
- }
+
+ def_headers = {
+ 'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
+ }
+ if headers:
+ def_headers.update(headers)
+ headers = def_headers
+
if not headers_cb:
def _cb(url):
return headers
@@ -235,18 +252,21 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
# attrs
return UrlResponse(r)
except exceptions.RequestException as e:
- if (isinstance(e, (exceptions.HTTPError))
- and hasattr(e, 'response') # This appeared in v 0.10.8
- and hasattr(e.response, 'status_code')):
+ if (isinstance(e, (exceptions.HTTPError)) and
+ hasattr(e, 'response') and # This appeared in v 0.10.8
+ hasattr(e.response, 'status_code')):
excps.append(UrlError(e, code=e.response.status_code,
- headers=e.response.headers))
+ headers=e.response.headers,
+ url=url))
else:
- excps.append(UrlError(e))
+ excps.append(UrlError(e, url=url))
if SSL_ENABLED and isinstance(e, exceptions.SSLError):
# ssl exceptions are not going to get fixed by waiting a
# few seconds
break
- if exception_cb and not exception_cb(req_args.copy(), excps[-1]):
+ if exception_cb and exception_cb(req_args.copy(), excps[-1]):
+ # if an exception callback was given, it should return None to
+ # continue retrying; a true-ish return value means break and
+ # re-raise the exception
break
if i + 1 < manual_tries and sec_between > 0:
LOG.debug("Please wait %s seconds while we wait to try again",
@@ -313,7 +333,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
timeout = int((start_time + max_wait) - now)
reason = ""
- e = None
+ url_exc = None
try:
if headers_cb is not None:
headers = headers_cb(url)
@@ -324,18 +344,20 @@ def wait_for_url(urls, max_wait=None, timeout=None,
check_status=False)
if not response.contents:
reason = "empty response [%s]" % (response.code)
- e = UrlError(ValueError(reason),
- code=response.code, headers=response.headers)
+ url_exc = UrlError(ValueError(reason), code=response.code,
+ headers=response.headers, url=url)
elif not response.ok():
reason = "bad status code [%s]" % (response.code)
- e = UrlError(ValueError(reason),
- code=response.code, headers=response.headers)
+ url_exc = UrlError(ValueError(reason), code=response.code,
+ headers=response.headers, url=url)
else:
return url
except UrlError as e:
reason = "request error [%s]" % e
+ url_exc = e
except Exception as e:
reason = "unexpected error [%s]" % e
+ url_exc = e
time_taken = int(time.time() - start_time)
status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url,
@@ -347,7 +369,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
# This can be used to alter the headers that will be sent
# in the future, for example this is what the MAAS datasource
# does.
- exception_cb(msg=status_msg, exception=e)
+ exception_cb(msg=status_msg, exception=url_exc)
if timeup(max_wait, start_time):
break
@@ -358,3 +380,129 @@ def wait_for_url(urls, max_wait=None, timeout=None,
time.sleep(sleep_time)
return False
+
+
+class OauthUrlHelper(object):
+ def __init__(self, consumer_key=None, token_key=None,
+ token_secret=None, consumer_secret=None,
+ skew_data_file="/run/oauth_skew.json"):
+ self.consumer_key = consumer_key
+ self.consumer_secret = consumer_secret or ""
+ self.token_key = token_key
+ self.token_secret = token_secret
+ self.skew_data_file = skew_data_file
+ self._do_oauth = True
+ self.skew_change_limit = 5
+ required = (self.token_key, self.token_secret, self.consumer_key)
+ if not any(required):
+ self._do_oauth = False
+ elif not all(required):
+ raise ValueError("all or none of token_key, token_secret, or "
+ "consumer_key can be set")
+
+ old = self.read_skew_file()
+ self.skew_data = old or {}
+
+ def read_skew_file(self):
+ if self.skew_data_file and os.path.isfile(self.skew_data_file):
+ with open(self.skew_data_file, mode="r") as fp:
+ return json.load(fp)
+ return None
+
+ def update_skew_file(self, host, value):
+ # note: this read/modify/write of the skew file is not atomic
+ if not self.skew_data_file:
+ return
+ cur = self.read_skew_file()
+ if cur is None:
+ cur = {}
+ cur[host] = value
+ with open(self.skew_data_file, mode="w") as fp:
+ fp.write(json.dumps(cur))
+
+ def exception_cb(self, msg, exception):
+ if not (isinstance(exception, UrlError) and
+ (exception.code == 403 or exception.code == 401)):
+ return
+
+ if 'date' not in exception.headers:
+ LOG.warn("Missing header 'date' in %s response", exception.code)
+ return
+
+ date = exception.headers['date']
+ try:
+ remote_time = time.mktime(parsedate(date))
+ except Exception as e:
+ LOG.warn("Failed to convert datetime '%s': %s", date, e)
+ return
+
+ skew = int(remote_time - time.time())
+ host = urlparse(exception.url).netloc
+ old_skew = self.skew_data.get(host, 0)
+ if abs(old_skew - skew) > self.skew_change_limit:
+ self.update_skew_file(host, skew)
+ LOG.warn("Setting oauth clockskew for %s to %d", host, skew)
+ self.skew_data[host] = skew
+
+ return
+
+ def headers_cb(self, url):
+ if not self._do_oauth:
+ return {}
+
+ timestamp = None
+ host = urlparse(url).netloc
+ if self.skew_data and host in self.skew_data:
+ timestamp = int(time.time()) + self.skew_data[host]
+
+ return oauth_headers(
+ url=url, consumer_key=self.consumer_key,
+ token_key=self.token_key, token_secret=self.token_secret,
+ consumer_secret=self.consumer_secret, timestamp=timestamp)
+
+ def _wrapped(self, wrapped_func, args, kwargs):
+ kwargs['headers_cb'] = partial(
+ self._headers_cb, kwargs.get('headers_cb'))
+ kwargs['exception_cb'] = partial(
+ self._exception_cb, kwargs.get('exception_cb'))
+ return wrapped_func(*args, **kwargs)
+
+ def wait_for_url(self, *args, **kwargs):
+ return self._wrapped(wait_for_url, args, kwargs)
+
+ def readurl(self, *args, **kwargs):
+ return self._wrapped(readurl, args, kwargs)
+
+ def _exception_cb(self, extra_exception_cb, msg, exception):
+ ret = None
+ try:
+ if extra_exception_cb:
+ ret = extra_exception_cb(msg, exception)
+ finally:
+ self.exception_cb(msg, exception)
+ return ret
+
+ def _headers_cb(self, extra_headers_cb, url):
+ headers = {}
+ if extra_headers_cb:
+ headers = extra_headers_cb(url)
+ headers.update(self.headers_cb(url))
+ return headers
+
+
+def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
+ timestamp=None):
+ if timestamp:
+ timestamp = str(timestamp)
+ else:
+ timestamp = None
+
+ client = oauth1.Client(
+ consumer_key,
+ client_secret=consumer_secret,
+ resource_owner_key=token_key,
+ resource_owner_secret=token_secret,
+ signature_method=oauth1.SIGNATURE_PLAINTEXT,
+ timestamp=timestamp)
+ uri, signed_headers, body = client.sign(url)
+ return signed_headers
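
Tying the new pieces together, a hedged usage sketch (keys and URL are placeholders): OauthUrlHelper wraps readurl/wait_for_url so every request is signed via its headers_cb, while its exception_cb learns per-host clock skew from the 'date' header of 401/403 responses and feeds it into the next oauth timestamp.

    from cloudinit.url_helper import OauthUrlHelper

    helper = OauthUrlHelper(consumer_key='ck', token_key='tk',
                            token_secret='ts')
    # caller-supplied headers_cb/exception_cb, if any, are chained in
    # via the partials built by _wrapped()
    resp = helper.readurl("http://maas.example.invalid/MAAS/metadata/",
                          retries=3)
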
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index de6487d8..f7c5787c 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -22,13 +22,13 @@
import os
-import email
-
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.mime.text import MIMEText
+import six
+
from cloudinit import handlers
from cloudinit import log as logging
from cloudinit import util
@@ -49,6 +49,7 @@ INCLUDE_TYPES = ['text/x-include-url', 'text/x-include-once-url']
ARCHIVE_TYPES = ["text/cloud-config-archive"]
UNDEF_TYPE = "text/plain"
ARCHIVE_UNDEF_TYPE = "text/cloud-config"
+ARCHIVE_UNDEF_BINARY_TYPE = "application/octet-stream"
# This seems to hit most of the gzip possible content types.
DECOMP_TYPES = [
@@ -106,7 +107,7 @@ class UserDataProcessor(object):
ctype = None
ctype_orig = part.get_content_type()
- payload = part.get_payload(decode=True)
+ payload = util.fully_decoded_payload(part)
was_compressed = False
# When the message states it is of a gzipped content type ensure
@@ -235,9 +236,9 @@ class UserDataProcessor(object):
resp = util.read_file_or_url(include_url,
ssl_details=self.ssl_details)
if include_once_on and resp.ok():
- util.write_file(include_once_fn, str(resp), mode=0600)
+ util.write_file(include_once_fn, resp.contents, mode=0o600)
if resp.ok():
- content = str(resp)
+ content = resp.contents
else:
LOG.warn(("Fetching from %s resulted in"
" a invalid http code of %s"),
@@ -256,7 +257,7 @@ class UserDataProcessor(object):
# filename and type not be present
# or
# scalar(payload)
- if isinstance(ent, (str, basestring)):
+ if isinstance(ent, six.string_types):
ent = {'content': ent}
if not isinstance(ent, (dict)):
# TODO(harlowja) raise?
@@ -265,11 +266,15 @@ class UserDataProcessor(object):
content = ent.get('content', '')
mtype = ent.get('type')
if not mtype:
- mtype = handlers.type_from_starts_with(content,
- ARCHIVE_UNDEF_TYPE)
+ default = ARCHIVE_UNDEF_TYPE
+ if isinstance(content, six.binary_type):
+ default = ARCHIVE_UNDEF_BINARY_TYPE
+ mtype = handlers.type_from_starts_with(content, default)
maintype, subtype = mtype.split('/', 1)
if maintype == "text":
+ if isinstance(content, six.binary_type):
+ content = content.decode()
msg = MIMEText(content, _subtype=subtype)
else:
msg = MIMEBase(maintype, subtype)
@@ -334,10 +339,10 @@ def convert_string(raw_data, headers=None):
raw_data = ''
if not headers:
headers = {}
- data = util.decomp_gzip(raw_data)
+ data = util.decode_binary(util.decomp_gzip(raw_data))
if "mime-version:" in data[0:4096].lower():
- msg = email.message_from_string(data)
- for (key, val) in headers.iteritems():
+ msg = util.message_from_string(data)
+ for (key, val) in headers.items():
_replace_header(msg, key, val)
else:
mtype = headers.get(CONTENT_TYPE, NOT_MULTIPART_TYPE)
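
To illustrate the new ARCHIVE_UNDEF_BINARY_TYPE fallback, a small standalone sketch of the dispatch rule used above (relying only on type_from_starts_with, which the hunk itself calls): bytes content that cannot be sniffed as a known type now defaults to application/octet-stream rather than text/cloud-config.

    import six

    from cloudinit import handlers
    from cloudinit.user_data import (ARCHIVE_UNDEF_BINARY_TYPE,
                                     ARCHIVE_UNDEF_TYPE)

    def default_mtype(content):
        # binary payloads fall back to application/octet-stream,
        # text payloads to text/cloud-config
        default = ARCHIVE_UNDEF_TYPE
        if isinstance(content, six.binary_type):
            default = ARCHIVE_UNDEF_BINARY_TYPE
        return handlers.type_from_starts_with(content, default)
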
diff --git a/cloudinit/util.py b/cloudinit/util.py
index bf8e7d80..0d21e11b 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -20,11 +20,10 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from StringIO import StringIO
-
import contextlib
import copy as obj_copy
import ctypes
+import email
import errno
import glob
import grp
@@ -45,8 +44,11 @@ import subprocess
import sys
import tempfile
import time
-import urlparse
+from base64 import b64decode, b64encode
+from six.moves.urllib import parse as urlparse
+
+import six
import yaml
from cloudinit import importer
@@ -69,8 +71,93 @@ FN_REPLACEMENTS = {
}
FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
+TRUE_STRINGS = ('true', '1', 'on', 'yes')
+FALSE_STRINGS = ('off', '0', 'no', 'false')
+
+
# Helper utils to see if running in a container
-CONTAINER_TESTS = ['running-in-container', 'lxc-is-container']
+CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
+ ['running-in-container'],
+ ['lxc-is-container'])
+
+PROC_CMDLINE = None
+
+
+def decode_binary(blob, encoding='utf-8'):
+ # Converts a binary type into a text type using the given encoding.
+ if isinstance(blob, six.text_type):
+ return blob
+ return blob.decode(encoding)
+
+
+def encode_text(text, encoding='utf-8'):
+ # Converts a text string into a binary type using the given encoding.
+ if isinstance(text, six.binary_type):
+ return text
+ return text.encode(encoding)
+
+
+def b64d(source):
+ # Base64 decode some data, accepting bytes or unicode/str, and returning
+ # str/unicode if the result is utf-8 compatible, otherwise returning bytes.
+ decoded = b64decode(source)
+ try:
+ return decoded.decode('utf-8')
+ except UnicodeDecodeError:
+ return decoded
+
+
+def b64e(source):
+ # Base64 encode some data, accepting bytes or unicode/str; the base64
+ # alphabet is plain ASCII, so the result always decodes to str/unicode.
+ if not isinstance(source, bytes):
+ source = source.encode('utf-8')
+ return b64encode(source).decode('utf-8')
+
+
+def fully_decoded_payload(part):
+ # In Python 3, decoding the payload will ironically hand us a bytes object.
+ # 'decode' means to decode according to Content-Transfer-Encoding, not
+ # according to any charset in the Content-Type. So, if we end up with
+ # bytes, first try to decode to str via CT charset, and failing that, try
+ # utf-8 using surrogate escapes.
+ cte_payload = part.get_payload(decode=True)
+ if (six.PY3 and
+ part.get_content_maintype() == 'text' and
+ isinstance(cte_payload, bytes)):
+ charset = part.get_charset()
+ if charset and charset.input_codec:
+ encoding = charset.input_codec
+ else:
+ encoding = 'utf-8'
+ return cte_payload.decode(encoding, errors='surrogateescape')
+ return cte_payload
+
+
+# Path for DMI Data
+DMI_SYS_PATH = "/sys/class/dmi/id"
+
+ # dmidecode and /sys/class/dmi/id/* use different names for the same
+ # value; this mapping lets us refer to them by one canonical name
+DMIDECODE_TO_DMI_SYS_MAPPING = {
+ 'baseboard-asset-tag': 'board_asset_tag',
+ 'baseboard-manufacturer': 'board_vendor',
+ 'baseboard-product-name': 'board_name',
+ 'baseboard-serial-number': 'board_serial',
+ 'baseboard-version': 'board_version',
+ 'bios-release-date': 'bios_date',
+ 'bios-vendor': 'bios_vendor',
+ 'bios-version': 'bios_version',
+ 'chassis-asset-tag': 'chassis_asset_tag',
+ 'chassis-manufacturer': 'chassis_vendor',
+ 'chassis-serial-number': 'chassis_serial',
+ 'chassis-version': 'chassis_version',
+ 'system-manufacturer': 'sys_vendor',
+ 'system-product-name': 'product_name',
+ 'system-serial-number': 'product_serial',
+ 'system-uuid': 'product_uuid',
+ 'system-version': 'product_version',
+}
class ProcessExecutionError(IOError):
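
A quick round-trip sketch of the new base64 helpers: b64e always yields text, while b64d yields text when the decoded bytes are valid utf-8 and raw bytes otherwise.

    from cloudinit.util import b64d, b64e

    assert b64e(b'hello') == 'aGVsbG8='            # always a text result
    assert b64d('aGVsbG8=') == 'hello'             # utf-8 clean -> str
    assert b64d(b64e(b'\xff\x00')) == b'\xff\x00'  # not utf-8 -> bytes
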
@@ -95,7 +182,7 @@ class ProcessExecutionError(IOError):
else:
self.description = description
- if not isinstance(exit_code, (long, int)):
+ if not isinstance(exit_code, six.integer_types):
self.exit_code = '-'
else:
self.exit_code = exit_code
@@ -124,6 +211,9 @@ class ProcessExecutionError(IOError):
'reason': self.reason,
}
IOError.__init__(self, message)
+ # For backward compatibility with Python 2.
+ if not hasattr(self, 'message'):
+ self.message = message
class SeLinuxGuard(object):
@@ -151,7 +241,8 @@ class SeLinuxGuard(object):
path = os.path.realpath(self.path)
# path should be a string, not unicode
- path = str(path)
+ if six.PY2:
+ path = str(path)
try:
stats = os.lstat(path)
self.selinux.matchpathcon(path, stats[stat.ST_MODE])
@@ -209,10 +300,10 @@ def fork_cb(child_cb, *args, **kwargs):
def is_true(val, addons=None):
if isinstance(val, (bool)):
return val is True
- check_set = ['true', '1', 'on', 'yes']
+ check_set = TRUE_STRINGS
if addons:
- check_set = check_set + addons
- if str(val).lower().strip() in check_set:
+ check_set = list(check_set) + addons
+ if six.text_type(val).lower().strip() in check_set:
return True
return False
@@ -220,10 +311,10 @@ def is_true(val, addons=None):
def is_false(val, addons=None):
if isinstance(val, (bool)):
return val is False
- check_set = ['off', '0', 'no', 'false']
+ check_set = FALSE_STRINGS
if addons:
- check_set = check_set + addons
- if str(val).lower().strip() in check_set:
+ check_set = list(check_set) + addons
+ if six.text_type(val).lower().strip() in check_set:
return True
return False
@@ -241,7 +332,7 @@ def translate_bool(val, addons=None):
def rand_str(strlen=32, select_from=None):
if not select_from:
- select_from = string.letters + string.digits
+ select_from = string.ascii_letters + string.digits
return "".join([random.choice(select_from) for _x in range(0, strlen)])
@@ -273,7 +364,7 @@ def uniq_merge_sorted(*lists):
def uniq_merge(*lists):
combined_list = []
for a_list in lists:
- if isinstance(a_list, (str, basestring)):
+ if isinstance(a_list, six.string_types):
a_list = a_list.strip().split(",")
# Kickout the empty ones
a_list = [a for a in a_list if len(a)]
@@ -282,7 +373,7 @@ def uniq_merge(*lists):
def clean_filename(fn):
- for (k, v) in FN_REPLACEMENTS.iteritems():
+ for (k, v) in FN_REPLACEMENTS.items():
fn = fn.replace(k, v)
removals = []
for k in fn:
@@ -294,16 +385,19 @@ def clean_filename(fn):
return fn
-def decomp_gzip(data, quiet=True):
+def decomp_gzip(data, quiet=True, decode=True):
try:
- buf = StringIO(str(data))
+ buf = six.BytesIO(encode_text(data))
with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
- return gh.read()
+ if decode:
+ return decode_binary(gh.read())
+ else:
+ return gh.read()
except Exception as e:
if quiet:
return data
else:
- raise DecompressionError(str(e))
+ raise DecompressionError(six.text_type(e))
def extract_usergroup(ug_pair):
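
The new decode flag on decomp_gzip, sketched: by default the decompressed payload comes back as text, while decode=False preserves bytes (useful for binary user-data).

    import gzip
    import io

    from cloudinit.util import decomp_gzip

    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode='wb') as fh:
        fh.write(b'#cloud-config\n')
    blob = buf.getvalue()

    assert decomp_gzip(blob) == '#cloud-config\n'                 # text
    assert decomp_gzip(blob, decode=False) == b'#cloud-config\n'  # bytes
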
@@ -341,7 +435,7 @@ def multi_log(text, console=True, stderr=True,
if console:
conpath = "/dev/console"
if os.path.exists(conpath):
- with open(conpath, 'wb') as wfh:
+ with open(conpath, 'w') as wfh:
wfh.write(text)
wfh.flush()
else:
@@ -362,7 +456,7 @@ def multi_log(text, console=True, stderr=True,
def load_json(text, root_types=(dict,)):
- decoded = json.loads(text)
+ decoded = json.loads(decode_binary(text))
if not isinstance(decoded, tuple(root_types)):
expected_types = ", ".join([str(t) for t in root_types])
raise TypeError("(%s) root types expected, got %s instead"
@@ -394,7 +488,7 @@ def get_cfg_option_str(yobj, key, default=None):
if key not in yobj:
return default
val = yobj[key]
- if not isinstance(val, (str, basestring)):
+ if not isinstance(val, six.string_types):
val = str(val)
return val
@@ -433,7 +527,7 @@ def get_cfg_option_list(yobj, key, default=None):
if isinstance(val, (list)):
cval = [v for v in val]
return cval
- if not isinstance(val, (basestring)):
+ if not isinstance(val, six.string_types):
val = str(val)
return [val]
@@ -520,7 +614,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
def make_url(scheme, host, port=None,
- path='', params='', query='', fragment=''):
+ path='', params='', query='', fragment=''):
pieces = []
pieces.append(scheme or '')
@@ -687,12 +781,13 @@ def read_file_or_url(url, timeout=5, retries=10,
LOG.warn("Unable to post data to file resource %s", url)
file_path = url[len("file://"):]
try:
- contents = load_file(file_path)
+ contents = load_file(file_path, decode=False)
except IOError as e:
code = e.errno
if e.errno == errno.ENOENT:
code = url_helper.NOT_FOUND
- raise url_helper.UrlError(cause=e, code=code, headers=None)
+ raise url_helper.UrlError(cause=e, code=code, headers=None,
+ url=url)
return url_helper.FileResponse(file_path, contents=contents)
else:
return url_helper.readurl(url,
@@ -708,11 +803,11 @@ def read_file_or_url(url, timeout=5, retries=10,
def load_yaml(blob, default=None, allowed=(dict,)):
loaded = default
+ blob = decode_binary(blob)
try:
- blob = str(blob)
- LOG.debug(("Attempting to load yaml from string "
- "of length %s with allowed root types %s"),
- len(blob), allowed)
+ LOG.debug("Attempting to load yaml from string "
+ "of length %s with allowed root types %s",
+ len(blob), allowed)
converted = safeyaml.load(blob)
if not isinstance(converted, allowed):
# Yes this will just be caught, but thats ok for now...
@@ -746,14 +841,12 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
md = None
if md_resp.ok():
- md_str = str(md_resp)
- md = load_yaml(md_str, default={})
+ md = load_yaml(decode_binary(md_resp.contents), default={})
ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries)
ud = None
if ud_resp.ok():
- ud_str = str(ud_resp)
- ud = ud_str
+ ud = ud_resp.contents
return (md, ud)
@@ -784,10 +877,10 @@ def read_conf_with_confd(cfgfile):
if "conf_d" in cfg:
confd = cfg['conf_d']
if confd:
- if not isinstance(confd, (str, basestring)):
+ if not isinstance(confd, six.string_types):
raise TypeError(("Config file %s contains 'conf_d' "
"with non-string type %s") %
- (cfgfile, type_utils.obj_name(confd)))
+ (cfgfile, type_utils.obj_name(confd)))
else:
confd = str(confd).strip()
elif os.path.isdir("%s.d" % cfgfile):
@@ -905,7 +998,7 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
def get_cmdline_url(names=('cloud-config-url', 'url'),
- starts="#cloud-config", cmdline=None):
+ starts=b"#cloud-config", cmdline=None):
if cmdline is None:
cmdline = get_cmdline()
@@ -921,8 +1014,10 @@ def get_cmdline_url(names=('cloud-config-url', 'url'),
return (None, None, None)
resp = read_file_or_url(url)
- if resp.contents.startswith(starts) and resp.ok():
- return (key, url, str(resp))
+ # allow callers to pass starts as text when comparing to bytes contents
+ starts = encode_text(starts)
+ if resp.ok() and resp.contents.startswith(starts):
+ return (key, url, resp.contents)
return (key, url, None)
@@ -948,7 +1043,8 @@ def is_resolvable(name):
for iname in badnames:
try:
result = socket.getaddrinfo(iname, None, 0, 0,
- socket.SOCK_STREAM, socket.AI_CANONNAME)
+ socket.SOCK_STREAM,
+ socket.AI_CANONNAME)
badresults[iname] = []
for (_fam, _stype, _proto, cname, sockaddr) in result:
badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
@@ -1016,7 +1112,7 @@ def close_stdin():
def find_devs_with(criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
+ tag=None, no_cache=False, path=None):
"""
find devices matching given criteria (via blkid)
criteria can be *one* of:
@@ -1076,9 +1172,9 @@ def uniq_list(in_list):
return out_list
-def load_file(fname, read_cb=None, quiet=False):
+def load_file(fname, read_cb=None, quiet=False, decode=True):
LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
- ofh = StringIO()
+ ofh = six.BytesIO()
try:
with open(fname, 'rb') as ifh:
pipe_in_out(ifh, ofh, chunk_cb=read_cb)
@@ -1089,17 +1185,35 @@ def load_file(fname, read_cb=None, quiet=False):
raise
contents = ofh.getvalue()
LOG.debug("Read %s bytes from %s", len(contents), fname)
- return contents
+ if decode:
+ return decode_binary(contents)
+ else:
+ return contents
def get_cmdline():
if 'DEBUG_PROC_CMDLINE' in os.environ:
- cmdline = os.environ["DEBUG_PROC_CMDLINE"]
+ return os.environ["DEBUG_PROC_CMDLINE"]
+
+ global PROC_CMDLINE
+ if PROC_CMDLINE is not None:
+ return PROC_CMDLINE
+
+ if is_container():
+ try:
+ contents = load_file("/proc/1/cmdline")
+ # replace nulls with space and drop trailing null
+ cmdline = contents.replace("\x00", " ")[:-1]
+ except Exception as e:
+ LOG.warn("failed reading /proc/1/cmdline: %s", e)
+ cmdline = ""
else:
try:
cmdline = load_file("/proc/cmdline").strip()
except Exception:
cmdline = ""
+
+ PROC_CMDLINE = cmdline
return cmdline
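
A sketch of the new caching behavior: the first call snapshots /proc/cmdline (or /proc/1/cmdline inside a container) into the module-level PROC_CMDLINE, later calls reuse that snapshot, and the DEBUG_PROC_CMDLINE environment override is checked before the cache, so it always wins.

    import os

    from cloudinit import util

    first = util.get_cmdline()   # reads /proc and caches in PROC_CMDLINE
    again = util.get_cmdline()   # served from the cache, no second read
    assert first == again

    os.environ['DEBUG_PROC_CMDLINE'] = 'root=/dev/sda1 quiet'
    assert util.get_cmdline() == 'root=/dev/sda1 quiet'
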
@@ -1107,7 +1221,7 @@ def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
bytes_piped = 0
while True:
data = in_fh.read(chunk_size)
- if data == '':
+ if len(data) == 0:
break
else:
out_fh.write(data)
@@ -1213,13 +1327,20 @@ def logexc(log, msg, *args):
# coming out to a non-debug stream
if msg:
log.warn(msg, *args)
- # Debug gets the full trace
- log.debug(msg, exc_info=1, *args)
+ # Debug gets the full trace. However, nose has a bug whereby its
+ # logcapture plugin doesn't properly handle the case where there is no
+ # actual exception. To avoid tracebacks during the test suite, we do the
+ # exc_info extraction here ourselves and pass None when no exception is
+ # in flight.
+ exc_info = sys.exc_info()
+ if exc_info == (None, None, None):
+ exc_info = None
+ log.debug(msg, exc_info=exc_info, *args)
def hash_blob(blob, routine, mlen=None):
hasher = hashlib.new(routine)
- hasher.update(blob)
+ hasher.update(encode_text(blob))
digest = hasher.hexdigest()
# Don't get too long now
if mlen is not None:
@@ -1250,7 +1371,7 @@ def rename(src, dest):
os.rename(src, dest)
-def ensure_dirs(dirlist, mode=0755):
+def ensure_dirs(dirlist, mode=0o755):
for d in dirlist:
ensure_dir(d, mode)
@@ -1264,7 +1385,7 @@ def read_write_cmdline_url(target_fn):
return
try:
if key and content:
- write_file(target_fn, content, mode=0600)
+ write_file(target_fn, content, mode=0o600)
LOG.debug(("Wrote to %s with contents of command line"
" url %s (len=%s)"), target_fn, url, len(content))
elif key and not content:
@@ -1280,8 +1401,7 @@ def yaml_dumps(obj, explicit_start=True, explicit_end=True):
indent=4,
explicit_start=explicit_start,
explicit_end=explicit_end,
- default_flow_style=False,
- allow_unicode=True)
+ default_flow_style=False)
def ensure_dir(path, mode=None):
@@ -1380,9 +1500,10 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
mounted = mounts()
with tempdir() as tmpd:
umount = False
- if device in mounted:
- mountpoint = mounted[device]['mountpoint']
+ if os.path.realpath(device) in mounted:
+ mountpoint = mounted[os.path.realpath(device)]['mountpoint']
else:
+ failure_reason = None
for mtype in mtypes:
mountpoint = None
try:
@@ -1409,10 +1530,10 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
except (IOError, OSError) as exc:
LOG.debug("Failed mount of '%s' as '%s': %s",
device, mtype, exc)
- pass
+ failure_reason = exc
if not mountpoint:
raise MountFailedError("Failed mounting %s to %s due to: %s" %
- (device, tmpd, exc))
+ (device, tmpd, failure_reason))
# Be nice and ensure it ends with a slash
if not mountpoint.endswith("/"):
@@ -1465,7 +1586,7 @@ def uptime():
try:
if os.path.exists("/proc/uptime"):
method = '/proc/uptime'
- contents = load_file("/proc/uptime").strip()
+ contents = load_file("/proc/uptime")
if contents:
uptime_str = contents.split()[0]
else:
@@ -1489,7 +1610,7 @@ def append_file(path, content):
write_file(path, content, omode="ab", mode=None)
-def ensure_file(path, mode=0644):
+def ensure_file(path, mode=0o644):
write_file(path, content='', omode="ab", mode=mode)
@@ -1507,7 +1628,7 @@ def chmod(path, mode):
os.chmod(path, real_mode)
-def write_file(filename, content, mode=0644, omode="wb"):
+def write_file(filename, content, mode=0o644, omode="wb"):
"""
Writes a file with the given content and sets the file mode as specified.
Restores the SELinux context if possible.
@@ -1515,11 +1636,17 @@ def write_file(filename, content, mode=0644, omode="wb"):
@param filename: The full path of the file to write.
@param content: The content to write to the file.
@param mode: The filesystem mode to set on the file.
- @param omode: The open mode used when opening the file (r, rb, a, etc.)
+ @param omode: The open mode used when opening the file (w, wb, a, etc.)
"""
ensure_dir(os.path.dirname(filename))
- LOG.debug("Writing to %s - %s: [%s] %s bytes",
- filename, omode, mode, len(content))
+ if 'b' in omode.lower():
+ content = encode_text(content)
+ write_type = 'bytes'
+ else:
+ content = decode_binary(content)
+ write_type = 'characters'
+ LOG.debug("Writing to %s - %s: [%s] %s %s",
+ filename, omode, mode, len(content), write_type)
with SeLinuxGuard(path=filename):
with open(filename, omode) as fh:
fh.write(content)
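
How the reworked write_file reconciles content type with open mode, sketched against throwaway paths under /tmp: a binary omode encodes text content, a text omode decodes bytes content, so callers may pass either.

    from cloudinit.util import load_file, write_file

    write_file('/tmp/demo-bytes', 'text in, bytes out', omode='wb')
    write_file('/tmp/demo-text', b'bytes in, text out', omode='w')

    assert load_file('/tmp/demo-bytes') == 'text in, bytes out'
    assert load_file('/tmp/demo-text', decode=False) == b'bytes in, text out'
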
@@ -1561,9 +1688,12 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
stdout = subprocess.PIPE
stderr = subprocess.PIPE
stdin = subprocess.PIPE
- sp = subprocess.Popen(args, stdout=stdout,
- stderr=stderr, stdin=stdin,
- env=env, shell=shell)
+ kws = dict(stdout=stdout, stderr=stderr, stdin=stdin,
+ env=env, shell=shell)
+ if six.PY3:
+ # Use this so subprocess output will be (Python 3) str, not bytes.
+ kws['universal_newlines'] = True
+ sp = subprocess.Popen(args, **kws)
(out, err) = sp.communicate(data)
except OSError as e:
raise ProcessExecutionError(cmd=args, reason=e)
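
The universal_newlines switch, illustrated: on Python 3 the captured stdout/stderr come back as str rather than bytes, matching what callers already saw on Python 2.

    from cloudinit.util import subp

    (out, _err) = subp(['echo', 'hello'])
    assert out == 'hello\n'  # str on both Python 2 and 3, never bytes
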
@@ -1608,10 +1738,10 @@ def shellify(cmdlist, add_header=True):
if isinstance(args, list):
fixed = []
for f in args:
- fixed.append("'%s'" % (str(f).replace("'", escaped)))
+ fixed.append("'%s'" % (six.text_type(f).replace("'", escaped)))
content = "%s%s\n" % (content, ' '.join(fixed))
cmds_made += 1
- elif isinstance(args, (str, basestring)):
+ elif isinstance(args, six.string_types):
content = "%s%s\n" % (content, args)
cmds_made += 1
else:
@@ -1639,7 +1769,7 @@ def is_container():
try:
# try to run a helper program. if it returns true/zero
# then we're inside a container. otherwise, no
- subp([helper])
+ subp(helper)
return True
except (IOError, OSError):
pass
@@ -1722,7 +1852,7 @@ def expand_package_list(version_fmt, pkgs):
pkglist = []
for pkg in pkgs:
- if isinstance(pkg, basestring):
+ if isinstance(pkg, six.string_types):
pkglist.append(pkg)
continue
@@ -1950,7 +2080,7 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
ret = {}
for f in required + optional:
try:
- ret[f] = load_file(base + delim + f, quiet=False)
+ ret[f] = load_file(base + delim + f, quiet=False, decode=False)
except IOError as e:
if e.errno != errno.ENOENT:
raise
@@ -2011,3 +2141,86 @@ def human2bytes(size):
raise ValueError("'%s': cannot be negative" % size_in)
return int(num * mpliers[mplier])
+
+
+def _read_dmi_syspath(key):
+ """
+ Reads dmi data from /sys/class/dmi/id
+ """
+ if key not in DMIDECODE_TO_DMI_SYS_MAPPING:
+ return None
+ mapped_key = DMIDECODE_TO_DMI_SYS_MAPPING[key]
+ dmi_key_path = "{0}/{1}".format(DMI_SYS_PATH, mapped_key)
+ LOG.debug("querying dmi data %s", dmi_key_path)
+ try:
+ if not os.path.exists(dmi_key_path):
+ LOG.debug("did not find %s", dmi_key_path)
+ return None
+
+ key_data = load_file(dmi_key_path, decode=False)
+ if not key_data:
+ LOG.debug("%s did not return any data", dmi_key_path)
+ return None
+
+ # uninitialized dmi values show as all \xff and /sys appends a '\n'.
+ # in that event, return the empty string.
+ if key_data == b'\xff' * (len(key_data) - 1) + b'\n':
+ key_data = b""
+
+ str_data = key_data.decode('utf8').strip()
+ LOG.debug("dmi data %s returned %s", dmi_key_path, str_data)
+ return str_data
+
+ except Exception:
+ logexc(LOG, "failed read of %s", dmi_key_path)
+ return None
+
+
+def _call_dmidecode(key, dmidecode_path):
+ """
+ Calls out to dmidecode to get the data. This is mostly for supporting
+ OSes without /sys/class/dmi/id support.
+ """
+ try:
+ cmd = [dmidecode_path, "--string", key]
+ (result, _err) = subp(cmd)
+ LOG.debug("dmidecode returned '%s' for '%s'", result, key)
+ result = result.strip()
+ if result.replace(".", "") == "":
+ return ""
+ return result
+ except (IOError, OSError) as _err:
+ LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err)
+ return None
+
+
+def read_dmi_data(key):
+ """
+ Wrapper for reading DMI data.
+
+ This will do the following (returning the first that produces a
+ result):
+ 1) Use a mapping to translate `key` from dmidecode naming to
+ sysfs naming and look in /sys/class/dmi/... for a value.
+ 2) Fall back to passing `key` to `dmidecode --string`; keys with
+ no sysfs mapping go straight to this step.
+
+ If both of the above fail to find a value, None will be returned.
+ """
+ syspath_value = _read_dmi_syspath(key)
+ if syspath_value is not None:
+ return syspath_value
+
+ dmidecode_path = which('dmidecode')
+ if dmidecode_path:
+ return _call_dmidecode(key, dmidecode_path)
+
+ LOG.warn("did not find either path %s or dmidecode command",
+ DMI_SYS_PATH)
+ return None
+
+
+def message_from_string(string):
+ if sys.version_info[:2] < (2, 7):
+ return email.message_from_file(six.StringIO(string))
+ return email.message_from_string(string)
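
Finally, a hedged usage sketch of the new DMI helpers (the value shown depends on the machine; readable sysfs nodes or a working dmidecode are assumed): a dmidecode-style key is translated to its sysfs name and read from /sys/class/dmi/id first, with `dmidecode --string` as the fallback.

    from cloudinit.util import read_dmi_data

    # consults /sys/class/dmi/id/product_uuid, then `dmidecode --string
    # system-uuid`; returns None if neither source yields a value
    print(read_dmi_data('system-uuid'))
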