Diffstat
-rw-r--r--  ChangeLog | 12
-rwxr-xr-x  bin/cloud-init | 60
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 114
-rw-r--r--  cloudinit/config/cc_emit_upstart.py | 2
-rw-r--r--  cloudinit/config/cc_lxd.py | 3
-rw-r--r--  cloudinit/distros/__init__.py | 6
-rw-r--r--  cloudinit/ec2_utils.py | 4
-rw-r--r--  cloudinit/helpers.py | 22
-rw-r--r--  cloudinit/net/__init__.py | 252
-rw-r--r--  cloudinit/net/cmdline.py | 18
-rw-r--r--  cloudinit/net/eni.py | 90
-rw-r--r--  cloudinit/net/network_state.py | 12
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py | 19
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 105
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 78
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 42
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 21
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 577
-rw-r--r--  cloudinit/sources/__init__.py | 33
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 80
-rw-r--r--  cloudinit/stages.py | 103
-rw-r--r--  cloudinit/templater.py | 5
-rw-r--r--  cloudinit/util.py | 10
-rw-r--r--  doc/examples/cloud-config.txt | 127
-rw-r--r--  doc/merging.rst | 42
-rw-r--r--  doc/rtd/topics/merging.rst | 6
-rwxr-xr-x  packages/bddeb | 2
-rwxr-xr-x  setup.py | 1
-rwxr-xr-x  systemd/cloud-init-generator | 3
-rw-r--r--  test-requirements.txt | 3
-rw-r--r--  tests/unittests/test_datasource/test_configdrive.py | 125
-rw-r--r--  tests/unittests/test_datasource/test_openstack.py | 32
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py | 290
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure_sources_list.py | 165
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_source.py | 551
-rw-r--r--  tests/unittests/test_merging.py | 4
-rw-r--r--  udev/79-cloud-init-net-wait.rules | 10
-rwxr-xr-x  udev/cloud-init-wait | 70
38 files changed, 2225 insertions, 874 deletions
diff --git a/ChangeLog b/ChangeLog
index e06578fc..aa3315b8 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -109,6 +109,18 @@
[Wido den Hollander]
- Paths: fix instance path if datasource's id has a '/'. (LP: #1575938)
[Robert Jennings]
+ - Ec2: do not retry requests for user-data path on 404.
+ - settings on the kernel command line (cc:) override all local settings
+ rather than only those in /etc/cloud/cloud.cfg (LP: #1582323)
+ - Improve merging documentation [Daniel Watkins]
+ - apt sources: support inserting key/key-id only, custom sources.list,
+ long gpg key fingerprints with spaces, and dictionary format (LP: #1574113)
+ - SmartOS: datasource improvements and support for metadata service
+ providing networking information.
+ - Datasources: centrally handle 'dsmode' and no longer require datasources
+ to "pass" if modules_init should be executed with network access.
+ - ConfigDrive: improved support for networking information from
+ a network_data.json or older interfaces-formatted network_config.
0.7.6:
- open 0.7.6
diff --git a/bin/cloud-init b/bin/cloud-init
index 5857af32..21c3a684 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -211,27 +211,27 @@ def main_init(name, args):
util.logexc(LOG, "Failed to initialize, likely bad things to come!")
# Stage 4
path_helper = init.paths
- if not args.local:
+ mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK
+
+ if mode == sources.DSMODE_NETWORK:
existing = "trust"
sys.stderr.write("%s\n" % (netinfo.debug_info()))
LOG.debug(("Checking to see if files that we need already"
" exist from a previous run that would allow us"
" to stop early."))
+ # no-net is written by upstart cloud-init-nonet when network failed
+ # to come up
stop_files = [
os.path.join(path_helper.get_cpath("data"), "no-net"),
- path_helper.get_ipath_cur("obj_pkl"),
]
existing_files = []
for fn in stop_files:
- try:
- c = util.load_file(fn)
- if len(c):
- existing_files.append((fn, len(c)))
- except Exception:
- pass
+ if os.path.isfile(fn):
+ existing_files.append(fn)
+
if existing_files:
- LOG.debug("Exiting early due to the existence of %s files",
- existing_files)
+ LOG.debug("[%s] Exiting. stop file %s existed",
+ mode, existing_files)
return (None, [])
else:
LOG.debug("Execution continuing, no previous run detected that"
@@ -248,34 +248,50 @@ def main_init(name, args):
# Stage 5
try:
init.fetch(existing=existing)
+ # if in network mode, and the datasource is local
+ # then work was done at that stage.
+ if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
+ LOG.debug("[%s] Exiting. datasource %s in local mode",
+ mode, init.datasource)
+ return (None, [])
except sources.DataSourceNotFoundException:
# In the case of 'cloud-init init' without '--local' it is a bit
# more likely that the user would consider it failure if nothing was
# found. When using upstart it will also mention job failure
# in console log if exit code is != 0.
- if args.local:
+ if mode == sources.DSMODE_LOCAL:
LOG.debug("No local datasource found")
else:
util.logexc(LOG, ("No instance datasource found!"
" Likely bad things to come!"))
if not args.force:
- init.apply_network_config()
- if args.local:
+ init.apply_network_config(bring_up=not args.local)
+ LOG.debug("[%s] Exiting without datasource in local mode", mode)
+ if mode == sources.DSMODE_LOCAL:
return (None, [])
else:
return (None, ["No instance datasource found."])
-
- if args.local:
- if not init.ds_restored:
- # if local mode and the datasource was not restored from cache
- # (this is not first boot) then apply networking.
- init.apply_network_config()
else:
- LOG.debug("skipping networking config from restored datasource.")
+ LOG.debug("[%s] barreling on in force mode without datasource",
+ mode)
# Stage 6
iid = init.instancify()
- LOG.debug("%s will now be targeting instance id: %s", name, iid)
+ LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",
+ mode, name, iid, init.is_new_instance())
+
+ init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))
+
+ if mode == sources.DSMODE_LOCAL:
+ if init.datasource.dsmode != mode:
+ LOG.debug("[%s] Exiting. datasource %s not in local mode.",
+ mode, init.datasource)
+ return (init.datasource, [])
+ else:
+ LOG.debug("[%s] %s is in local mode, will apply init modules now.",
+ mode, init.datasource)
+
+ # update fully realizes user-data (pulling in #include if necessary)
init.update()
# Stage 7
try:
@@ -528,7 +544,7 @@ def status_wrapper(name, args, data_d=None, link_d=None):
v1[mode]['errors'] = [str(e) for e in errors]
except Exception as e:
- util.logexc(LOG, "failed of stage %s", mode)
+ util.logexc(LOG, "failed stage %s", mode)
print_exc("failed run of stage %s" % mode)
v1[mode]['errors'] = [str(e)]
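Taken together, the hunks above replace the old args.local booleans with explicit mode constants and an early-exit handshake between the two init stages. A minimal sketch of that handshake, using the DSMODE_* names from cloudinit.sources as they appear in this diff (illustrative only, not the shipped code):

    from cloudinit import sources

    def stage_should_continue(args_local, datasource):
        mode = sources.DSMODE_LOCAL if args_local else sources.DSMODE_NETWORK
        if datasource.dsmode != mode:
            # the other stage owns this datasource: the network stage
            # skips local-mode datasources (already handled earlier),
            # and the local stage defers network-mode ones.
            return False
        return True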
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index e3fadc12..7a9777c0 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -40,9 +40,9 @@ EXPORT_GPG_KEYID = """
k=${1} ks=${2};
exec 2>/dev/null
[ -n "$k" ] || exit 1;
- armour=$(gpg --list-keys --armour "${k}")
+ armour=$(gpg --export --armour "${k}")
if [ -z "${armour}" ]; then
- gpg --keyserver ${ks} --recv $k >/dev/null &&
+ gpg --keyserver ${ks} --recv "${k}" >/dev/null &&
armour=$(gpg --export --armour "${k}") &&
gpg --batch --yes --delete-keys "${k}"
fi
@@ -70,7 +70,7 @@ def handle(name, cfg, cloud, log, _args):
if not util.get_cfg_option_bool(cfg,
'apt_preserve_sources_list', False):
- generate_sources_list(release, mirrors, cloud, log)
+ generate_sources_list(cfg, release, mirrors, cloud, log)
old_mirrors = cfg.get('apt_old_mirrors',
{"primary": "archive.ubuntu.com/ubuntu",
"security": "security.ubuntu.com/ubuntu"})
@@ -149,7 +149,17 @@ def get_release():
return stdout.strip()
-def generate_sources_list(codename, mirrors, cloud, log):
+def generate_sources_list(cfg, codename, mirrors, cloud, log):
+ params = {'codename': codename}
+ for k in mirrors:
+ params[k] = mirrors[k]
+
+ custtmpl = cfg.get('apt_custom_sources_list', None)
+ if custtmpl is not None:
+ templater.render_string_to_file(custtmpl,
+ '/etc/apt/sources.list', params)
+ return
+
template_fn = cloud.get_template_filename('sources.list.%s' %
(cloud.distro.name))
if not template_fn:
@@ -158,12 +168,60 @@ def generate_sources_list(codename, mirrors, cloud, log):
log.warn("No template found, not rendering /etc/apt/sources.list")
return
- params = {'codename': codename}
- for k in mirrors:
- params[k] = mirrors[k]
templater.render_to_file(template_fn, '/etc/apt/sources.list', params)
+def add_key_raw(key):
+ """
+ actual adding of a key as defined in key argument
+ to the system
+ """
+ try:
+ util.subp(('apt-key', 'add', '-'), key)
+ except util.ProcessExecutionError:
+ raise Exception('failed add key')
+
+
+def add_key(ent):
+ """
+ add key to the system as defined in ent (if any)
+ supports raw keys or keyid's
+ The latter will as a first step fetch the raw key from a keyserver
+ """
+ if 'keyid' in ent and 'key' not in ent:
+ keyserver = "keyserver.ubuntu.com"
+ if 'keyserver' in ent:
+ keyserver = ent['keyserver']
+ ent['key'] = getkeybyid(ent['keyid'], keyserver)
+
+ if 'key' in ent:
+ add_key_raw(ent['key'])
+
+
+def convert_to_new_format(srclist):
+ """convert_to_new_format
+ convert the old list based format to the new dict based one
+ """
+ srcdict = {}
+ if isinstance(srclist, list):
+ for srcent in srclist:
+ if 'filename' not in srcent:
+ # entries without a filename collide on one file for compatibility,
+ # yet all still need processing, so each gets a unique dictionary key
+ srcent['filename'] = "cloud_config_sources.list"
+ key = util.rand_dict_key(srcdict, "cloud_config_sources.list")
+ else:
+ # all with filename use that as key (matching new format)
+ key = srcent['filename']
+ srcdict[key] = srcent
+ elif isinstance(srclist, dict):
+ srcdict = srclist
+ else:
+ raise ValueError("unknown apt_sources format")
+
+ return srcdict
+
+
def add_sources(srclist, template_params=None, aa_repo_match=None):
"""
add entries in /etc/apt/sources.list.d for each abbreviated
@@ -178,14 +236,29 @@ def add_sources(srclist, template_params=None, aa_repo_match=None):
return False
errorlist = []
- for ent in srclist:
+ srcdict = convert_to_new_format(srclist)
+
+ for filename in srcdict:
+ ent = srcdict[filename]
+ if 'filename' not in ent:
+ ent['filename'] = filename
+
+ # keys can be added without specifying a source
+ try:
+ add_key(ent)
+ except Exception as detail:
+ errorlist.append([ent, detail])
+
if 'source' not in ent:
errorlist.append(["", "missing source"])
continue
-
source = ent['source']
source = templater.render_string(source, template_params)
+ if not ent['filename'].startswith(os.path.sep):
+ ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
+ ent['filename'])
+
if aa_repo_match(source):
try:
util.subp(["add-apt-repository", source])
@@ -194,29 +267,6 @@ def add_sources(srclist, template_params=None, aa_repo_match=None):
("add-apt-repository failed. " + str(e))])
continue
- if 'filename' not in ent:
- ent['filename'] = 'cloud_config_sources.list'
-
- if not ent['filename'].startswith("/"):
- ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
- ent['filename'])
-
- if ('keyid' in ent and 'key' not in ent):
- ks = "keyserver.ubuntu.com"
- if 'keyserver' in ent:
- ks = ent['keyserver']
- try:
- ent['key'] = getkeybyid(ent['keyid'], ks)
- except Exception:
- errorlist.append([source, "failed to get key from %s" % ks])
- continue
-
- if 'key' in ent:
- try:
- util.subp(('apt-key', 'add', '-'), ent['key'])
- except Exception:
- errorlist.append([source, "failed add key"])
-
try:
contents = "%s\n" % (source)
util.write_file(ent['filename'], contents, omode="ab")
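For reference, the list-to-dict conversion performed by convert_to_new_format() above behaves roughly as follows (a worked example, not taken from the commit; the generated key for filename-less entries comes from util.rand_dict_key and varies):

    from cloudinit.config.cc_apt_configure import convert_to_new_format

    old_format = [
        {'source': 'deb $MIRROR $RELEASE multiverse'},
        {'source': 'ppa:smoser/ppa', 'filename': 'smoser.list'},
    ]
    new_format = convert_to_new_format(old_format)
    # approximately:
    # {'<random>_cloud_config_sources.list':
    #      {'source': 'deb $MIRROR $RELEASE multiverse',
    #       'filename': 'cloud_config_sources.list'},
    #  'smoser.list':
    #      {'source': 'ppa:smoser/ppa', 'filename': 'smoser.list'}}

Entries without a 'filename' all share the compatibility name cloud_config_sources.list but receive distinct dictionary keys so that none are dropped.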
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index 06c53272..98828b9e 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -56,7 +56,7 @@ def handle(name, _cfg, cloud, log, args):
event_names = ['cloud-config']
if not is_upstart_system():
- log.debug("not upstart system, '%s' disabled")
+ log.debug("not upstart system, '%s' disabled", name)
return
cfgpath = cloud.paths.get_ipath_cur("cloud_config")
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index b1de8f84..70d4e7c3 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -52,7 +52,8 @@ def handle(name, cfg, cloud, log, args):
# Get config
lxd_cfg = cfg.get('lxd')
if not lxd_cfg:
- log.debug("Skipping module named %s, not present or disabled by cfg")
+ log.debug("Skipping module named %s, not present or disabled by cfg",
+ name)
return
if not isinstance(lxd_cfg, dict):
log.warn("lxd config must be a dictionary. found a '%s'",
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 0f222c8c..5c29c804 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -31,6 +31,7 @@ import stat
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import net
from cloudinit import ssh_util
from cloudinit import type_utils
from cloudinit import util
@@ -128,6 +129,8 @@ class Distro(object):
mirror_info=arch_info)
def apply_network(self, settings, bring_up=True):
+ # this applies network where 'settings' is interfaces(5) style
+ # it is obsolete compared to apply_network_config
# Write it out
dev_names = self._write_network(settings)
# Now try to bring them up
@@ -143,6 +146,9 @@ class Distro(object):
return self._bring_up_interfaces(dev_names)
return False
+ def apply_network_config_names(self, netconfig):
+ net.apply_network_config_names(netconfig)
+
@abc.abstractmethod
def apply_locale(self, locale, out_fn=None):
raise NotImplementedError()
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 37b92a83..76dda042 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -144,9 +144,7 @@ def _skip_retry_on_codes(status_codes, _request_args, cause):
"""Returns if a request should retry based on a given set of codes that
cause retrying to be stopped/skipped.
"""
- if cause.code in status_codes:
- return False
- return True
+ return cause.code not in status_codes
def get_instance_userdata(api_version='latest',
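Per the docstring, the helper should return False (stop retrying) exactly when the response code is in the skip set. A quick behavioural check (names and values hypothetical):

    skip_codes = frozenset([404])  # e.g. user-data path absent

    def skip_retry_on_codes(status_codes, _request_args, cause_code):
        # simplified stand-in mirroring the helper above
        return cause_code not in status_codes

    assert skip_retry_on_codes(skip_codes, None, 500)       # keep retrying
    assert not skip_retry_on_codes(skip_codes, None, 404)   # stop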
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 09d75e65..fb95babc 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -328,6 +328,7 @@ class Paths(object):
self.cfgs = path_cfgs
# Populate all the initial paths
self.cloud_dir = path_cfgs.get('cloud_dir', '/var/lib/cloud')
+ self.run_dir = path_cfgs.get('run_dir', '/run/cloud-init')
self.instance_link = os.path.join(self.cloud_dir, 'instance')
self.boot_finished = os.path.join(self.instance_link, "boot-finished")
self.upstart_conf_d = path_cfgs.get('upstart_dir')
@@ -349,26 +350,19 @@ class Paths(object):
"data": "data",
"vendordata_raw": "vendor-data.txt",
"vendordata": "vendor-data.txt.i",
+ "instance_id": ".instance-id",
}
# Set when a datasource becomes active
self.datasource = ds
# get_ipath_cur: get the current instance path for an item
def get_ipath_cur(self, name=None):
- ipath = self.instance_link
- add_on = self.lookups.get(name)
- if add_on:
- ipath = os.path.join(ipath, add_on)
- return ipath
+ return self._get_path(self.instance_link, name)
# get_cpath : get the "clouddir" (/var/lib/cloud/<name>)
# for a name in dirmap
def get_cpath(self, name=None):
- cpath = self.cloud_dir
- add_on = self.lookups.get(name)
- if add_on:
- cpath = os.path.join(cpath, add_on)
- return cpath
+ return self._get_path(self.cloud_dir, name)
# _get_ipath : get the instance path for a name in pathmap
# (/var/lib/cloud/instances/<instance>/<name>)
@@ -397,6 +391,14 @@ class Paths(object):
else:
return ipath
+ def _get_path(self, base, name=None):
+ if name is None:
+ return base
+ return os.path.join(base, self.lookups[name])
+
+ def get_runpath(self, name=None):
+ return self._get_path(self.run_dir, name)
+
# This config parser will not throw when sections don't exist
# and you are setting values on those sections which is useful
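A usage sketch of the new run_dir plumbing (assuming the default path configuration; resulting paths shown in comments):

    from cloudinit.helpers import Paths

    paths = Paths({})  # run_dir defaults to /run/cloud-init
    paths.get_runpath()                # '/run/cloud-init'
    paths.get_runpath('instance_id')   # '/run/cloud-init/.instance-id'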
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index f8df58f0..f5668fff 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -19,77 +19,14 @@
import errno
import logging
import os
+import re
-from .import compat
-
-import yaml
+from cloudinit import util
LOG = logging.getLogger(__name__)
SYS_CLASS_NET = "/sys/class/net/"
-LINKS_FNAME_PREFIX = "etc/systemd/network/50-cloud-init-"
-
-NET_CONFIG_OPTIONS = [
- "address", "netmask", "broadcast", "network", "metric", "gateway",
- "pointtopoint", "media", "mtu", "hostname", "leasehours", "leasetime",
- "vendor", "client", "bootfile", "server", "hwaddr", "provider", "frame",
- "netnum", "endpoint", "local", "ttl",
-]
-
-NET_CONFIG_COMMANDS = [
- "pre-up", "up", "post-up", "down", "pre-down", "post-down",
-]
-
-NET_CONFIG_BRIDGE_OPTIONS = [
- "bridge_ageing", "bridge_bridgeprio", "bridge_fd", "bridge_gcinit",
- "bridge_hello", "bridge_maxage", "bridge_maxwait", "bridge_stp",
-]
DEFAULT_PRIMARY_INTERFACE = 'eth0'
-
-# NOTE(harlowja): some of these are similar to what is in cloudinit main
-# source or utils tree/module but the reason that is done is so that this
-# whole module can be easily extracted and placed into other
-# code-bases (curtin for example).
-
-
-def write_file(path, content):
- base_path = os.path.dirname(path)
- if not os.path.isdir(base_path):
- os.makedirs(base_path)
- with open(path, "wb+") as fh:
- if isinstance(content, compat.text_type):
- content = content.encode("utf8")
- fh.write(content)
-
-
-def read_file(path, decode='utf8', enoent=None):
- try:
- with open(path, "rb") as fh:
- contents = fh.read()
- except OSError as e:
- if e.errno == errno.ENOENT and enoent is not None:
- return enoent
- raise
- if decode:
- return contents.decode(decode)
- return contents
-
-
-def dump_yaml(obj):
- return yaml.safe_dump(obj,
- line_break="\n",
- indent=4,
- explicit_start=True,
- explicit_end=True,
- default_flow_style=False)
-
-
-def read_yaml_file(path):
- val = yaml.safe_load(read_file(path))
- if not isinstance(val, dict):
- gotten_type_name = type(val).__name__
- raise TypeError("Expected dict to be loaded from %s, got"
- " '%s' instead" % (path, gotten_type_name))
- return val
+LINKS_FNAME_PREFIX = "etc/systemd/network/50-cloud-init-"
def sys_dev_path(devname, path=""):
@@ -97,7 +34,13 @@ def sys_dev_path(devname, path=""):
def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None):
- contents = read_file(sys_dev_path(devname, path), enoent=enoent)
+ try:
+ contents = util.load_file(sys_dev_path(devname, path))
+ except (OSError, IOError) as e:
+ if getattr(e, 'errno', None) == errno.ENOENT:
+ if enoent is not None:
+ return enoent
+ raise
contents = contents.strip()
if translate is None:
return contents
@@ -158,7 +101,7 @@ def get_devicelist():
class ParserError(Exception):
- """Raised when parser has issue parsing the interfaces file."""
+ """Raised when a parser has issue parsing a file/content."""
def is_disabled_cfg(cfg):
@@ -171,11 +114,10 @@ def sys_netdev_info(name, field):
if not os.path.exists(os.path.join(SYS_CLASS_NET, name)):
raise OSError("%s: interface does not exist in %s" %
(name, SYS_CLASS_NET))
-
fname = os.path.join(SYS_CLASS_NET, name, field)
if not os.path.exists(fname):
raise OSError("%s: could not find sysfs entry: %s" % (name, fname))
- data = read_file(fname)
+ data = util.load_file(fname)
if data[-1] == '\n':
data = data[:-1]
return data
@@ -251,4 +193,174 @@ def generate_fallback_config():
return nconf
+def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
+ """read the network config and rename devices accordingly.
+ if strict_present is false, then do not raise exception if no devices
+ match. if strict_busy is false, then do not raise exception if the
+ device cannot be renamed because it is currently configured."""
+ renames = []
+ for ent in netcfg.get('config', {}):
+ if ent.get('type') != 'physical':
+ continue
+ mac = ent.get('mac_address')
+ name = ent.get('name')
+ if not mac:
+ continue
+ renames.append([mac, name])
+
+ return _rename_interfaces(renames)
+
+
+def _get_current_rename_info(check_downable=True):
+ """Collect information necessary for rename_interfaces."""
+ names = get_devicelist()
+ bymac = {}
+ for n in names:
+ bymac[get_interface_mac(n)] = {
+ 'name': n, 'up': is_up(n), 'downable': None}
+
+ if check_downable:
+ nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]")
+ ipv6, _err = util.subp(['ip', '-6', 'addr', 'show', 'permanent',
+ 'scope', 'global'], capture=True)
+ ipv4, _err = util.subp(['ip', '-4', 'addr', 'show'], capture=True)
+
+ nics_with_addresses = set()
+ for bytes_out in (ipv6, ipv4):
+ nics_with_addresses.update(nmatch.findall(bytes_out))
+
+ for d in bymac.values():
+ d['downable'] = (d['up'] is False or
+ d['name'] not in nics_with_addresses)
+
+ return bymac
+
+
+def _rename_interfaces(renames, strict_present=True, strict_busy=True,
+ current_info=None):
+ if current_info is None:
+ current_info = _get_current_rename_info()
+
+ cur_bymac = {}
+ for mac, data in current_info.items():
+ cur = data.copy()
+ cur['mac'] = mac
+ cur_bymac[mac] = cur
+
+ def update_byname(bymac):
+ return {data['name']: data for data in bymac.values()}
+
+ def rename(cur, new):
+ util.subp(["ip", "link", "set", cur, "name", new], capture=True)
+
+ def down(name):
+ util.subp(["ip", "link", "set", name, "down"], capture=True)
+
+ def up(name):
+ util.subp(["ip", "link", "set", name, "up"], capture=True)
+
+ ops = []
+ errors = []
+ ups = []
+ cur_byname = update_byname(cur_bymac)
+ tmpname_fmt = "cirename%d"
+ tmpi = -1
+
+ for mac, new_name in renames:
+ cur = cur_bymac.get(mac, {})
+ cur_name = cur.get('name')
+ cur_ops = []
+ if cur_name == new_name:
+ # nothing to do
+ continue
+
+ if not cur_name:
+ if strict_present:
+ errors.append(
+ "[nic not present] Cannot rename mac=%s to %s"
+ ", not available." % (mac, new_name))
+ continue
+
+ if cur['up']:
+ msg = "[busy] Error renaming mac=%s from %s to %s"
+ if not cur['downable']:
+ if strict_busy:
+ errors.append(msg % (mac, cur_name, new_name))
+ continue
+ cur['up'] = False
+ cur_ops.append(("down", mac, new_name, (cur_name,)))
+ ups.append(("up", mac, new_name, (new_name,)))
+
+ if new_name in cur_byname:
+ target = cur_byname[new_name]
+ if target['up']:
+ msg = "[busy-target] Error renaming mac=%s from %s to %s."
+ if not target['downable']:
+ if strict_busy:
+ errors.append(msg % (mac, cur_name, new_name))
+ continue
+ else:
+ cur_ops.append(("down", mac, new_name, (new_name,)))
+
+ tmp_name = None
+ while tmp_name is None or tmp_name in cur_byname:
+ tmpi += 1
+ tmp_name = tmpname_fmt % tmpi
+
+ cur_ops.append(("rename", mac, new_name, (new_name, tmp_name)))
+ target['name'] = tmp_name
+ cur_byname = update_byname(cur_bymac)
+ if target['up']:
+ ups.append(("up", mac, new_name, (tmp_name,)))
+
+ cur_ops.append(("rename", mac, new_name, (cur['name'], new_name)))
+ cur['name'] = new_name
+ cur_byname = update_byname(cur_bymac)
+ ops += cur_ops
+
+ opmap = {'rename': rename, 'down': down, 'up': up}
+
+ if len(ops) + len(ups) == 0:
+ if len(errors):
+ LOG.debug("unable to do any work for renaming of %s", renames)
+ else:
+ LOG.debug("no work necessary for renaming of %s", renames)
+ else:
+ LOG.debug("achieving renaming of %s with ops %s", renames, ops + ups)
+
+ for op, mac, new_name, params in ops + ups:
+ try:
+ opmap.get(op)(*params)
+ except Exception as e:
+ errors.append(
+ "[unknown] Error performing %s%s for %s, %s: %s" %
+ (op, params, mac, new_name, e))
+
+ if len(errors):
+ raise Exception('\n'.join(errors))
+
+
+def get_interface_mac(ifname):
+ """Returns the string value of an interface's MAC Address"""
+ return read_sys_net(ifname, "address", enoent=False)
+
+
+def get_interfaces_by_mac(devs=None):
+ """Build a dictionary of tuples {mac: name}"""
+ if devs is None:
+ try:
+ devs = get_devicelist()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ devs = []
+ else:
+ raise
+ ret = {}
+ for name in devs:
+ mac = get_interface_mac(name)
+ # some devices may not have a mac (tun0)
+ if mac:
+ ret[mac] = name
+ return ret
+
# vi: ts=4 expandtab syntax=python
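The renaming machinery above consumes the same version-1 network config used throughout this commit. A fragment of the shape apply_network_config_names() expects (MAC and name are example values):

    from cloudinit.net import apply_network_config_names

    netcfg = {
        'version': 1,
        'config': [
            {'type': 'physical', 'name': 'eth0',
             'mac_address': '52:54:00:12:34:00',   # example MAC
             'subnets': [{'type': 'dhcp'}]},
        ],
    }
    # renames whichever device currently owns that MAC to 'eth0',
    # downing/re-upping it via 'ip link' only when _rename_interfaces
    # determines the device is safe to take down:
    apply_network_config_names(netcfg)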
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index 41cba893..39523be2 100644
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -43,17 +43,13 @@ def _load_shell_content(content, add_empty=False, empty_val=None):
then add entries in to the returned dictionary for 'VAR='
variables. Set their value to empty_val."""
data = {}
- for line in _shlex_split(content):
- try:
- key, value = line.split("=", 1)
- except ValueError:
- # Unsplittable line, skip it...
- pass
- else:
- if not value:
- value = empty_val
- if add_empty or value:
- data[key] = value
+ for line in shlex.split(content):
+ key, value = line.split("=", 1)
+ if not value:
+ value = empty_val
+ if add_empty or value:
+ data[key] = value
+
return data
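For orientation, an input/output sketch of the simplified parser (example content of the sort the initramfs leaves in /run/net-<device>.conf):

    from cloudinit.net.cmdline import _load_shell_content

    _load_shell_content("DEVICE=eth0 PROTO=dhcp HOSTNAME=")
    # -> {'DEVICE': 'eth0', 'PROTO': 'dhcp'}
    _load_shell_content("DEVICE=eth0 HOSTNAME=", add_empty=True)
    # -> {'DEVICE': 'eth0', 'HOSTNAME': None}  (empty_val defaults to None)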
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index f82c7f54..a695f5ed 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -18,10 +18,11 @@ import re
from . import LINKS_FNAME_PREFIX
from . import ParserError
-from . import write_file
from .udev import generate_udev_rule
+from cloudinit import util
+
NET_CONFIG_COMMANDS = [
"pre-up", "up", "post-up", "down", "pre-down", "post-down",
@@ -67,6 +68,7 @@ def _iface_add_subnet(iface, subnet):
# TODO: switch to valid_map for attrs
+
def _iface_add_attrs(iface):
content = ""
ignore_map = [
@@ -181,7 +183,11 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
ifaces[iface]['method'] = method
currif = iface
elif option == "hwaddress":
- ifaces[currif]['hwaddress'] = split[1]
+ if split[1] == "ether":
+ val = split[2]
+ else:
+ val = split[1]
+ ifaces[currif]['hwaddress'] = val
elif option in NET_CONFIG_OPTIONS:
ifaces[currif][option] = split[1]
elif option in NET_CONFIG_COMMANDS:
@@ -229,7 +235,7 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
ifaces[iface]['auto'] = False
-def _parse_deb_config(path):
+def parse_deb_config(path):
"""Parses a debian network configuration file."""
ifaces = {}
with open(path, "r") as fp:
@@ -241,6 +247,56 @@ def _parse_deb_config(path):
return ifaces
+def convert_eni_data(eni_data):
+ # return a network config representation of what is in eni_data
+ ifaces = {}
+ _parse_deb_config_data(ifaces, eni_data, src_dir=None, src_path=None)
+ return _ifaces_to_net_config_data(ifaces)
+
+
+def _ifaces_to_net_config_data(ifaces):
+ """Return network config that represents the ifaces data provided.
+ ifaces = parse_deb_config("/etc/network/interfaces")
+ config = ifaces_to_net_config_data(ifaces)
+ state = parse_net_config_data(config)."""
+ devs = {}
+ for name, data in ifaces.items():
+ # devname is 'eth0' for name='eth0:1'
+ devname = name.partition(":")[0]
+ if devname == "lo":
+ # currently provding 'lo' in network config results in duplicate
+ # entries. in rendered interfaces file. so skip it.
+ continue
+ if devname not in devs:
+ devs[devname] = {'type': 'physical', 'name': devname,
+ 'subnets': []}
+ # this isnt strictly correct, but some might specify
+ # hwaddress on a nic for matching / declaring name.
+ if 'hwaddress' in data:
+ devs[devname]['mac_address'] = data['hwaddress']
+ subnet = {'_orig_eni_name': name, 'type': data['method']}
+ if data.get('auto'):
+ subnet['control'] = 'auto'
+ else:
+ subnet['control'] = 'manual'
+
+ if data.get('method') == 'static':
+ subnet['address'] = data['address']
+
+ for copy_key in ('netmask', 'gateway', 'broadcast'):
+ if copy_key in data:
+ subnet[copy_key] = data[copy_key]
+
+ if 'dns' in data:
+ for n in ('nameservers', 'search'):
+ if n in data['dns'] and data['dns'][n]:
+ subnet['dns_' + n] = data['dns'][n]
+ devs[devname]['subnets'].append(subnet)
+
+ return {'version': 1,
+ 'config': [devs[d] for d in sorted(devs)]}
+
+
class Renderer(object):
"""Renders network information in a /etc/network/interfaces format."""
@@ -298,11 +354,10 @@ class Renderer(object):
route_line += " %s %s" % (mapping[k], route[k])
content += up + route_line + eol
content += down + route_line + eol
-
return content
def _render_interfaces(self, network_state):
- '''Given state, emit etc/network/interfaces content'''
+ '''Given state, emit etc/network/interfaces content.'''
content = ""
interfaces = network_state.get('interfaces')
@@ -345,6 +400,8 @@ class Renderer(object):
content += _iface_start_entry(iface, index)
content += _iface_add_subnet(iface, subnet)
content += _iface_add_attrs(iface)
+ for route in subnet.get('routes', []):
+ content += self._render_route(route, indent=" ")
else:
# ifenslave docs say to auto the slave devices
if 'bond-master' in iface:
@@ -360,19 +417,24 @@ class Renderer(object):
return content
def render_network_state(
- self, target, network_state,
- eni="etc/network/interfaces", links_prefix=LINKS_FNAME_PREFIX,
- netrules='etc/udev/rules.d/70-persistent-net.rules'):
+ self, target, network_state, eni="etc/network/interfaces",
+ links_prefix=LINKS_FNAME_PREFIX,
+ netrules='etc/udev/rules.d/70-persistent-net.rules',
+ writer=None):
- fpeni = os.path.join(target, eni)
- write_file(fpeni, self._render_interfaces(network_state))
+ fpeni = os.path.sep.join((target, eni,))
+ util.ensure_dir(os.path.dirname(fpeni))
+ util.write_file(fpeni, self._render_interfaces(network_state))
if netrules:
- netrules = os.path.join(target, netrules)
- write_file(netrules, self._render_persistent_net(network_state))
+ netrules = os.path.sep.join((target, netrules,))
+ util.ensure_dir(os.path.dirname(netrules))
+ util.write_file(netrules,
+ self._render_persistent_net(network_state))
if links_prefix:
- self._render_systemd_links(target, network_state, links_prefix)
+ self._render_systemd_links(target, network_state,
+ links_prefix=links_prefix)
def _render_systemd_links(self, target, network_state,
links_prefix=LINKS_FNAME_PREFIX):
@@ -392,4 +454,4 @@ class Renderer(object):
"Name=" + iface['name'],
""
])
- write_file(fname, content)
+ util.write_file(fname, content)
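The new convert_eni_data() maps classic interfaces(5) stanzas onto version-1 network config, roughly as follows (a sketch; output abridged):

    from cloudinit.net.eni import convert_eni_data

    eni_data = """
    auto eth0
    iface eth0 inet static
        address 192.168.1.5
        netmask 255.255.255.0
        gateway 192.168.1.1
    """
    config = convert_eni_data(eni_data)
    # approximately:
    # {'version': 1,
    #  'config': [{'type': 'physical', 'name': 'eth0', 'subnets': [
    #      {'_orig_eni_name': 'eth0', 'type': 'static', 'control': 'auto',
    #       'address': '192.168.1.5', 'netmask': '255.255.255.0',
    #       'gateway': '192.168.1.1'}]}]}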
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 32c48229..1e82e75d 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -20,8 +20,8 @@ import functools
import logging
from . import compat
-from . import dump_yaml
-from . import read_yaml_file
+
+from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -49,7 +49,7 @@ def parse_net_config(path, skip_broken=True):
"""Parses a curtin network configuration file and
return network state"""
ns = None
- net_config = read_yaml_file(path)
+ net_config = util.read_conf(path)
if 'network' in net_config:
ns = parse_net_config_data(net_config.get('network'),
skip_broken=skip_broken)
@@ -58,7 +58,7 @@ def parse_net_config(path, skip_broken=True):
def from_state_file(state_file):
network_state = None
- state = read_yaml_file(state_file)
+ state = util.read_conf(state_file)
network_state = NetworkState()
network_state.load(state)
return network_state
@@ -136,7 +136,7 @@ class NetworkState(object):
'config': self.config,
'network_state': self.network_state,
}
- return dump_yaml(state)
+ return util.yaml_dumps(state)
def load(self, state):
if 'version' not in state:
@@ -155,7 +155,7 @@ class NetworkState(object):
setattr(self, key, state[key])
def dump_network_state(self):
- return dump_yaml(self.network_state)
+ return util.yaml_dumps(self.network_state)
def parse_config(self, skip_broken=True):
# rebuild network state
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 33fe78b9..d1f806d6 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -27,8 +27,6 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
-VALID_DSMODES = ("local", "net", "disabled")
-
class DataSourceCloudSigma(sources.DataSource):
"""
@@ -38,7 +36,6 @@ class DataSourceCloudSigma(sources.DataSource):
http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
"""
def __init__(self, sys_cfg, distro, paths):
- self.dsmode = 'local'
self.cepko = Cepko()
self.ssh_public_key = ''
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -84,11 +81,9 @@ class DataSourceCloudSigma(sources.DataSource):
LOG.debug("CloudSigma: Unable to read from serial port")
return False
- dsmode = server_meta.get('cloudinit-dsmode', self.dsmode)
- if dsmode not in VALID_DSMODES:
- LOG.warn("Invalid dsmode %s, assuming default of 'net'", dsmode)
- dsmode = 'net'
- if dsmode == "disabled" or dsmode != self.dsmode:
+ self.dsmode = self._determine_dsmode(
+ [server_meta.get('cloudinit-dsmode')])
+ if self.dsmode == sources.DSMODE_DISABLED:
return False
base64_fields = server_meta.get('base64_fields', '').split(',')
@@ -120,17 +115,13 @@ class DataSourceCloudSigma(sources.DataSource):
return self.metadata['uuid']
-class DataSourceCloudSigmaNet(DataSourceCloudSigma):
- def __init__(self, sys_cfg, distro, paths):
- DataSourceCloudSigma.__init__(self, sys_cfg, distro, paths)
- self.dsmode = 'net'
-
+# Legacy: Must be present in case we load an old pkl object
+DataSourceCloudSigmaNet = DataSourceCloudSigma
# Used to match classes to dependencies. Since this datasource uses the serial
# port network is not really required, so it's okay to load without it, too.
datasources = [
(DataSourceCloudSigma, (sources.DEP_FILESYSTEM)),
- (DataSourceCloudSigmaNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
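Several datasources in this commit now defer to a shared _determine_dsmode() helper on the DataSource base class (added in cloudinit/sources/__init__.py, whose hunks are not shown on this page). Inferred from the call sites, its behaviour is roughly the sketch below; the string values mirror the pre-refactor literals, and the network default is an assumption:

    DSMODE_DISABLED, DSMODE_LOCAL, DSMODE_NETWORK, DSMODE_PASS = (
        "disabled", "local", "net", "pass")

    def determine_dsmode(candidates, default=DSMODE_NETWORK):
        # first candidate that is a known mode wins; None entries and
        # unknown values fall through to the default
        valid = (DSMODE_DISABLED, DSMODE_LOCAL, DSMODE_NETWORK, DSMODE_PASS)
        for candidate in candidates:
            if candidate in valid:
                return candidate
        return default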
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 4478c4e2..cea08de2 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -24,6 +24,8 @@ from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
+from cloudinit.net import eni
+
from cloudinit.sources.helpers import openstack
LOG = logging.getLogger(__name__)
@@ -34,7 +36,6 @@ DEFAULT_MODE = 'pass'
DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
}
-VALID_DSMODES = ("local", "net", "pass", "disabled")
FS_TYPES = ('vfat', 'iso9660')
LABEL_TYPES = ('config-2',)
POSSIBLE_MOUNTS = ('sr', 'cd')
@@ -46,12 +47,13 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
self.source = None
- self.dsmode = 'local'
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
self.version = None
self.ec2_metadata = None
self._network_config = None
self.network_json = None
+ self.network_eni = None
+ self.known_macs = None
self.files = {}
def __str__(self):
@@ -97,39 +99,23 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
md = results.get('metadata', {})
md = util.mergemanydict([md, DEFAULT_METADATA])
- user_dsmode = results.get('dsmode', None)
- if user_dsmode not in VALID_DSMODES + (None,):
- LOG.warn("User specified invalid mode: %s", user_dsmode)
- user_dsmode = None
- dsmode = get_ds_mode(cfgdrv_ver=results['version'],
- ds_cfg=self.ds_cfg.get('dsmode'),
- user=user_dsmode)
+ self.dsmode = self._determine_dsmode(
+ [results.get('dsmode'), self.ds_cfg.get('dsmode'),
+ sources.DSMODE_PASS if results['version'] == 1 else None])
- if dsmode == "disabled":
- # most likely user specified
+ if self.dsmode == sources.DSMODE_DISABLED:
return False
- # TODO(smoser): fix this, its dirty.
- # we want to do some things (writing files and network config)
- # only on first boot, and even then, we want to do so in the
- # local datasource (so they happen earlier) even if the configured
- # dsmode is 'net' or 'pass'. To do this, we check the previous
- # instance-id
+ # This is legacy and sneaky. If dsmode is 'pass' then write
+ # 'injected files' and apply legacy ENI network format.
prev_iid = get_previous_iid(self.paths)
cur_iid = md['instance-id']
- if prev_iid != cur_iid and \
- self.dsmode == "local" and not skip_first_boot:
+ if (prev_iid != cur_iid and self.dsmode == sources.DSMODE_PASS
+ and not skip_first_boot):
on_first_boot(results, distro=self.distro)
-
- # dsmode != self.dsmode here if:
- # * dsmode = "pass", pass means it should only copy files and then
- # pass to another datasource
- # * dsmode = "net" and self.dsmode = "local"
- # so that user boothooks would be applied with network, the
- # local datasource just gets out of the way, and lets the net claim
- if dsmode != self.dsmode:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
+ LOG.debug("%s: not claiming datasource, dsmode=%s", self,
+ self.dsmode)
return False
self.source = found
@@ -147,12 +133,11 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
LOG.warn("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
- try:
- self.network_json = results.get('networkdata')
- except ValueError as e:
- LOG.warn("Invalid content in network-data: %s", e)
- self.network_json = None
-
+ # network_config is an /etc/network/interfaces formatted file and is
+ # obsolete compared to networkdata (from network_data.json) but both
+ # might be present.
+ self.network_eni = results.get("network_config")
+ self.network_json = results.get('networkdata')
return True
def check_instance_id(self, sys_cfg):
@@ -163,42 +148,17 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
def network_config(self):
if self._network_config is None:
if self.network_json is not None:
+ LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
- self.network_json)
+ self.network_json, known_macs=self.known_macs)
+ elif self.network_eni is not None:
+ self._network_config = eni.convert_eni_data(self.network_eni)
+ LOG.debug("network config provided via converted eni data")
+ else:
+ LOG.debug("no network configuration available")
return self._network_config
-class DataSourceConfigDriveNet(DataSourceConfigDrive):
- def __init__(self, sys_cfg, distro, paths):
- DataSourceConfigDrive.__init__(self, sys_cfg, distro, paths)
- self.dsmode = 'net'
-
-
-def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
- """Determine what mode should be used.
- valid values are 'pass', 'disabled', 'local', 'net'
- """
- # user passed data trumps everything
- if user is not None:
- return user
-
- if ds_cfg is not None:
- return ds_cfg
-
- # at config-drive version 1, the default behavior was pass. That
- # meant to not use use it as primary data source, but expect a ec2 metadata
- # source. for version 2, we default to 'net', which means
- # the DataSourceConfigDriveNet, would be used.
- #
- # this could change in the future. If there was definitive metadata
- # that indicated presense of an openstack metadata service, then
- # we could change to 'pass' by default also. The motivation for that
- # would be 'cloud-init query' as the web service could be more dynamic
- if cfgdrv_ver == 1:
- return "pass"
- return "net"
-
-
def read_config_drive(source_dir):
reader = openstack.ConfigDriveReader(source_dir)
finders = [
@@ -232,9 +192,12 @@ def on_first_boot(data, distro=None):
% (type(data)))
net_conf = data.get("network_config", '')
if net_conf and distro:
- LOG.debug("Updating network interfaces from config drive")
+ LOG.warn("Updating network interfaces from config drive")
distro.apply_network(net_conf)
- files = data.get('files', {})
+ write_injected_files(data.get('files'))
+
+
+def write_injected_files(files):
if files:
LOG.debug("Writing %s injected files", len(files))
for (filename, content) in files.items():
@@ -294,10 +257,12 @@ def find_candidate_devs(probe_optical=True):
return devices
+# Legacy: Must be present in case we load an old pkl object
+DataSourceConfigDriveNet = DataSourceConfigDrive
+
# Used to match classes to dependencies
datasources = [
- (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
- (DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceConfigDrive, (sources.DEP_FILESYSTEM,)),
]
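The network_json branch above consumes OpenStack's network_data.json document; an abridged example of its shape (field values are illustrative):

    network_json = {
        "links": [
            {"id": "tap1a81968a-79", "type": "bridge", "mtu": None,
             "ethernet_mac_address": "fa:16:3e:ed:9a:59"},
        ],
        "networks": [
            {"id": "network0", "type": "ipv4_dhcp",
             "link": "tap1a81968a-79"},
        ],
        "services": [{"type": "dns", "address": "8.8.8.8"}],
    }
    # openstack.convert_net_json(network_json, known_macs=...) maps this
    # onto the same version-1 config format, using the known_macs
    # {mac: name} mapping (cf. get_interfaces_by_mac earlier in this
    # diff) to resolve link MACs to local device names.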
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 48c61a90..7e30118c 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -24,6 +24,7 @@ import errno
import os
from cloudinit import log as logging
+from cloudinit import net
from cloudinit import sources
from cloudinit import util
@@ -35,7 +36,6 @@ class DataSourceNoCloud(sources.DataSource):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.dsmode = 'local'
self.seed = None
- self.cmdline_id = "ds=nocloud"
self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'),
os.path.join(paths.seed_dir, 'nocloud-net')]
self.seed_dir = None
@@ -58,7 +58,7 @@ class DataSourceNoCloud(sources.DataSource):
try:
# Parse the kernel command line, getting data passed in
md = {}
- if parse_cmdline_data(self.cmdline_id, md):
+ if load_cmdline_data(md):
found.append("cmdline")
mydata = _merge_new_seed(mydata, {'meta-data': md})
except Exception:
@@ -123,12 +123,6 @@ class DataSourceNoCloud(sources.DataSource):
mydata = _merge_new_seed(mydata, seeded)
- # For seed from a device, the default mode is 'net'.
- # that is more likely to be what is desired. If they want
- # dsmode of local, then they must specify that.
- if 'dsmode' not in mydata['meta-data']:
- mydata['meta-data']['dsmode'] = "net"
-
LOG.debug("Using data from %s", dev)
found.append(dev)
break
@@ -144,7 +138,6 @@ class DataSourceNoCloud(sources.DataSource):
if len(found) == 0:
return False
- seeded_network = None
# The special argument "seedfrom" indicates we should
# attempt to seed the userdata / metadata from its value
# its primarily value is in allowing the user to type less
@@ -160,10 +153,6 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Seed from %s not supported by %s", seedfrom, self)
return False
- if (mydata['meta-data'].get('network-interfaces') or
- mydata.get('network-config')):
- seeded_network = self.dsmode
-
# This could throw errors, but the user told us to do it
# so if errors are raised, let them raise
(md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
@@ -179,35 +168,21 @@ class DataSourceNoCloud(sources.DataSource):
mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
defaults])
- netdata = {'format': None, 'data': None}
- if mydata['meta-data'].get('network-interfaces'):
- netdata['format'] = 'interfaces'
- netdata['data'] = mydata['meta-data']['network-interfaces']
- elif mydata.get('network-config'):
- netdata['format'] = 'network-config'
- netdata['data'] = mydata['network-config']
-
- # if this is the local datasource or 'seedfrom' was used
- # and the source of the seed was self.dsmode.
- # Then see if there is network config to apply.
- # note this is obsolete network-interfaces style seeding.
- if self.dsmode in ("local", seeded_network):
- if mydata['meta-data'].get('network-interfaces'):
- LOG.debug("Updating network interfaces from %s", self)
- self.distro.apply_network(
- mydata['meta-data']['network-interfaces'])
-
- if mydata['meta-data']['dsmode'] == self.dsmode:
- self.seed = ",".join(found)
- self.metadata = mydata['meta-data']
- self.userdata_raw = mydata['user-data']
- self.vendordata_raw = mydata['vendor-data']
- self._network_config = mydata['network-config']
- return True
+ self.dsmode = self._determine_dsmode(
+ [mydata['meta-data'].get('dsmode')])
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- mydata['meta-data']['dsmode'])
- return False
+ if self.dsmode == sources.DSMODE_DISABLED:
+ LOG.debug("%s: not claiming datasource, dsmode=%s", self,
+ self.dsmode)
+ return False
+
+ self.seed = ",".join(found)
+ self.metadata = mydata['meta-data']
+ self.userdata_raw = mydata['user-data']
+ self.vendordata_raw = mydata['vendor-data']
+ self._network_config = mydata['network-config']
+ self._network_eni = mydata['meta-data'].get('network-interfaces')
+ return True
def check_instance_id(self, sys_cfg):
# quickly (local check only) if self.instance_id is still valid
@@ -227,6 +202,9 @@ class DataSourceNoCloud(sources.DataSource):
@property
def network_config(self):
+ if self._network_config is None:
+ if self._network_eni is not None:
+ self._network_config = net.convert_eni_data(self._network_eni)
return self._network_config
@@ -254,8 +232,22 @@ def _quick_read_instance_id(cmdline_id, dirs=None):
return None
+def load_cmdline_data(fill, cmdline=None):
+ pairs = [("ds=nocloud", sources.DSMODE_LOCAL),
+ ("ds=nocloud-net", sources.DSMODE_NETWORK)]
+ for idstr, dsmode in pairs:
+ if parse_cmdline_data(idstr, fill, cmdline):
+ # if dsmode was explicitly in the command line, then
+ # prefer it to the dsmode based on the command line id
+ if 'dsmode' not in fill:
+ fill['dsmode'] = dsmode
+ return True
+ return False
+
+
# Returns true or false indicating if cmdline indicated
-# that this module should be used
+# that this module should be used. Updates dictionary 'fill'
+# with data that was found.
# Example cmdline:
# root=LABEL=uec-rootfs ro ds=nocloud
def parse_cmdline_data(ds_id, fill, cmdline=None):
@@ -319,9 +311,7 @@ def _merge_new_seed(cur, seeded):
class DataSourceNoCloudNet(DataSourceNoCloud):
def __init__(self, sys_cfg, distro, paths):
DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
- self.cmdline_id = "ds=nocloud-net"
self.supported_seed_starts = ("http://", "https://", "ftp://")
- self.dsmode = "net"
# Used to match classes to dependencies
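With load_cmdline_data() above, both kernel command-line spellings keep working; for example (boot line abridged):

    from cloudinit import sources
    from cloudinit.sources.DataSourceNoCloud import load_cmdline_data

    fill = {}
    load_cmdline_data(fill, cmdline="root=/dev/sda1 ro ds=nocloud-net")
    # fill['dsmode'] == sources.DSMODE_NETWORK: the 'ds=nocloud-net' id
    # implies network mode, while an explicit 'dsmode=...' argument in
    # the cmdline seed would take precedence.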
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 681f3a96..8f85b115 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -37,16 +37,13 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
DEFAULT_IID = "iid-dsopennebula"
-DEFAULT_MODE = 'net'
DEFAULT_PARSEUSER = 'nobody'
CONTEXT_DISK_FILES = ["context.sh"]
-VALID_DSMODES = ("local", "net", "disabled")
class DataSourceOpenNebula(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.dsmode = 'local'
self.seed = None
self.seed_dir = os.path.join(paths.seed_dir, 'opennebula')
@@ -93,52 +90,27 @@ class DataSourceOpenNebula(sources.DataSource):
md = util.mergemanydict([md, defaults])
# check for valid user specified dsmode
- user_dsmode = results['metadata'].get('DSMODE', None)
- if user_dsmode not in VALID_DSMODES + (None,):
- LOG.warn("user specified invalid mode: %s", user_dsmode)
- user_dsmode = None
-
- # decide dsmode
- if user_dsmode:
- dsmode = user_dsmode
- elif self.ds_cfg.get('dsmode'):
- dsmode = self.ds_cfg.get('dsmode')
- else:
- dsmode = DEFAULT_MODE
-
- if dsmode == "disabled":
- # most likely user specified
- return False
-
- # apply static network configuration only in 'local' dsmode
- if ('network-interfaces' in results and self.dsmode == "local"):
- LOG.debug("Updating network interfaces from %s", self)
- self.distro.apply_network(results['network-interfaces'])
+ self.dsmode = self._determine_dsmode(
+ [results.get('DSMODE'), self.ds_cfg.get('dsmode')])
- if dsmode != self.dsmode:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
+ if self.dsmode == sources.DSMODE_DISABLED:
return False
self.seed = seed
+ self.network_eni = results.get("network_config")
self.metadata = md
self.userdata_raw = results.get('userdata')
return True
def get_hostname(self, fqdn=False, resolve_ip=None):
if resolve_ip is None:
- if self.dsmode == 'net':
+ if self.dsmode == sources.DSMODE_NETWORK:
resolve_ip = True
else:
resolve_ip = False
return sources.DataSource.get_hostname(self, fqdn, resolve_ip)
-class DataSourceOpenNebulaNet(DataSourceOpenNebula):
- def __init__(self, sys_cfg, distro, paths):
- DataSourceOpenNebula.__init__(self, sys_cfg, distro, paths)
- self.dsmode = 'net'
-
-
class NonContextDiskDir(Exception):
pass
@@ -443,10 +415,12 @@ def read_context_disk_dir(source_dir, asuser=None):
return results
+# Legacy: Must be present in case we load an old pkl object
+DataSourceOpenNebulaNet = DataSourceOpenNebula
+
# Used to match classes to dependencies
datasources = [
(DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )),
- (DataSourceOpenNebulaNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 3af17b10..c06d17f3 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -33,13 +33,11 @@ DEFAULT_IID = "iid-dsopenstack"
DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
}
-VALID_DSMODES = ("net", "disabled")
class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
- self.dsmode = 'net'
self.metadata_address = None
self.ssl_details = util.fetch_ssl_details(self.paths)
self.version = None
@@ -103,7 +101,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
self.metadata_address = url2base.get(avail_url)
return bool(avail_url)
- def get_data(self):
+ def get_data(self, retries=5, timeout=5):
try:
if not self.wait_for_metadata_service():
return False
@@ -115,7 +113,9 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
'Crawl of openstack metadata service',
read_metadata_service,
args=[self.metadata_address],
- kwargs={'ssl_details': self.ssl_details})
+ kwargs={'ssl_details': self.ssl_details,
+ 'retries': retries,
+ 'timeout': timeout})
except openstack.NonReadable:
return False
except (openstack.BrokenMetadata, IOError):
@@ -123,11 +123,8 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
self.metadata_address)
return False
- user_dsmode = results.get('dsmode', None)
- if user_dsmode not in VALID_DSMODES + (None,):
- LOG.warn("User specified invalid mode: %s", user_dsmode)
- user_dsmode = None
- if user_dsmode == 'disabled':
+ self.dsmode = self._determine_dsmode([results.get('dsmode')])
+ if self.dsmode == sources.DSMODE_DISABLED:
return False
md = results.get('metadata', {})
@@ -153,8 +150,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
return sources.instance_id_matches_system_uuid(self.get_instance_id())
-def read_metadata_service(base_url, ssl_details=None):
- reader = openstack.MetadataReader(base_url, ssl_details=ssl_details)
+def read_metadata_service(base_url, ssl_details=None,
+ timeout=5, retries=5):
+ reader = openstack.MetadataReader(base_url, ssl_details=ssl_details,
+ timeout=timeout, retries=retries)
return reader.read_v2()
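The new keyword arguments simply thread through to openstack.MetadataReader, so callers and tests can tighten timing; a usage sketch with the default values spelled out:

    from cloudinit.sources.DataSourceOpenStack import read_metadata_service

    results = read_metadata_service("http://169.254.169.254",
                                    retries=5, timeout=5)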
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index c7641eb3..08bc132b 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -32,13 +32,13 @@
# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
# Comments with "@datadictionary" are snippets of the definition
+import base64
import binascii
-import contextlib
+import json
import os
import random
import re
import socket
-import stat
from cloudinit import log as logging
from cloudinit import serial
@@ -62,14 +62,36 @@ SMARTOS_ATTRIB_MAP = {
'operator-script': ('sdc:operator-script', False),
}
+SMARTOS_ATTRIB_JSON = {
+ # Cloud-init Key : (SmartOS Key known JSON)
+ 'network-data': 'sdc:nics',
+}
+
+SMARTOS_ENV_LX_BRAND = "lx-brand"
+SMARTOS_ENV_KVM = "kvm"
+
DS_NAME = 'SmartOS'
DS_CFG_PATH = ['datasource', DS_NAME]
+NO_BASE64_DECODE = [
+ 'iptables_disable',
+ 'motd_sys_info',
+ 'root_authorized_keys',
+ 'sdc:datacenter_name',
+ 'sdc:uuid'
+ 'user-data',
+ 'user-script',
+]
+
+METADATA_SOCKFILE = '/native/.zonecontrol/metadata.sock'
+SERIAL_DEVICE = '/dev/ttyS1'
+SERIAL_TIMEOUT = 60
+
# BUILT-IN DATASOURCE CONFIGURATION
# The following is the built-in configuration. If the values
# are not set via the system configuration, then these default
# will be used:
# serial_device: which serial device to use for the meta-data
-# seed_timeout: how long to wait on the device
+# serial_timeout: how long to wait on the device
# no_base64_decode: values which are not base64 encoded and
# are fetched directly from SmartOS, not meta-data values
# base64_keys: meta-data keys that are delivered in base64
@@ -79,16 +101,10 @@ DS_CFG_PATH = ['datasource', DS_NAME]
# fs_setup: describes how to format the ephemeral drive
#
BUILTIN_DS_CONFIG = {
- 'serial_device': '/dev/ttyS1',
- 'metadata_sockfile': '/native/.zonecontrol/metadata.sock',
- 'seed_timeout': 60,
- 'no_base64_decode': ['root_authorized_keys',
- 'motd_sys_info',
- 'iptables_disable',
- 'user-data',
- 'user-script',
- 'sdc:datacenter_name',
- 'sdc:uuid'],
+ 'serial_device': SERIAL_DEVICE,
+ 'serial_timeout': SERIAL_TIMEOUT,
+ 'metadata_sockfile': METADATA_SOCKFILE,
+ 'no_base64_decode': NO_BASE64_DECODE,
'base64_keys': [],
'base64_all': False,
'disk_aliases': {'ephemeral0': '/dev/vdb'},
@@ -152,59 +168,41 @@ LEGACY_USER_D = "/var/db"
class DataSourceSmartOS(sources.DataSource):
+ _unset = "_unset"
+ smartos_type = _unset
+ md_client = _unset
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.is_smartdc = None
self.ds_cfg = util.mergemanydict([
self.ds_cfg,
util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
BUILTIN_DS_CONFIG])
self.metadata = {}
+ self.network_data = None
+ self._network_config = None
- # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
- # report 'BrandZ virtual linux' as the kernel version
- if os.uname()[3].lower() == 'brandz virtual linux':
- LOG.debug("Host is SmartOS, guest in Zone")
- self.is_smartdc = True
- self.smartos_type = 'lx-brand'
- self.cfg = {}
- self.seed = self.ds_cfg.get("metadata_sockfile")
- else:
- self.is_smartdc = True
- self.smartos_type = 'kvm'
- self.seed = self.ds_cfg.get("serial_device")
- self.cfg = BUILTIN_CLOUD_CONFIG
- self.seed_timeout = self.ds_cfg.get("serial_timeout")
- self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
- self.b64_keys = self.ds_cfg.get('base64_keys')
- self.b64_all = self.ds_cfg.get('base64_all')
self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
+ self._init()
+
def __str__(self):
root = sources.DataSource.__str__(self)
- return "%s [seed=%s]" % (root, self.seed)
-
- def _get_seed_file_object(self):
- if not self.seed:
- raise AttributeError("seed device is not set")
-
- if self.smartos_type == 'lx-brand':
- if not stat.S_ISSOCK(os.stat(self.seed).st_mode):
- LOG.debug("Seed %s is not a socket", self.seed)
- return None
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- sock.connect(self.seed)
- return sock.makefile('rwb')
- else:
- if not stat.S_ISCHR(os.stat(self.seed).st_mode):
- LOG.debug("Seed %s is not a character device")
- return None
- ser = serial.Serial(self.seed, timeout=self.seed_timeout)
- if not ser.isOpen():
- raise SystemError("Unable to open %s" % self.seed)
- return ser
- return None
+ return "%s [client=%s]" % (root, self.md_client)
+
+ def _init(self):
+ if self.smartos_type == self._unset:
+ self.smartos_type = get_smartos_environ()
+ if self.smartos_type is None:
+ self.md_client = None
+
+ if self.md_client == self._unset:
+ self.md_client = jmc_client_factory(
+ smartos_type=self.smartos_type,
+ metadata_sockfile=self.ds_cfg['metadata_sockfile'],
+ serial_device=self.ds_cfg['serial_device'],
+ serial_timeout=self.ds_cfg['serial_timeout'])
def _set_provisioned(self):
'''Mark the instance provisioning state as successful.
@@ -223,50 +221,26 @@ class DataSourceSmartOS(sources.DataSource):
'/'.join([svc_path, 'provision_success']))
def get_data(self):
+ self._init()
+
md = {}
ud = ""
- if not device_exists(self.seed):
- LOG.debug("No metadata device '%s' found for SmartOS datasource",
- self.seed)
+ if not self.smartos_type:
+ LOG.debug("Not running on smartos")
return False
- uname_arch = os.uname()[4]
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmidcode in dmi_data() crashes kvm process
- LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)")
+ if not self.md_client.exists():
+ LOG.debug("No metadata device '%r' found for SmartOS datasource",
+ self.md_client)
return False
- # SDC KVM instances will provide dmi data, LX-brand does not
- if self.smartos_type == 'kvm':
- dmi_info = dmi_data()
- if dmi_info is None:
- LOG.debug("No dmidata utility found")
- return False
-
- system_type = dmi_info
- if 'smartdc' not in system_type.lower():
- LOG.debug("Host is not on SmartOS. system_type=%s",
- system_type)
- return False
- LOG.debug("Host is SmartOS, guest in KVM")
-
- seed_obj = self._get_seed_file_object()
- if seed_obj is None:
- LOG.debug('Seed file object not found.')
- return False
- with contextlib.closing(seed_obj) as seed:
- b64_keys = self.query('base64_keys', seed, strip=True, b64=False)
- if b64_keys is not None:
- self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
+ for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
+ smartos_noun, strip = attribute
+ md[ci_noun] = self.md_client.get(smartos_noun, strip=strip)
- b64_all = self.query('base64_all', seed, strip=True, b64=False)
- if b64_all is not None:
- self.b64_all = util.is_true(b64_all)
-
- for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
- smartos_noun, strip = attribute
- md[ci_noun] = self.query(smartos_noun, seed, strip=strip)
+ for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items():
+ md[ci_noun] = self.md_client.get_json(smartos_noun)
# @datadictionary: This key may contain a program that is written
# to a file in the filesystem of the guest on each boot and then
@@ -316,6 +290,7 @@ class DataSourceSmartOS(sources.DataSource):
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
self.vendordata_raw = md['vendor-data']
+ self.network_data = md['network-data']
self._set_provisioned()
return True
@@ -324,69 +299,20 @@ class DataSourceSmartOS(sources.DataSource):
return self.ds_cfg['disk_aliases'].get(name)
def get_config_obj(self):
- return self.cfg
+ if self.smartos_type == SMARTOS_ENV_KVM:
+ return BUILTIN_CLOUD_CONFIG
+ return {}
def get_instance_id(self):
return self.metadata['instance-id']
- def query(self, noun, seed_file, strip=False, default=None, b64=None):
- if b64 is None:
- if noun in self.smartos_no_base64:
- b64 = False
- elif self.b64_all or noun in self.b64_keys:
- b64 = True
-
- return self._query_data(noun, seed_file, strip=strip,
- default=default, b64=b64)
-
- def _query_data(self, noun, seed_file, strip=False,
- default=None, b64=None):
- """Makes a request via "GET <NOUN>"
-
- In the response, the first line is the status, while subsequent
- lines are is the value. A blank line with a "." is used to
- indicate end of response.
-
- If the response is expected to be base64 encoded, then set
- b64encoded to true. Unfortantely, there is no way to know if
- something is 100% encoded, so this method relies on being told
- if the data is base64 or not.
- """
-
- if not noun:
- return False
-
- response = JoyentMetadataClient(seed_file).get_metadata(noun)
-
- if response is None:
- return default
-
- if b64 is None:
- b64 = self._query_data('b64-%s' % noun, seed_file, b64=False,
- default=False, strip=True)
- b64 = util.is_true(b64)
-
- resp = None
- if b64 or strip:
- resp = "".join(response).rstrip()
- else:
- resp = "".join(response)
-
- if b64:
- try:
- return util.b64d(resp)
- # Bogus input produces different errors in Python 2 and 3;
- # catch both.
- except (TypeError, binascii.Error):
- LOG.warn("Failed base64 decoding key '%s'", noun)
- return resp
-
- return resp
-
-
-def device_exists(device):
- """Symplistic method to determine if the device exists or not"""
- return os.path.exists(device)
+ @property
+ def network_config(self):
+ if self._network_config is None:
+ if self.network_data is not None:
+ self._network_config = (
+ convert_smartos_network_data(self.network_data))
+ return self._network_config
class JoyentMetadataFetchException(Exception):
@@ -405,8 +331,11 @@ class JoyentMetadataClient(object):
r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
r'( (?P<payload>.+))?)')
- def __init__(self, metasource):
- self.metasource = metasource
+ def __init__(self, smartos_type=None, fp=None):
+ if smartos_type is None:
+ smartos_type = get_smartos_environ()
+ self.smartos_type = smartos_type
+ self.fp = fp
def _checksum(self, body):
return '{0:08x}'.format(
@@ -434,37 +363,229 @@ class JoyentMetadataClient(object):
LOG.debug('Value "%s" found.', value)
return value
- def get_metadata(self, metadata_key):
- LOG.debug('Fetching metadata key "%s"...', metadata_key)
+ def request(self, rtype, param=None):
request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
- message_body = '{0} GET {1}'.format(request_id,
- util.b64e(metadata_key))
+ message_body = ' '.join((request_id, rtype,))
+ if param:
+ message_body += ' ' + base64.b64encode(param.encode()).decode()
msg = 'V2 {0} {1} {2}\n'.format(
len(message_body), self._checksum(message_body), message_body)
LOG.debug('Writing "%s" to metadata transport.', msg)
- self.metasource.write(msg.encode('ascii'))
- self.metasource.flush()
+
+ need_close = False
+ if not self.fp:
+ self.open_transport()
+ need_close = True
+
+ self.fp.write(msg.encode('ascii'))
+ self.fp.flush()
response = bytearray()
- response.extend(self.metasource.read(1))
+ response.extend(self.fp.read(1))
while response[-1:] != b'\n':
- response.extend(self.metasource.read(1))
+ response.extend(self.fp.read(1))
+
+ if need_close:
+ self.close_transport()
+
response = response.rstrip().decode('ascii')
LOG.debug('Read "%s" from metadata transport.', response)
if 'SUCCESS' not in response:
return None
- return self._get_value_from_frame(request_id, response)
+ value = self._get_value_from_frame(request_id, response)
+ return value
+
+ def get(self, key, default=None, strip=False):
+ result = self.request(rtype='GET', param=key)
+ if result is None:
+ return default
+ if result and strip:
+ result = result.strip()
+ return result
+
+ def get_json(self, key, default=None):
+ result = self.get(key, default=default)
+ if result is None:
+ return default
+ return json.loads(result)
+
+ def list(self):
+ result = self.request(rtype='KEYS')
+ if result:
+ result = result.split('\n')
+ return result
+
+ def put(self, key, val):
+ param = b' '.join([base64.b64encode(i.encode())
+ for i in (key, val)]).decode()
+ return self.request(rtype='PUT', param=param)
+
+ def delete(self, key):
+ return self.request(rtype='DELETE', param=key)
+
+ def close_transport(self):
+ if self.fp:
+ self.fp.close()
+ self.fp = None
+
+ def __enter__(self):
+ if self.fp:
+ return self
+ self.open_transport()
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close_transport()
+ return
+
+ def open_transport(self):
+ raise NotImplementedError
+
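A minimal sketch of the V2 framing implemented by request() above. The
crc32-based checksum is an assumption here, since the body of _checksum is
cut off by the hunk (the unit tests do import crc32 from binascii):

    import base64
    import random
    from binascii import crc32

    def frame_get(key):
        # body is "<request-id> GET <base64(key)>"
        request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
        body = '{0} GET {1}'.format(
            request_id, base64.b64encode(key.encode()).decode())
        # assumed checksum: 8 hex digits of crc32 over the body
        cksum = '{0:08x}'.format(crc32(body.encode('utf-8')) & 0xffffffff)
        return 'V2 {0} {1} {2}\n'.format(len(body), cksum, body)

    # frame_get('sdc:uuid') -> 'V2 <len> <cksum> <request-id> GET <b64-key>\n'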
+
+class JoyentMetadataSocketClient(JoyentMetadataClient):
+ def __init__(self, socketpath):
+ self.socketpath = socketpath
+
+ def open_transport(self):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.connect(self.socketpath)
+ self.fp = sock.makefile('rwb')
+
+ def exists(self):
+ return os.path.exists(self.socketpath)
+
+ def __repr__(self):
+ return "%s(socketpath=%s)" % (self.__class__.__name__, self.socketpath)
+
+
+class JoyentMetadataSerialClient(JoyentMetadataClient):
+ def __init__(self, device, timeout=10, smartos_type=None):
+ super(JoyentMetadataSerialClient, self).__init__(smartos_type)
+ self.device = device
+ self.timeout = timeout
+
+ def exists(self):
+ return os.path.exists(self.device)
+
+ def open_transport(self):
+ ser = serial.Serial(self.device, timeout=self.timeout)
+ if not ser.isOpen():
+ raise SystemError("Unable to open %s" % self.device)
+ self.fp = ser
+
+ def __repr__(self):
+ return "%s(device=%s, timeout=%s)" % (
+ self.__class__.__name__, self.device, self.timeout)
+
+
+class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
+ """V1 of the protocol was not safe for all values.
+ Thus, we allowed the user to pass values in as base64 encoded.
+ Users may still reasonably expect to be able to send base64 data
+ and have it transparently decoded. So even though the V2 format is
+ now used, and is safe (using base64 itself), we keep legacy support.
+
+ The way for a user to do this was:
+ a.) specify 'base64_keys' key whose value is a comma delimited
+ list of keys that were base64 encoded.
+ b.) base64_all: string interpreted as a boolean that indicates
+ if all keys are base64 encoded.
+ c.) set a key named b64-<keyname> with a boolean indicating that
+ <keyname> is base64 encoded."""
+
+ def __init__(self, device, timeout=10, smartos_type=None):
+ s = super(JoyentMetadataLegacySerialClient, self)
+ s.__init__(device, timeout, smartos_type)
+ self.base64_keys = None
+ self.base64_all = None
+
+ def _init_base64_keys(self, reset=False):
+ if reset:
+ self.base64_keys = None
+ self.base64_all = None
+
+ keys = None
+ if self.base64_all is None:
+ keys = self.list()
+ if 'base64_all' in keys:
+ self.base64_all = util.is_true(self._get("base64_all"))
+ else:
+ self.base64_all = False
+
+ if self.base64_all:
+ # short circuit if base64_all is true
+ return
+
+ if self.base64_keys is None:
+ if keys is None:
+ keys = self.list()
+ b64_keys = set()
+ if 'base64_keys' in keys:
+ b64_keys = set(self._get("base64_keys").split(","))
+
+ # now add any b64-<keyname> that has a true value
+ for key in [k[len("b64-"):] for k in keys if k.startswith("b64-")]:
+ if util.is_true(self._get("b64-" + key)):
+ b64_keys.add(key)
+ else:
+ if key in b64_keys:
+ b64_keys.remove(key)
+
+ self.base64_keys = b64_keys
+
+ def _get(self, key, default=None, strip=False):
+ return (super(JoyentMetadataLegacySerialClient, self).
+ get(key, default=default, strip=strip))
+
+ def is_b64_encoded(self, key, reset=False):
+ if key in NO_BASE64_DECODE:
+ return False
+
+ self._init_base64_keys(reset=reset)
+ if self.base64_all:
+ return True
+
+ return key in self.base64_keys
+
+ def get(self, key, default=None, strip=False):
+ mdefault = object()
+ val = self._get(key, strip=False, default=mdefault)
+ if val is mdefault:
+ return default
+
+ if self.is_b64_encoded(key):
+ try:
+ val = base64.b64decode(val.encode()).decode()
+ # Bogus input produces different errors in Python 2 and 3
+ except (TypeError, binascii.Error):
+ LOG.warn("Failed base64 decoding key '%s': %s", key, val)
+
+ if strip:
+ val = val.strip()
+
+ return val
+
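To illustrate the lookup order implemented above, a small sketch against a
hypothetical metadata store (all keys and values here are invented for the
example):

    import base64

    # stand-in for what the serial store might hold
    meta = {
        'base64_keys': 'banner',                      # (a) explicit list
        'b64-secret': 'true',                         # (c) per-key flag
        'banner': base64.b64encode(b'welcome').decode(),
        'secret': base64.b64encode(b's3kr1t').decode(),
        'hostname': 'test-host',                      # plain value
    }

    # against such a store, get() would resolve:
    #   is_b64_encoded('banner')   -> True  (listed in base64_keys)
    #   is_b64_encoded('secret')   -> True  (b64-secret is true)
    #   is_b64_encoded('hostname') -> False (returned verbatim; keys in
    #                                        NO_BASE64_DECODE always are)
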
+def jmc_client_factory(
+ smartos_type=None, metadata_sockfile=METADATA_SOCKFILE,
+ serial_device=SERIAL_DEVICE, serial_timeout=SERIAL_TIMEOUT,
+ uname_version=None):
-def dmi_data():
- sys_type = util.read_dmi_data("system-product-name")
+ if smartos_type is None:
+ smartos_type = get_smartos_environ(uname_version)
- if not sys_type:
+ if smartos_type is None:
return None
+ elif smartos_type == SMARTOS_ENV_KVM:
+ return JoyentMetadataLegacySerialClient(
+ device=serial_device, timeout=serial_timeout,
+ smartos_type=smartos_type)
+ elif smartos_type == SMARTOS_ENV_LX_BRAND:
+ return JoyentMetadataSocketClient(socketpath=metadata_sockfile)
- return sys_type
+ raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
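A usage sketch for the factory; transport selection follows the module
defaults, and the metadata keys are representative:

    from cloudinit.sources.DataSourceSmartOS import jmc_client_factory

    client = jmc_client_factory()  # legacy serial client on KVM,
                                   # socket client on lx-brand, else None
    if client is not None and client.exists():
        with client as c:          # opens the transport, closes it on exit
            hostname = c.get('sdc:hostname', strip=True)
            nics = c.get_json('sdc:nics')
            keys = c.list()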
def write_boot_content(content, content_f, link=None, shebang=False,
@@ -520,15 +641,141 @@ def write_boot_content(content, content_f, link=None, shebang=False,
util.ensure_dir(os.path.dirname(link))
os.symlink(content_f, link)
except IOError as e:
- util.logexc(LOG, "failed establishing content link", e)
+ util.logexc(LOG, "failed establishing content link: %s", e)
+
+
+def get_smartos_environ(uname_version=None, product_name=None,
+ uname_arch=None):
+ uname = os.uname()
+ if uname_arch is None:
+ uname_arch = uname[4]
+
+ if uname_arch.startswith("arm") or uname_arch == "aarch64":
+ return None
+
+ # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
+ # report 'BrandZ virtual linux' as the kernel version
+ if uname_version is None:
+ uname_version = uname[3]
+ if uname_version.lower() == 'brandz virtual linux':
+ return SMARTOS_ENV_LX_BRAND
+
+ if product_name is None:
+ system_type = util.read_dmi_data("system-product-name")
+ else:
+ system_type = product_name
+
+ if system_type and 'smartdc' in system_type.lower():
+ return SMARTOS_ENV_KVM
+
+ return None
+
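Detection then reduces to two probes; the product-name strings below are
representative values, not an exhaustive list:

    from cloudinit.sources.DataSourceSmartOS import get_smartos_environ

    get_smartos_environ(uname_version='BrandZ virtual linux')
    # -> SMARTOS_ENV_LX_BRAND (case-insensitive kernel-version match)

    get_smartos_environ(product_name='SmartDC HVM')
    # -> SMARTOS_ENV_KVM on a non-BrandZ host ('smartdc' in product name)

    get_smartos_environ(product_name='OpenStack Nova')
    # -> None (also always None on arm/aarch64, see LP: #1243287)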
+
+# Convert SmartOS 'sdc:nics' data to network_config yaml
+def convert_smartos_network_data(network_data=None):
+ """Return a dictionary of network_config by parsing provided
+ SmartOS sdc:nics configuration data
+
+ sdc:nics data is a dictionary of properties of a nic and the ip
+ configuration desired. Additional nic dictionaries are appended
+ to the list.
+
+ Converting the format is straightforward, though it does include
+ duplicate information as well as data which appears to be relevant
+ to the host OS rather than the guest.
+
+ For each entry in the nics list returned from query sdc:nics, we
+ create a type: physical entry, and extract the interface properties:
+ 'mac' -> 'mac_address', 'mtu', 'interface' -> 'name'. The remaining
+ keys are related to ip configuration. For each ip in the 'ips' list
+ we create a subnet entry under 'subnets', pairing the ip with the
+ corresponding entry in the 'gateways' list.
+ """
+
+ valid_keys = {
+ 'physical': [
+ 'mac_address',
+ 'mtu',
+ 'name',
+ 'params',
+ 'subnets',
+ 'type',
+ ],
+ 'subnet': [
+ 'address',
+ 'broadcast',
+ 'dns_nameservers',
+ 'dns_search',
+ 'gateway',
+ 'metric',
+ 'netmask',
+ 'pointopoint',
+ 'routes',
+ 'scope',
+ 'type',
+ ],
+ }
+
+ config = []
+ for nic in network_data:
+ cfg = {k: v for k, v in nic.items()
+ if k in valid_keys['physical']}
+ cfg.update({
+ 'type': 'physical',
+ 'name': nic['interface']})
+ if 'mac' in nic:
+ cfg.update({'mac_address': nic['mac']})
+
+ subnets = []
+ for ip, gw in zip(nic['ips'], nic['gateways']):
+ subnet = {k: v for k, v in nic.items()
+ if k in valid_keys['subnet']}
+ subnet.update({
+ 'type': 'static',
+ 'address': ip,
+ 'gateway': gw,
+ })
+ subnets.append(subnet)
+ cfg.update({'subnets': subnets})
+ config.append(cfg)
+
+ return {'version': 1, 'config': config}
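A worked example, using a nic trimmed from the sdc:nics sample in the unit
tests:

    from cloudinit.sources.DataSourceSmartOS import (
        convert_smartos_network_data)

    nics = [{'mac': '90:b8:d0:f5:e4:f5', 'interface': 'net0', 'mtu': 1500,
             'ips': ['8.12.42.102/24'], 'gateways': ['8.12.42.1']}]
    convert_smartos_network_data(nics)
    # -> {'version': 1,
    #     'config': [{'type': 'physical', 'name': 'net0', 'mtu': 1500,
    #                 'mac_address': '90:b8:d0:f5:e4:f5',
    #                 'subnets': [{'type': 'static',
    #                              'address': '8.12.42.102/24',
    #                              'gateway': '8.12.42.1'}]}]}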
# Used to match classes to dependencies
datasources = [
- (DataSourceSmartOS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceSmartOS, (sources.DEP_FILESYSTEM, )),
]
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+ import sys
+ jmc = jmc_client_factory()
+ if jmc is None:
+ print("Do not appear to be on smartos.")
+ sys.exit(1)
+ if len(sys.argv) == 1:
+ keys = (list(SMARTOS_ATTRIB_JSON.keys()) +
+ list(SMARTOS_ATTRIB_MAP.keys()))
+ else:
+ keys = sys.argv[1:]
+
+ data = {}
+ for key in keys:
+ if key in SMARTOS_ATTRIB_JSON:
+ keyname = SMARTOS_ATTRIB_JSON[key]
+ data[key] = jmc.get_json(keyname)
+ else:
+ if key in SMARTOS_ATTRIB_MAP:
+ keyname, strip = SMARTOS_ATTRIB_MAP[key]
+ else:
+ keyname, strip = (key, False)
+ data[key] = jmc.get(keyname, strip=strip)
+
+ print(json.dumps(data, indent=1))
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 43e4fd57..2a6b8d90 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -34,6 +34,13 @@ from cloudinit import util
from cloudinit.filters import launch_index
from cloudinit.reporting import events
+DSMODE_DISABLED = "disabled"
+DSMODE_LOCAL = "local"
+DSMODE_NETWORK = "net"
+DSMODE_PASS = "pass"
+
+VALID_DSMODES = [DSMODE_DISABLED, DSMODE_LOCAL, DSMODE_NETWORK]
+
DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
DS_PREFIX = 'DataSource'
@@ -57,6 +64,7 @@ class DataSource(object):
self.userdata_raw = None
self.vendordata = None
self.vendordata_raw = None
+ self.dsmode = DSMODE_NETWORK
# find the datasource config name.
# remove 'DataSource' from classname on front, and remove 'Net' on end.
@@ -223,10 +231,35 @@ class DataSource(object):
# quickly (local check only) if self.instance_id is still
return False
+ @staticmethod
+ def _determine_dsmode(candidates, default=None, valid=None):
+ # return the first candidate that is not None; warn if it is not valid
+ if default is None:
+ default = DSMODE_NETWORK
+
+ if valid is None:
+ valid = VALID_DSMODES
+
+ for candidate in candidates:
+ if candidate is None:
+ continue
+ if candidate in valid:
+ return candidate
+ else:
+ LOG.warn("invalid dsmode '%s', using default=%s",
+ candidate, default)
+ return default
+
+ return default
+
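For example (candidates are typically gathered, in priority order, from
datasource config, metadata, and system config; the DSMODE_PASS case is an
assumption about how a caller would widen the valid set):

    from cloudinit.sources import DataSource, DSMODE_PASS, VALID_DSMODES

    DataSource._determine_dsmode([None, 'local'])  # -> 'local'
    DataSource._determine_dsmode([None, None])     # -> 'net' (the default)
    DataSource._determine_dsmode(['bogus'])        # -> 'net', after a warning
    DataSource._determine_dsmode(
        [], default=DSMODE_PASS,
        valid=VALID_DSMODES + [DSMODE_PASS])       # -> 'pass'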
@property
def network_config(self):
return None
+ @property
+ def first_instance_boot(self):
+ return
+
def normalize_pubkey_data(pubkey_data):
keys = []
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 1af99118..494335b3 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -28,6 +28,7 @@ import six
from cloudinit import ec2_utils
from cloudinit import log as logging
+from cloudinit import net
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
@@ -190,14 +191,14 @@ class BaseReader(object):
versions_available)
return selected_version
- def _read_content_path(self, item):
+ def _read_content_path(self, item, decode=False):
path = item.get('content_path', '').lstrip("/")
path_pieces = path.split("/")
valid_pieces = [p for p in path_pieces if len(p)]
if not valid_pieces:
raise BrokenMetadata("Item %s has no valid content path" % (item))
path = self._path_join(self.base_path, "openstack", *path_pieces)
- return self._path_read(path)
+ return self._path_read(path, decode=decode)
def read_v2(self):
"""Reads a version 2 formatted location.
@@ -298,7 +299,8 @@ class BaseReader(object):
net_item = metadata.get("network_config", None)
if net_item:
try:
- results['network_config'] = self._read_content_path(net_item)
+ content = self._read_content_path(net_item, decode=True)
+ results['network_config'] = content
except IOError as e:
raise BrokenMetadata("Failed to read network"
" configuration: %s" % (e))
@@ -333,8 +335,8 @@ class ConfigDriveReader(BaseReader):
components = [base] + list(add_ons)
return os.path.join(*components)
- def _path_read(self, path):
- return util.load_file(path, decode=False)
+ def _path_read(self, path, decode=False):
+ return util.load_file(path, decode=decode)
def _fetch_available_versions(self):
if self._versions is None:
@@ -446,7 +448,7 @@ class MetadataReader(BaseReader):
self._versions = found
return self._versions
- def _path_read(self, path):
+ def _path_read(self, path, decode=False):
def should_retry_cb(_request_args, cause):
try:
@@ -463,7 +465,10 @@ class MetadataReader(BaseReader):
ssl_details=self.ssl_details,
timeout=self.timeout,
exception_cb=should_retry_cb)
- return response.contents
+ if decode:
+ return response.contents.decode()
+ else:
+ return response.contents
def _path_join(self, base, *add_ons):
return url_helper.combine_url(base, *add_ons)
@@ -474,7 +479,8 @@ class MetadataReader(BaseReader):
retries=self.retries)
-def convert_net_json(network_json):
+# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
+def convert_net_json(network_json=None, known_macs=None):
"""Return a dictionary of network_config by parsing provided
OpenStack ConfigDrive NetworkData json format
@@ -501,8 +507,10 @@ def convert_net_json(network_json):
enumerate a dictionary of valid keys for network_yaml and apply filtering
to drop these superflous keys from the network_config yaml.
"""
+ if network_json is None:
+ return None
- # Dict of network_config key for filtering network_json
+ # dict of network_config key for filtering network_json
valid_keys = {
'physical': [
'name',
@@ -510,6 +518,7 @@ def convert_net_json(network_json):
'mac_address',
'subnets',
'params',
+ 'mtu',
],
'subnet': [
'type',
@@ -519,7 +528,6 @@ def convert_net_json(network_json):
'metric',
'gateway',
'pointopoint',
- 'mtu',
'scope',
'dns_nameservers',
'dns_search',
@@ -534,13 +542,19 @@ def convert_net_json(network_json):
config = []
for link in links:
subnets = []
- cfg = dict((k, v) for k, v in link.items()
- if k in valid_keys['physical'])
- cfg.update({'name': link['id']})
- for network in [net for net in networks
- if net['link'] == link['id']]:
- subnet = dict((k, v) for k, v in network.items()
- if k in valid_keys['subnet'])
+ cfg = {k: v for k, v in link.items()
+ if k in valid_keys['physical']}
+ # 'name' is not in openstack spec yet, but we will support it if it is
+ # present. The 'id' in the spec is currently implemented as the host
+ # nic's name, meaning something like 'tap-adfasdffd'. We do not want
+ # to name guest devices with such ugly names.
+ if 'name' in link:
+ cfg['name'] = link['name']
+
+ for network in [n for n in networks
+ if n['link'] == link['id']]:
+ subnet = {k: v for k, v in network.items()
+ if k in valid_keys['subnet']}
if 'dhcp' in network['type']:
t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
subnet.update({
@@ -551,13 +565,13 @@ def convert_net_json(network_json):
'type': 'static',
'address': network.get('ip_address'),
})
- if network['type'] == 'ipv6':
- subnet['ipv6'] = True
- else:
- subnet['ipv4'] = True
+ if network['type'] == 'ipv4':
+ subnet['ipv4'] = True
+ if network['type'] == 'ipv6':
+ subnet['ipv6'] = True
subnets.append(subnet)
cfg.update({'subnets': subnets})
- if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']:
+ if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']:
cfg.update({
'type': 'physical',
'mac_address': link['ethernet_mac_address']})
@@ -580,19 +594,29 @@ def convert_net_json(network_json):
'vlan_id': link['vlan_id'],
'mac_address': link['vlan_mac_address'],
})
- elif link['type'] in ['bridge']:
- cfg.update({
- 'type': 'bridge',
- 'mac_address': link['ethernet_mac_address'],
- 'mtu': link['mtu']})
else:
raise ValueError(
'Unknown network_data link type: %s' % link['type'])
config.append(cfg)
+ need_names = [d for d in config
+ if d.get('type') == 'physical' and 'name' not in d]
+
+ if need_names:
+ if known_macs is None:
+ known_macs = net.get_interfaces_by_mac()
+
+ for d in need_names:
+ mac = d.get('mac_address')
+ if not mac:
+ raise ValueError("No mac_address or name entry for %s" % d)
+ if mac not in known_macs:
+ raise ValueError("Unable to find a system nic for %s" % d)
+ d['name'] = known_macs[mac]
+
for service in services:
- cfg = copy.deepcopy(service)
+ cfg = service
cfg.update({'type': 'nameserver'})
config.append(cfg)
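For instance, a link that lacks a 'name' is matched to a host nic through
its mac address. A sketch, with illustrative values and the same top-level
keys as the tests' NETWORK_DATA_2:

    from cloudinit.sources.helpers.openstack import convert_net_json

    net_json = {
        'links': [{'id': 'tap1a81968a-79', 'type': 'vif', 'mtu': 1500,
                   'ethernet_mac_address': 'fa:16:3e:ed:9a:59',
                   'vif_id': 'vif-foo1'}],
        'networks': [],
        'services': [],
    }
    cfg = convert_net_json(net_json,
                           known_macs={'fa:16:3e:ed:9a:59': 'eth0'})
    # cfg['config'][0] -> {'type': 'physical', 'name': 'eth0', 'mtu': 1500,
    #                      'mac_address': 'fa:16:3e:ed:9a:59', 'subnets': []}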
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index b837009a..9b3cc71c 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -53,6 +53,7 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
NULL_DATA_SOURCE = None
+NO_PREVIOUS_INSTANCE_ID = "NO_PREVIOUS_INSTANCE_ID"
class Init(object):
@@ -68,6 +69,7 @@ class Init(object):
# Changed only when a fetch occurs
self.datasource = NULL_DATA_SOURCE
self.ds_restored = False
+ self._previous_iid = None
if reporter is None:
reporter = events.ReportEventStack(
@@ -214,6 +216,31 @@ class Init(object):
cfg_list = self.cfg.get('datasource_list') or []
return (cfg_list, pkg_list)
+ def _restore_from_checked_cache(self, existing):
+ if existing not in ("check", "trust"):
+ raise ValueError("Unexpected value for existing: %s" % existing)
+
+ ds = self._restore_from_cache()
+ if not ds:
+ return (None, "no cache found")
+
+ run_iid_fn = self.paths.get_runpath('instance_id')
+ if os.path.exists(run_iid_fn):
+ run_iid = util.load_file(run_iid_fn).strip()
+ else:
+ run_iid = None
+
+ if run_iid == ds.get_instance_id():
+ return (ds, "restored from cache with run check: %s" % ds)
+ elif existing == "trust":
+ return (ds, "restored from cache: %s" % ds)
+ else:
+ if (hasattr(ds, 'check_instance_id') and
+ ds.check_instance_id(self.cfg)):
+ return (ds, "restored from checked cache: %s" % ds)
+ else:
+ return (None, "cache invalid in datasource: %s" % ds)
+
def _get_data_source(self, existing):
if self.datasource is not NULL_DATA_SOURCE:
return self.datasource
@@ -222,19 +249,9 @@ class Init(object):
name="check-cache",
description="attempting to read from cache [%s]" % existing,
parent=self.reporter) as myrep:
- ds = self._restore_from_cache()
- if ds and existing == "trust":
- myrep.description = "restored from cache: %s" % ds
- elif ds and existing == "check":
- if (hasattr(ds, 'check_instance_id') and
- ds.check_instance_id(self.cfg)):
- myrep.description = "restored from checked cache: %s" % ds
- else:
- myrep.description = "cache invalid in datasource: %s" % ds
- ds = None
- else:
- myrep.description = "no cache found"
+ ds, desc = self._restore_from_checked_cache(existing)
+ myrep.description = desc
self.ds_restored = bool(ds)
LOG.debug(myrep.description)
@@ -302,23 +319,41 @@ class Init(object):
# What the instance id was and is...
iid = self.datasource.get_instance_id()
- previous_iid = None
iid_fn = os.path.join(dp, 'instance-id')
- try:
- previous_iid = util.load_file(iid_fn).strip()
- except Exception:
- pass
- if not previous_iid:
- previous_iid = iid
+
+ previous_iid = self.previous_iid()
util.write_file(iid_fn, "%s\n" % iid)
+ util.write_file(self.paths.get_runpath('instance_id'), "%s\n" % iid)
util.write_file(os.path.join(dp, 'previous-instance-id'),
"%s\n" % (previous_iid))
+
+ self._write_to_cache()
# Ensure needed components are regenerated
# after change of instance which may cause
# change of configuration
self._reset()
return iid
+ def previous_iid(self):
+ if self._previous_iid is not None:
+ return self._previous_iid
+
+ dp = self.paths.get_cpath('data')
+ iid_fn = os.path.join(dp, 'instance-id')
+ try:
+ self._previous_iid = util.load_file(iid_fn).strip()
+ except Exception:
+ self._previous_iid = NO_PREVIOUS_INSTANCE_ID
+
+ LOG.debug("previous iid found to be %s", self._previous_iid)
+ return self._previous_iid
+
+ def is_new_instance(self):
+ previous = self.previous_iid()
+ ret = (previous == NO_PREVIOUS_INSTANCE_ID or
+ previous != self.datasource.get_instance_id())
+ return ret
+
def fetch(self, existing="check"):
return self._get_data_source(existing=existing)
@@ -333,8 +368,6 @@ class Init(object):
reporter=self.reporter)
def update(self):
- if not self._write_to_cache():
- return
self._store_userdata()
self._store_vendordata()
@@ -594,15 +627,27 @@ class Init(object):
return (ncfg, loc)
return (net.generate_fallback_config(), "fallback")
- def apply_network_config(self):
+ def apply_network_config(self, bring_up):
netcfg, src = self._find_networking_config()
if netcfg is None:
LOG.info("network config is disabled by %s", src)
return
- LOG.info("Applying network configuration from %s: %s", src, netcfg)
try:
- return self.distro.apply_network_config(netcfg)
+ LOG.debug("applying net config names for %s" % netcfg)
+ self.distro.apply_network_config_names(netcfg)
+ except Exception as e:
+ LOG.warn("Failed to rename devices: %s", e)
+
+ if (self.datasource is not NULL_DATA_SOURCE and
+ not self.is_new_instance()):
+ LOG.debug("not a new instance. network config is not applied.")
+ return
+
+ LOG.info("Applying network configuration from %s bringup=%s: %s",
+ src, bring_up, netcfg)
+ try:
+ return self.distro.apply_network_config(netcfg, bring_up=bring_up)
except NotImplementedError:
LOG.warn("distro '%s' does not implement apply_network_config. "
"networking may not be configured properly." %
@@ -795,16 +840,16 @@ class Modules(object):
def fetch_base_config():
base_cfgs = []
default_cfg = util.get_builtin_cfg()
- kern_contents = util.read_cc_from_cmdline()
-
- # Kernel/cmdline parameters override system config
- if kern_contents:
- base_cfgs.append(util.load_yaml(kern_contents, default={}))
# Anything in your conf.d location??
# or the 'default' cloud.cfg location???
base_cfgs.append(util.read_conf_with_confd(CLOUD_CONFIG))
+ # Kernel/cmdline parameters override system config
+ kern_contents = util.read_cc_from_cmdline()
+ if kern_contents:
+ base_cfgs.append(util.load_yaml(kern_contents, default={}))
+
# And finally the default gets to play
if default_cfg:
base_cfgs.append(default_cfg)
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index a9231482..8a6ad417 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -142,6 +142,11 @@ def render_to_file(fn, outfn, params, mode=0o644):
util.write_file(outfn, contents, mode=mode)
+def render_string_to_file(content, outfn, params, mode=0o644):
+ contents = render_string(content, params)
+ util.write_file(outfn, contents, mode=mode)
+
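A usage sketch for the new helper. The '$variable' form assumes the
templater's basic renderer applies when no '## template:' header is present,
and the output path is hypothetical:

    from cloudinit import templater

    templater.render_string_to_file(
        "deb $mirror $codename main restricted\n",
        "/tmp/sources.list",
        {'mirror': 'http://archive.ubuntu.com/ubuntu', 'codename': 'xenial'})
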
+
def render_string(content, params):
if not params:
params = {}
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 851c1fcb..8873264d 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -338,6 +338,16 @@ def rand_str(strlen=32, select_from=None):
return "".join([random.choice(select_from) for _x in range(0, strlen)])
+def rand_dict_key(dictionary, postfix=None):
+ if not postfix:
+ postfix = ""
+ while True:
+ newkey = rand_str(strlen=8) + "_" + postfix
+ if newkey not in dictionary:
+ break
+ return newkey
+
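A usage sketch; the apt-sources use case is an assumption, the helper itself
only guarantees a key that is absent from the dictionary:

    from cloudinit import util

    sources = {'byobu-ppa.list': {'source': 'ppa:byobu/ppa'}}
    key = util.rand_dict_key(sources, postfix='list')
    # key is e.g. 'wxyz1234_list', guaranteed not to collide
    sources[key] = {'source': 'deb $MIRROR $RELEASE multiverse'}
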
+
def read_conf(fname):
try:
return load_yaml(load_file(fname), default={})
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index 1236796c..62b297bc 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -72,14 +72,87 @@ apt_pipelining: False
# then apt_mirror above will have no effect
apt_preserve_sources_list: true
+# Provide a custom template for rendering sources.list
+# Default: a default template for Ubuntu/Debian will be used as packaged in
+# Ubuntu: /etc/cloud/templates/sources.list.ubuntu.tmpl
+# Debian: /etc/cloud/templates/sources.list.debian.tmpl
+# Others: n/a
+# This will follow the normal mirror/codename replacement rules before
+# being written to disk.
+apt_custom_sources_list: |
+ ## template:jinja
+ ## Note, this file is written by cloud-init on first boot of an instance
+ ## modifications made here will not survive a re-bundle.
+ ## if you wish to make changes you can:
+ ## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
+ ## or do the same in user-data
+ ## b.) add sources in /etc/apt/sources.list.d
+ ## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
+ deb {{mirror}} {{codename}} main restricted
+ deb-src {{mirror}} {{codename}} main restricted
+
+ # could drop some of the usually used entries
+
+ # could refer to other mirrors
+ deb http://ddebs.ubuntu.com {{codename}} main restricted universe multiverse
+ deb http://ddebs.ubuntu.com {{codename}}-updates main restricted universe multiverse
+ deb http://ddebs.ubuntu.com {{codename}}-proposed main restricted universe multiverse
+
+ # or even more uncommon examples like local or NFS mounted repos,
+ # essentially anything that is compatible with sources.list syntax
+ deb file:/home/apt/debian unstable main contrib non-free
+
# 'source' entries in apt-sources that match this python regex
# expression will be passed to add-apt-repository
add_apt_repo_match: '^[\w-]+:\w'
+# 'apt_sources' is a dictionary
+# The key is the filename and will be prepended by /etc/apt/sources.list.d/ if
+# it doesn't start with a '/'.
+# In certain cases, where no content is written into a sources.list file,
+# the filename will be ignored, yet it can still be used as an index for
+# merging.
+# The value it maps to is a dictionary with the following optional entries:
+# source: a sources.list entry (some variable replacements apply)
+# keyid: providing a key to import via shortid or fingerprint
+# key: providing a raw PGP key
+# keyserver: keyserver to fetch keys from, default is keyserver.ubuntu.com
+# filename: for compatibility with the older format (now the key to this
+# dictionary is the filename). If specified, this overwrites the
+# filename given as key.
+
+# the new "filename: {specification-dictionary}, filename2: ..." format allows
+# better merging between multiple input files than a list like:
+# cloud-config1
+# sources:
+#  s1: {'key': 'key1', 'source': 'source1'}
+# cloud-config2
+# sources:
+#  s2: {'key': 'key2'}
+#  s1: {filename: 'foo'}
+# this would be merged to
+#sources:
+# s1:
+# filename: foo
+# key: key1
+# source: source1
+# s2:
+# key: key2
+# Be aware that this style of merging is not the default (for backward
+# compatibility reasons). You should specify the following merge_how to get
+# this more complete and modern merging behaviour:
+# merge_how: "list()+dict()+str()"
+# This would then also be equivalent to the config merging used in curtin
+# (https://launchpad.net/curtin).
+
+# for more details see below in the various examples
+
apt_sources:
- - source: "deb http://ppa.launchpad.net/byobu/ppa/ubuntu karmic main"
+ byobu-ppa.list:
+ source: "deb http://ppa.launchpad.net/byobu/ppa/ubuntu karmic main"
keyid: F430BBA5 # GPG key ID published on a key server
- filename: byobu-ppa.list
+ # adding a source.list line, importing a gpg key for a given key id and
+ # storing it in the file /etc/apt/sources.list.d/byobu-ppa.list
# PPA shortcut:
# * Setup correct apt sources.list line
@@ -87,7 +160,9 @@ apt_sources:
#
# See https://help.launchpad.net/Packaging/PPA for more information
# this requires 'add-apt-repository'
- - source: "ppa:smoser/ppa" # Quote the string
+ # because of this, the filename key is ignored in this case
+ ignored1:
+ source: "ppa:smoser/ppa" # Quote the string
# Custom apt repository:
# * all that is required is 'source'
@@ -95,29 +170,60 @@ apt_sources:
# * [optional] Import the apt signing key from the keyserver
# * Defaults:
# + keyserver: keyserver.ubuntu.com
- # + filename: cloud_config_sources.list
#
# See sources.list man page for more information about the format
- - source: deb http://archive.ubuntu.com/ubuntu karmic-backports main universe multiverse restricted
+ my-repo.list:
+ source: deb http://archive.ubuntu.com/ubuntu karmic-backports main universe multiverse restricted
# sources can use $MIRROR and $RELEASE and they will be replaced
# with the local mirror for this cloud, and the running release
# the entry below would be possibly turned into:
- # - source: deb http://us-east-1.ec2.archive.ubuntu.com/ubuntu natty multiverse
- - source: deb $MIRROR $RELEASE multiverse
+ # source: deb http://us-east-1.ec2.archive.ubuntu.com/ubuntu natty multiverse
+ my-repo-2.list:
+ source: deb $MIRROR $RELEASE multiverse
# this would have the same end effect as 'ppa:byobu/ppa'
- - source: "deb http://ppa.launchpad.net/byobu/ppa/ubuntu karmic main"
+ my-repo-3.list:
+ source: "deb http://ppa.launchpad.net/byobu/ppa/ubuntu karmic main"
keyid: F430BBA5 # GPG key ID published on a key server
filename: byobu-ppa.list
+ # this would only import the key without adding a ppa or other source spec
+ # since this doesn't generate a source.list file the filename key is ignored
+ ignored2:
+ keyid: F430BBA5 # GPG key ID published on a key server
+
+ # In general keyids can also be specified via their long fingerprints
+ # since this doesn't generate a source.list file the filename key is ignored
+ ignored3:
+ keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77
+
# Custom apt repository:
# * The apt signing key can also be specified
# by providing a pgp public key block
- # * Providing the PBG key here is the most robust method for
+ # * Providing the PGP key here is the most robust method for
# specifying a key, as it removes dependency on a remote key server
+ my-repo-4.list:
+ source: deb http://ppa.launchpad.net/alestic/ppa/ubuntu karmic main
+ key: | # The value needs to start with -----BEGIN PGP PUBLIC KEY BLOCK-----
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: SKS 1.0.10
+
+ mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6
+ qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj
+ 9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y
+ IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H
+ 5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP
+ t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o
+ uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey
+ =Y2oI
+ -----END PGP PUBLIC KEY BLOCK-----
- - source: deb http://ppa.launchpad.net/alestic/ppa/ubuntu karmic main
+ # Custom gpg key:
+ # * As with keyid, a key may also be specified without a related source.
+ # * all other facts mentioned above still apply
+ # since this doesn't generate a source.list file the filename key is ignored
+ ignored4:
key: | # The value needs to start with -----BEGIN PGP PUBLIC KEY BLOCK-----
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.0.10
@@ -132,6 +238,7 @@ apt_sources:
=Y2oI
-----END PGP PUBLIC KEY BLOCK-----
+
## apt config via system_info:
# under the 'system_info', you can further customize cloud-init's interaction
# with apt.
diff --git a/doc/merging.rst b/doc/merging.rst
index d4d5cd05..afe1a6dd 100644
--- a/doc/merging.rst
+++ b/doc/merging.rst
@@ -1,15 +1,16 @@
Overview
--------
-This was done because it has been a common feature request that there be a
-way to specify how cloud-config yaml "dictionaries" are merged together when
-there are multiple yamls to merge together (say when performing an #include).
+This was implemented because it has been a common feature request that there be
+a way to specify how cloud-config yaml "dictionaries" provided as user-data are
+merged together when there are multiple yamls (say when performing an
+#include).
Since previously the merging algorithm was very simple and would only overwrite
and not append lists, or strings, and so on, it was decided to create a new and
improved way to merge dictionaries (and their contained objects) together in a
-way that is customizable, thus allowing for users who provide cloud-config data
-to determine exactly how there objects will be merged.
+way that is customizable, thus allowing users who provide cloud-config
+user-data to determine exactly how their objects will be merged.
For example.
@@ -19,13 +20,13 @@ For example.
run_cmd:
- bash1
- bash2
-
+
#cloud-config (2)
run_cmd:
- bash3
- bash4
-The previous way of merging the following 2 objects would result in a final
+The previous way of merging the following 2 objects would result in a final
cloud-config object that contains the following.
.. code-block:: yaml
@@ -56,7 +57,7 @@ Customizability
Since the above merging algorithm may not always be the desired merging
algorithm (like how the previous merging algorithm was not always the preferred
one) the concept of customizing how merging can be done was introduced through
-a new concept call 'merge classes'.
+a new concept called 'merge classes'.
A merge class is a class definition which provides functions that can be used
to merge a given type with another given type.
@@ -69,7 +70,7 @@ An example of one of these merging classes is the following:
def __init__(self, merger, opts):
self._merger = merger
self._overwrite = 'overwrite' in opts
-
+
# This merging algorithm will attempt to merge with
# another dictionary, on encountering any other type of object
# it will not merge with said object, but will instead return
@@ -129,12 +130,12 @@ for your own usage.
definition are the following (in order), 'merge_how', 'merge_type'.
String format
-********
+*************
The string format that is expected is the following.
::
-
+
classname1(option1,option2)+classname2(option3,option4)....
The class name there will be connected to class names used when looking for the
@@ -144,11 +145,11 @@ on construction of that class.
For example, the default string that is used when none is provided is the following:
::
-
+
list()+dict()+str()
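In practice the string is carried in the user-data itself, for example:

::

 #cloud-config
 merge_how: "list()+dict()+str()"
 runcmd:
  - bash3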
Dictionary format
-********
+*****************
In cases where a dictionary can be used to specify the same information as the
string format (ie option #2 of above) it can be used, for example.
@@ -171,7 +172,7 @@ for every cloud-config that I provide, what exactly happens?
The answer is that when merging, a stack of 'merging classes' is kept. The
first one on that stack is the default set of merging classes; this set of
mergers will be used when the first cloud-config is merged with the initial empty
-cloud-config dictionary. If the cloud-config that was just merged provided a
+cloud-config dictionary. If the cloud-config that was just merged provided a
set of merging classes (via the above formats) then those merging classes will
be pushed onto the stack. Now if there is a second cloud-config to be merged then
the merging classes from the cloud-config before the first will be used (not the
@@ -181,8 +182,13 @@ cloud-config dictionary coming after it.
Other uses
----------
-The default merging algorithm for merging 'conf.d' yaml files (which form a initial
-yaml config for cloud-init) was also changed to use this mechanism so its full
+In addition to being used for merging user-data sections, the default merging
+algorithm for merging 'conf.d' yaml files (which form an initial yaml config
+for cloud-init) was also changed to use this mechanism so its full
benefits (and customization) can also be used there as well. Other places that
-used the previous merging are also similar now extensible (metadata merging for
-example).
+used the previous merging are also, similarly, now extensible (metadata
+merging, for example).
+
+Note, however, that merge algorithms are not used *across* types of
+configuration. As was the case before merging was implemented,
+user-data will overwrite conf.d configuration without merging.
diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst
index 8a03f3c7..2bd87b16 100644
--- a/doc/rtd/topics/merging.rst
+++ b/doc/rtd/topics/merging.rst
@@ -1,5 +1,5 @@
-=========
-Merging
-=========
+==========================
+Merging User-Data Sections
+==========================
.. include:: ../../merging.rst
diff --git a/packages/bddeb b/packages/bddeb
index c141b1ab..1b0f642c 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -40,6 +40,8 @@ STD_NAMED_PACKAGES = [
'mock',
'nose',
'setuptools',
+ 'flake8',
+ 'hacking',
]
NONSTD_NAMED_PACKAGES = {
'argparse': ('python-argparse', None),
diff --git a/setup.py b/setup.py
index bc1d1fd0..8c8f2402 100755
--- a/setup.py
+++ b/setup.py
@@ -184,7 +184,6 @@ else:
(USR + '/share/doc/cloud-init/examples/seed',
[f for f in glob('doc/examples/seed/*') if is_f(f)]),
(LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]),
- (LIB + '/udev', ['udev/cloud-init-wait']),
]
# Use a subclass for install that handles
# adding on the right init system configuration files
diff --git a/systemd/cloud-init-generator b/systemd/cloud-init-generator
index ae286d58..2d319695 100755
--- a/systemd/cloud-init-generator
+++ b/systemd/cloud-init-generator
@@ -107,9 +107,6 @@ main() {
"ln $CLOUD_SYSTEM_TARGET $link_path"
fi
fi
- # this touches /run/cloud-init/enabled, which is read by
- # udev/cloud-init-wait. If not present, it will exit quickly.
- touch "$LOG_D/$ENABLE"
elif [ "$result" = "$DISABLE" ]; then
if [ -f "$link_path" ]; then
if rm -f "$link_path"; then
diff --git a/test-requirements.txt b/test-requirements.txt
index 7726a4fb..6bf38940 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -4,6 +4,9 @@ mock
nose
unittest2
+# Only needed if you want to know the test times
+# nose-timer
+
# Only really needed on older versions of python
contextlib2
setuptools
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 5395e544..b898e2bd 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -6,6 +6,8 @@ import six
import tempfile
from cloudinit import helpers
+from cloudinit.net import eni
+from cloudinit.net import network_state
from cloudinit import settings
from cloudinit.sources import DataSourceConfigDrive as ds
from cloudinit.sources.helpers import openstack
@@ -64,7 +66,7 @@ NETWORK_DATA = {
'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'},
{'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc',
'ethernet_mac_address': 'fa:16:3e:05:30:fe',
- 'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04'}
+ 'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04', 'name': 'nic0'}
],
'networks': [
{'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp',
@@ -79,6 +81,35 @@ NETWORK_DATA = {
]
}
+NETWORK_DATA_2 = {
+ "services": [
+ {"type": "dns", "address": "1.1.1.191"},
+ {"type": "dns", "address": "1.1.1.4"}],
+ "networks": [
+ {"network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a", "type": "ipv4",
+ "netmask": "255.255.255.248", "link": "eth0",
+ "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
+ "gateway": "2.2.2.9"}],
+ "ip_address": "2.2.2.10", "id": "network0-ipv4"},
+ {"network_id": "ca447c83-6409-499b-aaef-6ad1ae995348", "type": "ipv4",
+ "netmask": "255.255.255.224", "link": "eth1",
+ "routes": [], "ip_address": "3.3.3.24", "id": "network1-ipv4"}],
+ "links": [
+ {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": 1500,
+ "type": "vif", "id": "eth0", "vif_id": "vif-foo1"},
+ {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": 1500,
+ "type": "vif", "id": "eth1", "vif_id": "vif-foo2"}]
+}
+
+
+KNOWN_MACS = {
+ 'fa:16:3e:69:b0:58': 'enp0s1',
+ 'fa:16:3e:d4:57:ad': 'enp0s2',
+ 'fa:16:3e:dd:50:9a': 'foo1',
+ 'fa:16:3e:a8:14:69': 'foo2',
+ 'fa:16:3e:ed:9a:59': 'foo3',
+}
+
CFG_DRIVE_FILES_V2 = {
'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
'ec2/2009-04-04/user-data': USER_DATA,
@@ -358,13 +389,14 @@ class TestNetJson(TestCase):
"""Verify that network_data is present in ds in config-drive-v2."""
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
myds = cfg_ds_from_dir(self.tmp)
- self.assertEqual(myds.network_json, NETWORK_DATA)
+ self.assertIsNotNone(myds.network_json)
def test_network_config_is_converted(self):
"""Verify that network_data is converted and present on ds object."""
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
myds = cfg_ds_from_dir(self.tmp)
- network_config = openstack.convert_net_json(NETWORK_DATA)
+ network_config = openstack.convert_net_json(NETWORK_DATA,
+ known_macs=KNOWN_MACS)
self.assertEqual(myds.network_config, network_config)
def test_network_config_conversions(self):
@@ -404,19 +436,22 @@ class TestNetJson(TestCase):
'subnets': [{'type': 'dhcp4'}],
'type': 'physical',
'mac_address': 'fa:16:3e:69:b0:58',
- 'name': 'tap2ecc7709-b3',
+ 'name': 'enp0s1',
+ 'mtu': None,
},
{
'subnets': [{'type': 'dhcp4'}],
'type': 'physical',
'mac_address': 'fa:16:3e:d4:57:ad',
- 'name': 'tap2f88d109-5b',
+ 'name': 'enp0s2',
+ 'mtu': None,
},
{
'subnets': [{'type': 'dhcp4'}],
'type': 'physical',
'mac_address': 'fa:16:3e:05:30:fe',
- 'name': 'tap1a5382f8-04',
+ 'name': 'nic0',
+ 'mtu': None,
},
{
'type': 'nameserver',
@@ -433,10 +468,10 @@ class TestNetJson(TestCase):
'version': 1,
'config': [
{
- 'name': 'tap1a81968a-79',
+ 'name': 'foo3',
'mac_address': 'fa:16:3e:ed:9a:59',
'mtu': None,
- 'type': 'bridge',
+ 'type': 'physical',
'subnets': [
{
'address': '172.19.1.34',
@@ -459,20 +494,90 @@ class TestNetJson(TestCase):
},
]
for in_data, out_data in zip(in_datas, out_datas):
- self.assertEqual(openstack.convert_net_json(in_data),
- out_data)
+ conv_data = openstack.convert_net_json(in_data,
+ known_macs=KNOWN_MACS)
+ self.assertEqual(out_data, conv_data)
+
+
+class TestConvertNetworkData(TestCase):
+ def setUp(self):
+ super(TestConvertNetworkData, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def _getnames_in_config(self, ncfg):
+ return set([n['name'] for n in ncfg['config']
+ if n['type'] == 'physical'])
+
+ def test_conversion_fills_names(self):
+ ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS)
+ expected = set(['nic0', 'enp0s1', 'enp0s2'])
+ found = self._getnames_in_config(ncfg)
+ self.assertEqual(found, expected)
+
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac):
+ macs = KNOWN_MACS.copy()
+ macs.update({'fa:16:3e:05:30:fe': 'foonic1',
+ 'fa:16:3e:69:b0:58': 'ens1'})
+ get_interfaces_by_mac.return_value = macs
+
+ ncfg = openstack.convert_net_json(NETWORK_DATA)
+ expected = set(['nic0', 'ens1', 'enp0s2'])
+ found = self._getnames_in_config(ncfg)
+ self.assertEqual(found, expected)
+
+ def test_convert_raises_value_error_on_missing_name(self):
+ macs = {'aa:aa:aa:aa:aa:00': 'ens1'}
+ self.assertRaises(ValueError, openstack.convert_net_json,
+ NETWORK_DATA, known_macs=macs)
+
+ def test_conversion_with_route(self):
+ ncfg = openstack.convert_net_json(NETWORK_DATA_2,
+ known_macs=KNOWN_MACS)
+ # not the best test, but see that we get a route in the
+ # network config and that it gets rendered to an ENI file
+ routes = []
+ for n in ncfg['config']:
+ for s in n.get('subnets', []):
+ routes.extend(s.get('routes', []))
+ self.assertIn(
+ {'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '2.2.2.9'},
+ routes)
+ eni_renderer = eni.Renderer()
+ eni_renderer.render_network_state(
+ self.tmp, network_state.parse_net_config_data(ncfg))
+ with open(os.path.join(self.tmp, "etc",
+ "network", "interfaces"), 'r') as f:
+ eni_rendering = f.read()
+ self.assertIn("route add default gw 2.2.2.9", eni_rendering)
def cfg_ds_from_dir(seed_d):
cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None,
helpers.Paths({}))
cfg_ds.seed_dir = seed_d
+ cfg_ds.known_macs = KNOWN_MACS.copy()
if not cfg_ds.get_data(skip_first_boot=True):
raise RuntimeError("Data source did not extract itself from"
" seed directory %s" % seed_d)
return cfg_ds
+def populate_ds_from_read_config(cfg_ds, source, results):
+ """Patch the DataSourceConfigDrive from the results of
+ read_config_drive_dir, hopefully in line with what it would contain
+ had cfg_ds.get_data been successfully called."""
+ cfg_ds.source = source
+ cfg_ds.metadata = results.get('metadata')
+ cfg_ds.ec2_metadata = results.get('ec2-metadata')
+ cfg_ds.userdata_raw = results.get('userdata')
+ cfg_ds.version = results.get('version')
+ cfg_ds.network_json = results.get('networkdata')
+ cfg_ds._network_config = openstack.convert_net_json(
+ cfg_ds.network_json, known_macs=KNOWN_MACS)
+
+
def populate_dir(seed_dir, files):
for (name, content) in files.items():
path = os.path.join(seed_dir, name)
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index 4140d054..5c8592c5 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -135,13 +135,17 @@ def _register_uris(version, ec2_files, ec2_meta, os_files):
body=get_request_callback)
+def _read_metadata_service():
+ return ds.read_metadata_service(BASE_URL, retries=0, timeout=0.1)
+
+
class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
VERSION = 'latest'
@hp.activate
def test_successful(self):
_register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
- f = ds.read_metadata_service(BASE_URL)
+ f = _read_metadata_service()
self.assertEqual(VENDOR_DATA, f.get('vendordata'))
self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
@@ -163,7 +167,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
@hp.activate
def test_no_ec2(self):
_register_uris(self.VERSION, {}, {}, OS_FILES)
- f = ds.read_metadata_service(BASE_URL)
+ f = _read_metadata_service()
self.assertEqual(VENDOR_DATA, f.get('vendordata'))
self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
@@ -178,8 +182,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
if k.endswith('meta_data.json'):
os_files.pop(k, None)
_register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(openstack.NonReadable, ds.read_metadata_service,
- BASE_URL)
+ self.assertRaises(openstack.NonReadable, _read_metadata_service)
@hp.activate
def test_bad_uuid(self):
@@ -190,8 +193,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
if k.endswith('meta_data.json'):
os_files[k] = json.dumps(os_meta)
_register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service,
- BASE_URL)
+ self.assertRaises(openstack.BrokenMetadata, _read_metadata_service)
@hp.activate
def test_userdata_empty(self):
@@ -200,7 +202,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
if k.endswith('user_data'):
os_files.pop(k, None)
_register_uris(self.VERSION, {}, {}, os_files)
- f = ds.read_metadata_service(BASE_URL)
+ f = _read_metadata_service()
self.assertEqual(VENDOR_DATA, f.get('vendordata'))
self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
@@ -213,7 +215,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
if k.endswith('vendor_data.json'):
os_files.pop(k, None)
_register_uris(self.VERSION, {}, {}, os_files)
- f = ds.read_metadata_service(BASE_URL)
+ f = _read_metadata_service()
self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertFalse(f.get('vendordata'))
@@ -225,8 +227,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
if k.endswith('vendor_data.json'):
os_files[k] = '{' # some invalid json
_register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service,
- BASE_URL)
+ self.assertRaises(openstack.BrokenMetadata, _read_metadata_service)
@hp.activate
def test_metadata_invalid(self):
@@ -235,8 +236,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
if k.endswith('meta_data.json'):
os_files[k] = '{' # some invalid json
_register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service,
- BASE_URL)
+ self.assertRaises(openstack.BrokenMetadata, _read_metadata_service)
@hp.activate
def test_datasource(self):
@@ -245,7 +245,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
None,
helpers.Paths({}))
self.assertIsNone(ds_os.version)
- found = ds_os.get_data()
+ found = ds_os.get_data(timeout=0.1, retries=0)
self.assertTrue(found)
self.assertEqual(2, ds_os.version)
md = dict(ds_os.metadata)
@@ -269,7 +269,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
None,
helpers.Paths({}))
self.assertIsNone(ds_os.version)
- found = ds_os.get_data()
+ found = ds_os.get_data(timeout=0.1, retries=0)
self.assertFalse(found)
self.assertIsNone(ds_os.version)
@@ -288,7 +288,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
'timeout': 0,
}
self.assertIsNone(ds_os.version)
- found = ds_os.get_data()
+ found = ds_os.get_data(timeout=0.1, retries=0)
self.assertFalse(found)
self.assertIsNone(ds_os.version)
@@ -311,7 +311,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
'timeout': 0,
}
self.assertIsNone(ds_os.version)
- found = ds_os.get_data()
+ found = ds_os.get_data(timeout=0.1, retries=0)
self.assertFalse(found)
self.assertIsNone(ds_os.version)
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 28f56039..ddbb8fbd 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -25,6 +25,7 @@
from __future__ import print_function
from binascii import crc32
+import json
import os
import os.path
import re
@@ -41,8 +42,49 @@ import six
from cloudinit import helpers as c_helpers
from cloudinit.util import b64e
-from .. import helpers
-from ..helpers import mock
+from ..helpers import mock, FilesystemMockingTestCase, TestCase
+
+SDC_NICS = json.loads("""
+[
+ {
+ "nic_tag": "external",
+ "primary": true,
+ "mtu": 1500,
+ "model": "virtio",
+ "gateway": "8.12.42.1",
+ "netmask": "255.255.255.0",
+ "ip": "8.12.42.102",
+ "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "gateways": [
+ "8.12.42.1"
+ ],
+ "vlan_id": 324,
+ "mac": "90:b8:d0:f5:e4:f5",
+ "interface": "net0",
+ "ips": [
+ "8.12.42.102/24"
+ ]
+ },
+ {
+ "nic_tag": "sdc_overlay/16187209",
+ "gateway": "192.168.128.1",
+ "model": "virtio",
+ "mac": "90:b8:d0:a5:ff:cd",
+ "netmask": "255.255.252.0",
+ "ip": "192.168.128.93",
+ "network_uuid": "4cad71da-09bc-452b-986d-03562a03a0a9",
+ "gateways": [
+ "192.168.128.1"
+ ],
+ "vlan_id": 2,
+ "mtu": 8500,
+ "interface": "net1",
+ "ips": [
+ "192.168.128.93/22"
+ ]
+ }
+]
+""")
MOCK_RETURNS = {
'hostname': 'test-host',
@@ -57,79 +99,66 @@ MOCK_RETURNS = {
'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']),
'user-data': '\n'.join(['something', '']),
'user-script': '\n'.join(['/bin/true', '']),
+ 'sdc:nics': json.dumps(SDC_NICS),
}
DMI_DATA_RETURN = 'smartdc'
-def get_mock_client(mockdata):
- class MockMetadataClient(object):
+class PseudoJoyentClient(object):
+ def __init__(self, data=None):
+ if data is None:
+ data = MOCK_RETURNS.copy()
+ self.data = data
- def __init__(self, serial):
- pass
+ def get(self, key, default=None, strip=False):
+ if key in self.data:
+ r = self.data[key]
+ if strip:
+ r = r.strip()
+ else:
+ r = default
+ return r
- def get_metadata(self, metadata_key):
- return mockdata.get(metadata_key)
- return MockMetadataClient
+ def get_json(self, key, default=None):
+ result = self.get(key, default=default)
+ if result is None:
+ return default
+ return json.loads(result)
+ def exists(self):
+ return True
-class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
+
+class TestSmartOSDataSource(FilesystemMockingTestCase):
def setUp(self):
super(TestSmartOSDataSource, self).setUp()
+ dsmos = 'cloudinit.sources.DataSourceSmartOS'
+ patcher = mock.patch(dsmos + ".jmc_client_factory")
+ self.jmc_cfact = patcher.start()
+ self.addCleanup(patcher.stop)
+ patcher = mock.patch(dsmos + ".get_smartos_environ")
+ self.get_smartos_environ = patcher.start()
+ self.addCleanup(patcher.stop)
+
self.tmp = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp)
- self.legacy_user_d = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.legacy_user_d)
-
- # If you should want to watch the logs...
- self._log = None
- self._log_file = None
- self._log_handler = None
-
- # patch cloud_dir, so our 'seed_dir' is guaranteed empty
self.paths = c_helpers.Paths({'cloud_dir': self.tmp})
- self.unapply = []
- super(TestSmartOSDataSource, self).setUp()
+ self.legacy_user_d = tempfile.mkdtemp()
+ self.orig_lud = DataSourceSmartOS.LEGACY_USER_D
+ DataSourceSmartOS.LEGACY_USER_D = self.legacy_user_d
def tearDown(self):
- helpers.FilesystemMockingTestCase.tearDown(self)
- if self._log_handler and self._log:
- self._log.removeHandler(self._log_handler)
- apply_patches([i for i in reversed(self.unapply)])
+ DataSourceSmartOS.LEGACY_USER_D = self.orig_lud
super(TestSmartOSDataSource, self).tearDown()
- def _patchIn(self, root):
- self.restore()
- self.patchOS(root)
- self.patchUtils(root)
-
- def apply_patches(self, patches):
- ret = apply_patches(patches)
- self.unapply += ret
-
- def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None,
- is_lxbrand=False):
- mod = DataSourceSmartOS
-
- if mockdata is None:
- mockdata = MOCK_RETURNS
-
- if dmi_data is None:
- dmi_data = DMI_DATA_RETURN
-
- def _dmi_data():
- return dmi_data
-
- def _os_uname():
- if not is_lxbrand:
- # LP: #1243287. tests assume this runs, but running test on
- # arm would cause them all to fail.
- return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', 'x86_64')
- else:
- return ('LINUX', 'NODENAME', 'RELEASE', 'BRANDZ VIRTUAL LINUX',
- 'X86_64')
+ def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM,
+ sys_cfg=None, ds_cfg=None):
+        self.jmc_cfact.return_value = PseudoJoyentClient(mockdata)
+ self.get_smartos_environ.return_value = mode
if sys_cfg is None:
sys_cfg = {}
@@ -138,44 +167,8 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
sys_cfg['datasource'] = sys_cfg.get('datasource', {})
sys_cfg['datasource']['SmartOS'] = ds_cfg
- self.apply_patches([(mod, 'LEGACY_USER_D', self.legacy_user_d)])
- self.apply_patches([
- (mod, 'JoyentMetadataClient', get_mock_client(mockdata))])
- self.apply_patches([(mod, 'dmi_data', _dmi_data)])
- self.apply_patches([(os, 'uname', _os_uname)])
- self.apply_patches([(mod, 'device_exists', lambda d: True)])
- dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None,
- paths=self.paths)
- self.apply_patches([(dsrc, '_get_seed_file_object', mock.MagicMock())])
- return dsrc
-
- def test_seed(self):
- # default seed should be /dev/ttyS1
- dsrc = self._get_ds()
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual('kvm', dsrc.smartos_type)
- self.assertEqual('/dev/ttyS1', dsrc.seed)
-
- def test_seed_lxbrand(self):
- # default seed should be /dev/ttyS1
- dsrc = self._get_ds(is_lxbrand=True)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual('lx-brand', dsrc.smartos_type)
- self.assertEqual('/native/.zonecontrol/metadata.sock', dsrc.seed)
-
- def test_issmartdc(self):
- dsrc = self._get_ds()
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertTrue(dsrc.is_smartdc)
-
- def test_issmartdc_lxbrand(self):
- dsrc = self._get_ds(is_lxbrand=True)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertTrue(dsrc.is_smartdc)
+ return DataSourceSmartOS.DataSourceSmartOS(
+ sys_cfg, distro=None, paths=self.paths)
def test_no_base64(self):
ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True}
@@ -211,58 +204,6 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
self.assertEqual(MOCK_RETURNS['hostname'],
dsrc.metadata['local-hostname'])
- def test_base64_all(self):
- # metadata provided base64_all of true
- my_returns = MOCK_RETURNS.copy()
- my_returns['base64_all'] = "true"
- for k in ('hostname', 'cloud-init:user-data'):
- my_returns[k] = b64e(my_returns[k])
-
- dsrc = self._get_ds(mockdata=my_returns)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['hostname'],
- dsrc.metadata['local-hostname'])
- self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
- dsrc.userdata_raw)
- self.assertEqual(MOCK_RETURNS['root_authorized_keys'],
- dsrc.metadata['public-keys'])
- self.assertEqual(MOCK_RETURNS['disable_iptables_flag'],
- dsrc.metadata['iptables_disable'])
- self.assertEqual(MOCK_RETURNS['enable_motd_sys_info'],
- dsrc.metadata['motd_sys_info'])
-
- def test_b64_userdata(self):
- my_returns = MOCK_RETURNS.copy()
- my_returns['b64-cloud-init:user-data'] = "true"
- my_returns['b64-hostname'] = "true"
- for k in ('hostname', 'cloud-init:user-data'):
- my_returns[k] = b64e(my_returns[k])
-
- dsrc = self._get_ds(mockdata=my_returns)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['hostname'],
- dsrc.metadata['local-hostname'])
- self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
- dsrc.userdata_raw)
- self.assertEqual(MOCK_RETURNS['root_authorized_keys'],
- dsrc.metadata['public-keys'])
-
- def test_b64_keys(self):
- my_returns = MOCK_RETURNS.copy()
- my_returns['base64_keys'] = 'hostname,ignored'
- for k in ('hostname',):
- my_returns[k] = b64e(my_returns[k])
-
- dsrc = self._get_ds(mockdata=my_returns)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['hostname'],
- dsrc.metadata['local-hostname'])
- self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
- dsrc.userdata_raw)
-
def test_userdata(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
@@ -272,6 +213,13 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
dsrc.userdata_raw)
+ def test_sdc_nics(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(json.loads(MOCK_RETURNS['sdc:nics']),
+ dsrc.metadata['network-data'])
+
def test_sdc_scripts(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
@@ -427,18 +375,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
mydscfg['disk_aliases']['FOO'])
-def apply_patches(patches):
- ret = []
- for (ref, name, replace) in patches:
- if replace is None:
- continue
- orig = getattr(ref, name)
- setattr(ref, name, replace)
- ret.append((ref, name, orig))
- return ret
-
-
-class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
+class TestJoyentMetadataClient(FilesystemMockingTestCase):
def setUp(self):
super(TestJoyentMetadataClient, self).setUp()
@@ -479,7 +416,8 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
mock.Mock(return_value=self.request_id)))
def _get_client(self):
- return DataSourceSmartOS.JoyentMetadataClient(self.serial)
+ return DataSourceSmartOS.JoyentMetadataClient(
+ fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM)
    def assertEndsWith(self, haystack, suffix):
        self.assertTrue(haystack.endswith(suffix),
@@ -493,7 +431,7 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
def test_get_metadata_writes_a_single_line(self):
client = self._get_client()
- client.get_metadata('some_key')
+ client.get('some_key')
self.assertEqual(1, self.serial.write.call_count)
written_line = self.serial.write.call_args[0][0]
print(type(written_line))
@@ -503,7 +441,7 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
def _get_written_line(self, key='some_key'):
client = self._get_client()
- client.get_metadata(key)
+ client.get(key)
return self.serial.write.call_args[0][0]
def test_get_metadata_writes_bytes(self):
@@ -547,32 +485,32 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
def test_get_metadata_reads_a_line(self):
client = self._get_client()
- client.get_metadata('some_key')
+ client.get('some_key')
self.assertEqual(self.metasource_data_len, self.serial.read.call_count)
def test_get_metadata_returns_valid_value(self):
client = self._get_client()
- value = client.get_metadata('some_key')
+ value = client.get('some_key')
self.assertEqual(self.metadata_value, value)
def test_get_metadata_throws_exception_for_incorrect_length(self):
self.response_parts['length'] = 0
client = self._get_client()
self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get_metadata, 'some_key')
+ client.get, 'some_key')
def test_get_metadata_throws_exception_for_incorrect_crc(self):
self.response_parts['crc'] = 'deadbeef'
client = self._get_client()
self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get_metadata, 'some_key')
+ client.get, 'some_key')
def test_get_metadata_throws_exception_for_request_id_mismatch(self):
self.response_parts['request_id'] = 'deadbeef'
client = self._get_client()
client._checksum = lambda _: self.response_parts['crc']
self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get_metadata, 'some_key')
+ client.get, 'some_key')
def test_get_metadata_returns_None_if_value_not_found(self):
self.response_parts['payload'] = ''
@@ -580,4 +518,24 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
self.response_parts['length'] = 17
client = self._get_client()
client._checksum = lambda _: self.response_parts['crc']
- self.assertIsNone(client.get_metadata('some_key'))
+ self.assertIsNone(client.get('some_key'))
+
+
+class TestNetworkConversion(TestCase):
+
+ def test_convert_simple(self):
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'name': 'net0', 'type': 'physical',
+ 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
+ 'netmask': '255.255.255.0',
+ 'address': '8.12.42.102/24'}],
+ 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'},
+ {'name': 'net1', 'type': 'physical',
+ 'subnets': [{'type': 'static', 'gateway': '192.168.128.1',
+ 'netmask': '255.255.252.0',
+ 'address': '192.168.128.93/22'}],
+ 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]}
+ found = DataSourceSmartOS.convert_smartos_network_data(SDC_NICS)
+ self.assertEqual(expected, found)
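
TestNetworkConversion pins down the mapping from SmartOS 'sdc:nics'
entries to cloud-init's version-1 network config: each NIC becomes a
'physical' device carrying mtu and mac_address, with one static subnet
per address in 'ips'. A minimal sketch of the call under test, reusing
this module's DataSourceSmartOS import:

    # minimal sketch, grounded in the expected dict asserted above
    nic = {'interface': 'net0', 'mac': '90:b8:d0:f5:e4:f5', 'mtu': 1500,
           'gateway': '8.12.42.1', 'netmask': '255.255.255.0',
           'ips': ['8.12.42.102/24']}
    cfg = DataSourceSmartOS.convert_smartos_network_data([nic])
    # cfg == {'version': 1, 'config': [
    #     {'name': 'net0', 'type': 'physical', 'mtu': 1500,
    #      'mac_address': '90:b8:d0:f5:e4:f5',
    #      'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
    #                   'netmask': '255.255.255.0',
    #                   'address': '8.12.42.102/24'}]}]}
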
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list.py
new file mode 100644
index 00000000..5d0417a2
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list.py
@@ -0,0 +1,165 @@
+""" test_handler_apt_configure_sources_list
+Test templating of sources list
+"""
+import logging
+import os
+import shutil
+import tempfile
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from cloudinit import cloud
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import templater
+from cloudinit import util
+
+from cloudinit.config import cc_apt_configure
+from cloudinit.sources import DataSourceNone
+
+from .. import helpers as t_help
+
+LOG = logging.getLogger(__name__)
+
+YAML_TEXT_CUSTOM_SL = """
+apt_mirror: http://archive.ubuntu.com/ubuntu/
+apt_custom_sources_list: |
+ ## template:jinja
+ ## Note, this file is written by cloud-init on first boot of an instance
+ ## modifications made here will not survive a re-bundle.
+ ## if you wish to make changes you can:
+ ## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
+ ## or do the same in user-data
+ ## b.) add sources in /etc/apt/sources.list.d
+ ## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
+
+ # See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
+ # newer versions of the distribution.
+ deb {{mirror}} {{codename}} main restricted
+ deb-src {{mirror}} {{codename}} main restricted
+ # FIND_SOMETHING_SPECIAL
+"""
+
+EXPECTED_CONVERTED_CONTENT = (
+ """## Note, this file is written by cloud-init on first boot of an instance
+## modifications made here will not survive a re-bundle.
+## if you wish to make changes you can:
+## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
+## or do the same in user-data
+## b.) add sources in /etc/apt/sources.list.d
+## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
+
+# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
+# newer versions of the distribution.
+deb http://archive.ubuntu.com/ubuntu/ fakerelease main restricted
+deb-src http://archive.ubuntu.com/ubuntu/ fakerelease main restricted
+# FIND_SOMETHING_SPECIAL
+""")
+
+
+def load_tfile_or_url(*args, **kwargs):
+ """load_tfile_or_url
+    Load a file or URL and return its decoded content.
+ """
+ return util.decode_binary(util.read_file_or_url(*args, **kwargs).contents)
+
+
+class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
+ """TestAptSourceConfigSourceList
+ Main Class to test sources list rendering
+ """
+ def setUp(self):
+ super(TestAptSourceConfigSourceList, self).setUp()
+ self.subp = util.subp
+ self.new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.new_root)
+
+ def _get_cloud(self, distro, metadata=None):
+ self.patchUtils(self.new_root)
+ paths = helpers.Paths({})
+ cls = distros.fetch(distro)
+ mydist = cls(distro, {}, paths)
+ myds = DataSourceNone.DataSourceNone({}, mydist, paths)
+ if metadata:
+ myds.metadata.update(metadata)
+ return cloud.Cloud(myds, paths, {}, mydist, None)
+
+ def apt_source_list(self, distro, mirror, mirrorcheck=None):
+ """apt_source_list
+ Test rendering of a source.list from template for a given distro
+ """
+ if mirrorcheck is None:
+ mirrorcheck = mirror
+
+ if isinstance(mirror, list):
+ cfg = {'apt_mirror_search': mirror}
+ else:
+ cfg = {'apt_mirror': mirror}
+ mycloud = self._get_cloud(distro)
+
+ with mock.patch.object(templater, 'render_to_file') as mocktmpl:
+ with mock.patch.object(os.path, 'isfile',
+ return_value=True) as mockisfile:
+ cc_apt_configure.handle("notimportant", cfg, mycloud,
+ LOG, None)
+
+ mockisfile.assert_any_call(
+ ('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
+ mocktmpl.assert_called_once_with(
+ ('/etc/cloud/templates/sources.list.%s.tmpl' % distro),
+ '/etc/apt/sources.list',
+ {'codename': '', 'primary': mirrorcheck, 'mirror': mirrorcheck})
+
+ def test_apt_source_list_debian(self):
+ """test_apt_source_list_debian
+ Test rendering of a source.list from template for debian
+ """
+ self.apt_source_list('debian', 'http://httpredir.debian.org/debian')
+
+ def test_apt_source_list_ubuntu(self):
+ """test_apt_source_list_ubuntu
+ Test rendering of a source.list from template for ubuntu
+ """
+ self.apt_source_list('ubuntu', 'http://archive.ubuntu.com/ubuntu/')
+
+ def test_apt_srcl_debian_mirrorfail(self):
+ """test_apt_source_list_debian_mirrorfail
+ Test rendering of a source.list from template for debian
+ """
+ self.apt_source_list('debian', ['http://does.not.exist',
+ 'http://httpredir.debian.org/debian'],
+ 'http://httpredir.debian.org/debian')
+
+ def test_apt_srcl_ubuntu_mirrorfail(self):
+ """test_apt_source_list_ubuntu_mirrorfail
+ Test rendering of a source.list from template for ubuntu
+ """
+ self.apt_source_list('ubuntu', ['http://does.not.exist',
+ 'http://archive.ubuntu.com/ubuntu/'],
+ 'http://archive.ubuntu.com/ubuntu/')
+
+ def test_apt_srcl_custom(self):
+ """test_apt_srcl_custom
+ Test rendering from a custom source.list template
+ """
+ cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
+ mycloud = self._get_cloud('ubuntu')
+
+ # the second mock restores the original subp
+ with mock.patch.object(util, 'write_file') as mockwrite:
+ with mock.patch.object(util, 'subp', self.subp):
+ with mock.patch.object(cc_apt_configure, 'get_release',
+ return_value='fakerelease'):
+ cc_apt_configure.handle("notimportant", cfg, mycloud,
+ LOG, None)
+
+ mockwrite.assert_called_once_with(
+ '/etc/apt/sources.list',
+ EXPECTED_CONVERTED_CONTENT,
+ mode=420)
+
+
+# vi: ts=4 expandtab
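
test_apt_srcl_custom exercises cloud-init's jinja templating: the
'## template:jinja' header in apt_custom_sources_list selects the jinja
renderer, and {{mirror}}/{{codename}} are substituted before the result
is written to /etc/apt/sources.list. A rough sketch of that substitution
step, assuming templater.render_string(content, params) honors the
header line the same way:

    # rough sketch; render_string and its handling of the header line
    # are assumptions, not asserted by this test
    from cloudinit import templater
    tmpl = '## template:jinja\ndeb {{mirror}} {{codename}} main restricted\n'
    out = templater.render_string(
        tmpl, {'mirror': 'http://archive.ubuntu.com/ubuntu/',
               'codename': 'fakerelease'})
    # out == 'deb http://archive.ubuntu.com/ubuntu/ fakerelease main restricted\n'
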
diff --git a/tests/unittests/test_handler/test_handler_apt_source.py b/tests/unittests/test_handler/test_handler_apt_source.py
new file mode 100644
index 00000000..4dbe69f0
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_apt_source.py
@@ -0,0 +1,551 @@
+""" test_handler_apt_source
+Testing various config variations of the apt_source config
+"""
+import os
+import re
+import shutil
+import tempfile
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+from mock import call
+
+from cloudinit.config import cc_apt_configure
+from cloudinit import util
+
+from ..helpers import TestCase
+
+EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
+NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
+8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
+HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
+CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
+OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
+FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
+S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
+=ACB2
+-----END PGP PUBLIC KEY BLOCK-----"""
+
+
+def load_tfile_or_url(*args, **kwargs):
+ """load_tfile_or_url
+    Load a file or URL and return its decoded content.
+ """
+ return util.decode_binary(util.read_file_or_url(*args, **kwargs).contents)
+
+
+class TestAptSourceConfig(TestCase):
+ """TestAptSourceConfig
+ Main Class to test apt_source configs
+ """
+ def setUp(self):
+ super(TestAptSourceConfig, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+ self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
+ self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
+ self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
+ self.join = os.path.join
+ # mock fallback filename into writable tmp dir
+ self.fallbackfn = os.path.join(self.tmp, "etc/apt/sources.list.d/",
+ "cloud_config_sources.list")
+
+ @staticmethod
+ def _get_default_params():
+ """get_default_params
+        Get the most basic default mirror and release info to be used in tests
+ """
+ params = {}
+ params['RELEASE'] = cc_apt_configure.get_release()
+ params['MIRROR'] = "http://archive.ubuntu.com/ubuntu"
+ return params
+
+ def myjoin(self, *args, **kwargs):
+ """myjoin - redir into writable tmpdir"""
+ if (args[0] == "/etc/apt/sources.list.d/" and
+ args[1] == "cloud_config_sources.list" and
+ len(args) == 2):
+ return self.join(self.tmp, args[0].lstrip("/"), args[1])
+ else:
+ return self.join(*args, **kwargs)
+
+ def apt_src_basic(self, filename, cfg):
+ """apt_src_basic
+        Test a fixed deb source string overriding the mirror set in params
+ """
+ params = self._get_default_params()
+
+ cc_apt_configure.add_sources(cfg, params)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = load_tfile_or_url(filename)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb", "http://archive.ubuntu.com/ubuntu",
+ "karmic-backports",
+ "main universe multiverse restricted"),
+ contents, flags=re.IGNORECASE))
+
+ def test_apt_src_basic(self):
+ """test_apt_src_basic
+        Test a fixed deb source string overriding the mirror set in params.
+ Test with a filename provided in config.
+ """
+ cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu'
+ ' karmic-backports'
+ ' main universe multiverse restricted'),
+ 'filename': self.aptlistfile}
+ self.apt_src_basic(self.aptlistfile, [cfg])
+
+ def test_apt_src_basic_dict(self):
+ """test_apt_src_basic_dict
+        Test a fixed deb source string overriding the mirror set in params.
+ Test with a filename provided in config.
+ Provided in a dictionary with filename being the key (new format)
+ """
+ cfg = {self.aptlistfile: {'source':
+ ('deb http://archive.ubuntu.com/ubuntu'
+ ' karmic-backports'
+ ' main universe multiverse restricted')}}
+ self.apt_src_basic(self.aptlistfile, cfg)
+
+ def apt_src_basic_tri(self, cfg):
+ """apt_src_basic_tri
+        Test three fixed deb source strings overriding the mirror set in
+        params. Test with filenames provided in config.
+        Generic part that checks three files with different content.
+ """
+ self.apt_src_basic(self.aptlistfile, cfg)
+
+ # extra verify on two extra files of this test
+ contents = load_tfile_or_url(self.aptlistfile2)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb", "http://archive.ubuntu.com/ubuntu",
+ "precise-backports",
+ "main universe multiverse restricted"),
+ contents, flags=re.IGNORECASE))
+ contents = load_tfile_or_url(self.aptlistfile3)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb", "http://archive.ubuntu.com/ubuntu",
+ "lucid-backports",
+ "main universe multiverse restricted"),
+ contents, flags=re.IGNORECASE))
+
+ def test_apt_src_basic_tri(self):
+ """test_apt_src_basic_tri
+        Test three fixed deb source strings overriding the mirror set in
+ params. Test with filenames provided in config.
+ """
+ cfg1 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
+ ' karmic-backports'
+ ' main universe multiverse restricted'),
+ 'filename': self.aptlistfile}
+ cfg2 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
+ ' precise-backports'
+ ' main universe multiverse restricted'),
+ 'filename': self.aptlistfile2}
+ cfg3 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
+ ' lucid-backports'
+ ' main universe multiverse restricted'),
+ 'filename': self.aptlistfile3}
+ self.apt_src_basic_tri([cfg1, cfg2, cfg3])
+
+ def test_apt_src_basic_dict_tri(self):
+ """test_apt_src_basic_dict_tri
+        Test three fixed deb source strings overriding the mirror set in
+ params. Test with filenames provided in config.
+ Provided in a dictionary with filename being the key (new format)
+ """
+ cfg = {self.aptlistfile: {'source':
+ ('deb http://archive.ubuntu.com/ubuntu'
+ ' karmic-backports'
+ ' main universe multiverse restricted')},
+ self.aptlistfile2: {'source':
+ ('deb http://archive.ubuntu.com/ubuntu'
+ ' precise-backports'
+ ' main universe multiverse restricted')},
+ self.aptlistfile3: {'source':
+ ('deb http://archive.ubuntu.com/ubuntu'
+ ' lucid-backports'
+ ' main universe multiverse restricted')}}
+ self.apt_src_basic_tri(cfg)
+
+ def test_apt_src_basic_nofn(self):
+ """test_apt_src_basic_nofn
+        Test a fixed deb source string overriding the mirror set in params.
+ Test without a filename provided in config and test for known fallback.
+ """
+ cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu'
+ ' karmic-backports'
+ ' main universe multiverse restricted')}
+ with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
+ self.apt_src_basic(self.fallbackfn, [cfg])
+
+ def apt_src_replacement(self, filename, cfg):
+ """apt_src_replace
+ Test Autoreplacement of MIRROR and RELEASE in source specs
+ """
+ params = self._get_default_params()
+ cc_apt_configure.add_sources(cfg, params)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = load_tfile_or_url(filename)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb", params['MIRROR'], params['RELEASE'],
+ "multiverse"),
+ contents, flags=re.IGNORECASE))
+
+ def test_apt_src_replace(self):
+ """test_apt_src_replace
+ Test Autoreplacement of MIRROR and RELEASE in source specs with
+ Filename being set
+ """
+ cfg = {'source': 'deb $MIRROR $RELEASE multiverse',
+ 'filename': self.aptlistfile}
+ self.apt_src_replacement(self.aptlistfile, [cfg])
+
+ def apt_src_replace_tri(self, cfg):
+ """apt_src_replace_tri
+ Test three autoreplacements of MIRROR and RELEASE in source specs with
+ generic part
+ """
+ self.apt_src_replacement(self.aptlistfile, cfg)
+
+ # extra verify on two extra files of this test
+ params = self._get_default_params()
+ contents = load_tfile_or_url(self.aptlistfile2)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb", params['MIRROR'], params['RELEASE'],
+ "main"),
+ contents, flags=re.IGNORECASE))
+ contents = load_tfile_or_url(self.aptlistfile3)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb", params['MIRROR'], params['RELEASE'],
+ "universe"),
+ contents, flags=re.IGNORECASE))
+
+ def test_apt_src_replace_tri(self):
+ """test_apt_src_replace_tri
+ Test three autoreplacements of MIRROR and RELEASE in source specs with
+ Filename being set
+ """
+ cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
+ 'filename': self.aptlistfile}
+ cfg2 = {'source': 'deb $MIRROR $RELEASE main',
+ 'filename': self.aptlistfile2}
+ cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
+ 'filename': self.aptlistfile3}
+ self.apt_src_replace_tri([cfg1, cfg2, cfg3])
+
+ def test_apt_src_replace_dict_tri(self):
+ """test_apt_src_replace_dict_tri
+ Test three autoreplacements of MIRROR and RELEASE in source specs with
+ Filename being set
+ Provided in a dictionary with filename being the key (new format)
+        We also test a special condition of the new format that allows
+        filenames to be overridden inside the dictionary entry.
+ """
+ cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'},
+ 'notused': {'source': 'deb $MIRROR $RELEASE main',
+ 'filename': self.aptlistfile2},
+ self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}}
+ self.apt_src_replace_tri(cfg)
+
+ def test_apt_src_replace_nofn(self):
+ """test_apt_src_replace_nofn
+ Test Autoreplacement of MIRROR and RELEASE in source specs with
+ No filename being set
+ """
+ cfg = {'source': 'deb $MIRROR $RELEASE multiverse'}
+ with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
+ self.apt_src_replacement(self.fallbackfn, [cfg])
+
+ def apt_src_keyid(self, filename, cfg, keynum):
+ """apt_src_keyid
+ Test specification of a source + keyid
+ """
+ params = self._get_default_params()
+
+ with mock.patch.object(util, 'subp',
+ return_value=('fakekey 1234', '')) as mockobj:
+ cc_apt_configure.add_sources(cfg, params)
+
+        # check that it added the right number of keys
+ calls = []
+ for _ in range(keynum):
+ calls.append(call(('apt-key', 'add', '-'), 'fakekey 1234'))
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = load_tfile_or_url(filename)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb",
+ ('http://ppa.launchpad.net/smoser/'
+ 'cloud-init-test/ubuntu'),
+ "xenial", "main"),
+ contents, flags=re.IGNORECASE))
+
+ def test_apt_src_keyid(self):
+ """test_apt_src_keyid
+ Test specification of a source + keyid with filename being set
+ """
+ cfg = {'source': ('deb '
+ 'http://ppa.launchpad.net/'
+ 'smoser/cloud-init-test/ubuntu'
+ ' xenial main'),
+ 'keyid': "03683F77",
+ 'filename': self.aptlistfile}
+ self.apt_src_keyid(self.aptlistfile, [cfg], 1)
+
+ def test_apt_src_keyid_tri(self):
+ """test_apt_src_keyid_tri
+ Test specification of a source + keyid with filename being set
+ Setting three of such, check for content and keys
+ """
+ cfg1 = {'source': ('deb '
+ 'http://ppa.launchpad.net/'
+ 'smoser/cloud-init-test/ubuntu'
+ ' xenial main'),
+ 'keyid': "03683F77",
+ 'filename': self.aptlistfile}
+ cfg2 = {'source': ('deb '
+ 'http://ppa.launchpad.net/'
+ 'smoser/cloud-init-test/ubuntu'
+ ' xenial universe'),
+ 'keyid': "03683F77",
+ 'filename': self.aptlistfile2}
+ cfg3 = {'source': ('deb '
+ 'http://ppa.launchpad.net/'
+ 'smoser/cloud-init-test/ubuntu'
+ ' xenial multiverse'),
+ 'keyid': "03683F77",
+ 'filename': self.aptlistfile3}
+
+ self.apt_src_keyid(self.aptlistfile, [cfg1, cfg2, cfg3], 3)
+ contents = load_tfile_or_url(self.aptlistfile2)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb",
+ ('http://ppa.launchpad.net/smoser/'
+ 'cloud-init-test/ubuntu'),
+ "xenial", "universe"),
+ contents, flags=re.IGNORECASE))
+ contents = load_tfile_or_url(self.aptlistfile3)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb",
+ ('http://ppa.launchpad.net/smoser/'
+ 'cloud-init-test/ubuntu'),
+ "xenial", "multiverse"),
+ contents, flags=re.IGNORECASE))
+
+ def test_apt_src_keyid_nofn(self):
+ """test_apt_src_keyid_nofn
+ Test specification of a source + keyid without filename being set
+ """
+ cfg = {'source': ('deb '
+ 'http://ppa.launchpad.net/'
+ 'smoser/cloud-init-test/ubuntu'
+ ' xenial main'),
+ 'keyid': "03683F77"}
+ with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
+ self.apt_src_keyid(self.fallbackfn, [cfg], 1)
+
+ def apt_src_key(self, filename, cfg):
+ """apt_src_key
+ Test specification of a source + key
+ """
+ params = self._get_default_params()
+
+ with mock.patch.object(util, 'subp') as mockobj:
+ cc_apt_configure.add_sources([cfg], params)
+
+ mockobj.assert_called_with(('apt-key', 'add', '-'), 'fakekey 4321')
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = load_tfile_or_url(filename)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb",
+ ('http://ppa.launchpad.net/smoser/'
+ 'cloud-init-test/ubuntu'),
+ "xenial", "main"),
+ contents, flags=re.IGNORECASE))
+
+ def test_apt_src_key(self):
+ """test_apt_src_key
+ Test specification of a source + key with filename being set
+ """
+ cfg = {'source': ('deb '
+ 'http://ppa.launchpad.net/'
+ 'smoser/cloud-init-test/ubuntu'
+ ' xenial main'),
+ 'key': "fakekey 4321",
+ 'filename': self.aptlistfile}
+ self.apt_src_key(self.aptlistfile, cfg)
+
+ def test_apt_src_key_nofn(self):
+ """test_apt_src_key_nofn
+ Test specification of a source + key without filename being set
+ """
+ cfg = {'source': ('deb '
+ 'http://ppa.launchpad.net/'
+ 'smoser/cloud-init-test/ubuntu'
+ ' xenial main'),
+ 'key': "fakekey 4321"}
+ with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
+ self.apt_src_key(self.fallbackfn, cfg)
+
+ def test_apt_src_keyonly(self):
+ """test_apt_src_keyonly
+ Test specification key without source
+ """
+ params = self._get_default_params()
+ cfg = {'key': "fakekey 4242",
+ 'filename': self.aptlistfile}
+
+ with mock.patch.object(util, 'subp') as mockobj:
+ cc_apt_configure.add_sources([cfg], params)
+
+ mockobj.assert_called_once_with(('apt-key', 'add', '-'),
+ 'fakekey 4242')
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_src_keyidonly(self):
+ """test_apt_src_keyidonly
+ Test specification of a keyid without source
+ """
+ params = self._get_default_params()
+ cfg = {'keyid': "03683F77",
+ 'filename': self.aptlistfile}
+
+ with mock.patch.object(util, 'subp',
+ return_value=('fakekey 1212', '')) as mockobj:
+ cc_apt_configure.add_sources([cfg], params)
+
+ mockobj.assert_called_with(('apt-key', 'add', '-'), 'fakekey 1212')
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_src_keyid_real(self):
+ """test_apt_src_keyid_real
+        Test specification of a keyid without source, including
+        everything up to addition of the key (only add_key_raw mocked)
+ """
+ keyid = "03683F77"
+ params = self._get_default_params()
+ cfg = {'keyid': keyid,
+ 'filename': self.aptlistfile}
+
+ with mock.patch.object(cc_apt_configure, 'add_key_raw') as mockobj:
+ cc_apt_configure.add_sources([cfg], params)
+
+ mockobj.assert_called_with(EXPECTEDKEY)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_src_longkeyid_real(self):
+ """test_apt_src_longkeyid_real
+        Test specification of a long key fingerprint without source,
+        including everything up to addition of the key (only
+        add_key_raw mocked)
+ """
+ keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
+ params = self._get_default_params()
+ cfg = {'keyid': keyid,
+ 'filename': self.aptlistfile}
+
+ with mock.patch.object(cc_apt_configure, 'add_key_raw') as mockobj:
+ cc_apt_configure.add_sources([cfg], params)
+
+ mockobj.assert_called_with(EXPECTEDKEY)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_src_ppa(self):
+ """test_apt_src_ppa
+ Test specification of a ppa
+ """
+ params = self._get_default_params()
+ cfg = {'source': 'ppa:smoser/cloud-init-test',
+ 'filename': self.aptlistfile}
+
+ # default matcher needed for ppa
+ matcher = re.compile(r'^[\w-]+:\w').search
+
+ with mock.patch.object(util, 'subp') as mockobj:
+ cc_apt_configure.add_sources([cfg], params, aa_repo_match=matcher)
+ mockobj.assert_called_once_with(['add-apt-repository',
+ 'ppa:smoser/cloud-init-test'])
+
+ # adding ppa should ignore filename (uses add-apt-repository)
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_src_ppa_tri(self):
+ """test_apt_src_ppa_tri
+ Test specification of a ppa
+ """
+ params = self._get_default_params()
+ cfg1 = {'source': 'ppa:smoser/cloud-init-test',
+ 'filename': self.aptlistfile}
+ cfg2 = {'source': 'ppa:smoser/cloud-init-test2',
+ 'filename': self.aptlistfile2}
+ cfg3 = {'source': 'ppa:smoser/cloud-init-test3',
+ 'filename': self.aptlistfile3}
+
+ # default matcher needed for ppa
+ matcher = re.compile(r'^[\w-]+:\w').search
+
+ with mock.patch.object(util, 'subp') as mockobj:
+ cc_apt_configure.add_sources([cfg1, cfg2, cfg3], params,
+ aa_repo_match=matcher)
+ calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test']),
+ call(['add-apt-repository', 'ppa:smoser/cloud-init-test2']),
+ call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'])]
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # adding ppa should ignore all filenames (uses add-apt-repository)
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+ self.assertFalse(os.path.isfile(self.aptlistfile2))
+ self.assertFalse(os.path.isfile(self.aptlistfile3))
+
+ def test_convert_to_new_format(self):
+ """test_convert_to_new_format
+ Test the conversion of old to new format
+        and the no-op conversion of new format to new format as well
+ """
+ cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
+ 'filename': self.aptlistfile}
+ cfg2 = {'source': 'deb $MIRROR $RELEASE main',
+ 'filename': self.aptlistfile2}
+ cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
+ 'filename': self.aptlistfile3}
+ checkcfg = {self.aptlistfile: {'filename': self.aptlistfile,
+ 'source': 'deb $MIRROR $RELEASE '
+ 'multiverse'},
+ self.aptlistfile2: {'filename': self.aptlistfile2,
+ 'source': 'deb $MIRROR $RELEASE main'},
+ self.aptlistfile3: {'filename': self.aptlistfile3,
+ 'source': 'deb $MIRROR $RELEASE '
+ 'universe'}}
+
+ newcfg = cc_apt_configure.convert_to_new_format([cfg1, cfg2, cfg3])
+ self.assertEqual(newcfg, checkcfg)
+
+ newcfg2 = cc_apt_configure.convert_to_new_format(newcfg)
+ self.assertEqual(newcfg2, checkcfg)
+
+ with self.assertRaises(ValueError):
+ cc_apt_configure.convert_to_new_format(5)
+
+
+# vi: ts=4 expandtab
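
test_convert_to_new_format documents the config shape change: the legacy
apt_sources list of {'source', 'filename'} dicts becomes a dict keyed by
filename, and feeding an already-new-format dict back in is a no-op. A
short illustration of the mapping the test asserts:

    # illustration of the mapping asserted by test_convert_to_new_format
    from cloudinit.config import cc_apt_configure
    old = [{'source': 'deb $MIRROR $RELEASE main',
            'filename': '/etc/apt/sources.list.d/x.list'}]
    new = cc_apt_configure.convert_to_new_format(old)
    # new == {'/etc/apt/sources.list.d/x.list':
    #         {'source': 'deb $MIRROR $RELEASE main',
    #          'filename': '/etc/apt/sources.list.d/x.list'}}
    assert cc_apt_configure.convert_to_new_format(new) == new  # no-op
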
diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
index 681f3780..a33ec184 100644
--- a/tests/unittests/test_merging.py
+++ b/tests/unittests/test_merging.py
@@ -125,9 +125,9 @@ class TestSimpleRun(helpers.ResourceUsingTestCase):
def test_seed_runs(self):
test_dicts = []
- for i in range(1, 50):
+ for i in range(1, 10):
base_dicts = []
- for j in range(1, 50):
+ for j in range(1, 10):
base_dicts.append(make_dict(5, i * j))
test_dicts.append(base_dicts)
for test in test_dicts:
diff --git a/udev/79-cloud-init-net-wait.rules b/udev/79-cloud-init-net-wait.rules
deleted file mode 100644
index 8344222a..00000000
--- a/udev/79-cloud-init-net-wait.rules
+++ /dev/null
@@ -1,10 +0,0 @@
-# cloud-init cold/hot-plug blocking mechanism
-# this file blocks further processing of network events
-# until cloud-init local has had a chance to read and apply network
-SUBSYSTEM!="net", GOTO="cloudinit_naming_end"
-ACTION!="add", GOTO="cloudinit_naming_end"
-
-IMPORT{program}="/lib/udev/cloud-init-wait"
-
-LABEL="cloudinit_naming_end"
-# vi: ts=4 expandtab syntax=udevrules
diff --git a/udev/cloud-init-wait b/udev/cloud-init-wait
deleted file mode 100755
index b434005d..00000000
--- a/udev/cloud-init-wait
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/sh
-
-CI_NET_READY="/run/cloud-init/network-config-ready"
-LOG="/run/cloud-init/${0##*/}.log"
-LOG_INIT=0
-MAX_WAIT=60
-DEBUG=0
-
-block_until_ready() {
- local fname="$1" max="$2"
- [ -f "$fname" ] && return 0
- # udevadm settle below will exit at the first of 3 conditions
- # 1.) timeout 2.) file exists 3.) all in-flight udev events are processed
- # since this is being run from a udev event, the 3 wont happen.
- # thus, this is essentially a inotify wait or timeout on a file in /run
- # that is created by cloud-init-local.
- udevadm settle "--timeout=$max" "--exit-if-exists=$fname"
-}
-
-log() {
- [ -n "${LOG}" ] || return
- [ "${DEBUG:-0}" = "0" ] && return
-
- if [ $LOG_INIT = 0 ]; then
- if [ -d "${LOG%/*}" ] || mkdir -p "${LOG%/*}"; then
- LOG_INIT=1
- else
- echo "${0##*/}: WARN: log init to ${LOG%/*}" 1>&2
- return
- fi
- elif [ "$LOG_INIT" = "-1" ]; then
- return
- fi
- local info="$$ $INTERFACE"
- if [ "$DEBUG" -gt 1 ]; then
- local up idle
- read up idle < /proc/uptime
- info="$$ $INTERFACE $up"
- fi
- echo "[$info]" "$@" >> "$LOG"
-}
-
-main() {
- local name="" readyfile="$CI_NET_READY"
- local info="INTERFACE=${INTERFACE} ID_NET_NAME=${ID_NET_NAME}"
- info="$info ID_NET_NAME_PATH=${ID_NET_NAME_PATH}"
- info="$info MAC_ADDRESS=${MAC_ADDRESS}"
- log "$info"
-
- ## Check to see if cloud-init.target is set. If cloud-init is
- ## disabled we do not want to do anything.
- if [ ! -f "/run/cloud-init/enabled" ]; then
- log "cloud-init disabled"
- return 0
- fi
-
- if [ "${INTERFACE#lo}" != "$INTERFACE" ]; then
- return 0
- fi
-
- block_until_ready "$readyfile" "$MAX_WAIT" ||
- { log "failed waiting for ready on $INTERFACE"; return 1; }
-
- log "net config ready"
-}
-
-main "$@"
-exit
-
-# vi: ts=4 expandtab
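
The deleted helper existed only to park network hotplug events until
cloud-init-local had written its readiness file; the trick was that
'udevadm settle --exit-if-exists=FILE' acts like an inotify wait with a
timeout when run from inside a udev event, since the event queue can
never drain there. A Python sketch of the same wait, using only the
paths, timeout and udevadm flags the removed script itself used:

    # sketch of the wait the removed helper performed
    import os
    import subprocess

    READY = '/run/cloud-init/network-config-ready'

    def block_until_ready(fname=READY, max_wait=60):
        if os.path.isfile(fname):
            return True
        # settle exits on timeout, when fname appears, or when the udev
        # queue drains -- the last cannot happen from within a udev event
        subprocess.call(['udevadm', 'settle', '--timeout=%d' % max_wait,
                         '--exit-if-exists=%s' % fname])
        return os.path.isfile(fname)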