summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorScott Moser <smoser@brickies.net>2017-03-03 02:26:38 -0500
committerScott Moser <smoser@brickies.net>2017-03-03 02:26:38 -0500
commitd7004bcf269fe60e456de336ecda9a9d2fe50bfd (patch)
treecf49b1fbc06388d46fa435814d24d97c83476047
parent1de8720effd029727bb5ef7972e7e4d859a1b53a (diff)
parentc81ea53bbdc4ada9d2b52430e106aeb3c38b4e0a (diff)
downloadvyos-cloud-init-d7004bcf269fe60e456de336ecda9a9d2fe50bfd.tar.gz
vyos-cloud-init-d7004bcf269fe60e456de336ecda9a9d2fe50bfd.zip
merge from master at 0.7.9-47-gc81ea53
-rw-r--r--Makefile14
-rw-r--r--cloudinit/cmd/main.py166
-rw-r--r--cloudinit/config/cc_set_hostname.py2
-rw-r--r--cloudinit/distros/rhel.py19
-rw-r--r--cloudinit/ec2_utils.py5
-rw-r--r--cloudinit/helpers.py2
-rw-r--r--cloudinit/net/eni.py33
-rw-r--r--cloudinit/net/sysconfig.py8
-rw-r--r--cloudinit/settings.py2
-rw-r--r--cloudinit/sources/DataSourceAliYun.py4
-rw-r--r--cloudinit/sources/DataSourceEc2.py147
-rw-r--r--cloudinit/sources/DataSourceOVF.py37
-rw-r--r--cloudinit/sources/DataSourceOpenStack.py15
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_nic.py24
-rw-r--r--cloudinit/ssh_util.py3
-rw-r--r--cloudinit/stages.py15
-rw-r--r--cloudinit/util.py44
-rw-r--r--cloudinit/warnings.py139
-rw-r--r--doc/examples/cloud-config.txt2
-rw-r--r--doc/rtd/topics/datasources/altcloud.rst4
-rw-r--r--doc/rtd/topics/datasources/openstack.rst36
-rw-r--r--doc/rtd/topics/format.rst13
-rwxr-xr-xpackages/debian/rules.in2
-rwxr-xr-xsetup.py3
-rwxr-xr-xsystemd/cloud-init-generator39
-rw-r--r--tests/unittests/helpers.py74
-rw-r--r--tests/unittests/test__init__.py92
-rw-r--r--tests/unittests/test_atomic_helper.py4
-rw-r--r--tests/unittests/test_data.py53
-rw-r--r--tests/unittests/test_datasource/test_gce.py4
-rw-r--r--tests/unittests/test_datasource/test_openstack.py11
-rw-r--r--[-rwxr-xr-x]tests/unittests/test_distros/test_user_data_normalize.py0
-rw-r--r--tests/unittests/test_ec2_util.py49
-rw-r--r--[-rwxr-xr-x]tests/unittests/test_net.py184
-rw-r--r--tests/unittests/test_sshutil.py24
-rw-r--r--[-rwxr-xr-x]tools/Z99-cloud-locale-test.sh148
-rw-r--r--tools/Z99-cloudinit-warnings.sh30
-rwxr-xr-xtools/ds-identify1240
-rwxr-xr-xtools/make-mime.py2
-rwxr-xr-xtools/make-tarball2
-rwxr-xr-xtools/mock-meta.py37
-rwxr-xr-xtools/read-version2
-rwxr-xr-xtools/validate-yaml.py2
-rw-r--r--tox.ini8
44 files changed, 2367 insertions, 377 deletions
diff --git a/Makefile b/Makefile
index 5d35dcc0..5940ed7e 100644
--- a/Makefile
+++ b/Makefile
@@ -27,13 +27,16 @@ ifeq ($(distro),)
distro = redhat
endif
-READ_VERSION=$(shell $(PYVER) $(CWD)/tools/read-version)
+READ_VERSION=$(shell $(PYVER) $(CWD)/tools/read-version || \
+ echo read-version-failed)
CODE_VERSION=$(shell $(PYVER) -c "from cloudinit import version; print(version.version_string())")
all: check
-check: check_version pep8 $(pyflakes) test $(yaml)
+check: check_version test $(yaml)
+
+style-check: pep8 $(pyflakes)
pep8:
@$(CWD)/tools/run-pep8
@@ -62,8 +65,8 @@ test: $(unittests)
check_version:
@if [ "$(READ_VERSION)" != "$(CODE_VERSION)" ]; then \
- echo "Error: read-version version $(READ_VERSION)" \
- "not equal to code version $(CODE_VERSION)"; exit 2; \
+ echo "Error: read-version version '$(READ_VERSION)'" \
+ "not equal to code version '$(CODE_VERSION)'"; exit 2; \
else true; fi
clean_pyc:
@@ -73,7 +76,7 @@ clean: clean_pyc
rm -rf /var/log/cloud-init.log /var/lib/cloud/
yaml:
- @$(CWD)/tools/validate-yaml.py $(YAML_FILES)
+ @$(PYVER) $(CWD)/tools/validate-yaml.py $(YAML_FILES)
rpm:
./packages/brpm --distro $(distro)
@@ -83,3 +86,4 @@ deb:
.PHONY: test pyflakes pyflakes3 clean pep8 rpm deb yaml check_version
.PHONY: pip-test-requirements pip-requirements clean_pyc unittest unittest3
+.PHONY: style-check
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index c83496c5..6ff4e1c0 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -26,8 +26,10 @@ from cloudinit import signal_handler
from cloudinit import sources
from cloudinit import stages
from cloudinit import templater
+from cloudinit import url_helper
from cloudinit import util
from cloudinit import version
+from cloudinit import warnings
from cloudinit import reporting
from cloudinit.reporting import events
@@ -129,23 +131,104 @@ def apply_reporting_cfg(cfg):
reporting.update_configuration(cfg.get('reporting'))
+def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')):
+ data = util.keyval_str_to_dict(cmdline)
+ for key in names:
+ if key in data:
+ return key, data[key]
+ raise KeyError("No keys (%s) found in string '%s'" %
+ (cmdline, names))
+
+
+def attempt_cmdline_url(path, network=True, cmdline=None):
+ """Write data from url referenced in command line to path.
+
+ path: a file to write content to if downloaded.
+ network: should network access be assumed.
+ cmdline: the cmdline to parse for cloud-config-url.
+
+ This is used in MAAS datasource, in "ephemeral" (read-only root)
+ environment where the instance netboots to iscsi ro root.
+ and the entity that controls the pxe config has to configure
+ the maas datasource.
+
+ An attempt is made on network urls even in local datasource
+ for case of network set up in initramfs.
+
+ Return value is a tuple of a logger function (logging.DEBUG)
+ and a message indicating what happened.
+ """
+
+ if cmdline is None:
+ cmdline = util.get_cmdline()
+
+ try:
+ cmdline_name, url = parse_cmdline_url(cmdline)
+ except KeyError:
+ return (logging.DEBUG, "No kernel command line url found.")
+
+ path_is_local = url.startswith("file://") or url.startswith("/")
+
+ if path_is_local and os.path.exists(path):
+ if network:
+ m = ("file '%s' existed, possibly from local stage download"
+ " of command line url '%s'. Not re-writing." % (path, url))
+ level = logging.INFO
+ if path_is_local:
+ level = logging.DEBUG
+ else:
+ m = ("file '%s' existed, possibly from previous boot download"
+ " of command line url '%s'. Not re-writing." % (path, url))
+ level = logging.WARN
+
+ return (level, m)
+
+ kwargs = {'url': url, 'timeout': 10, 'retries': 2}
+ if network or path_is_local:
+ level = logging.WARN
+ kwargs['sec_between'] = 1
+ else:
+ level = logging.DEBUG
+ kwargs['sec_between'] = .1
+
+ data = None
+ header = b'#cloud-config'
+ try:
+ resp = util.read_file_or_url(**kwargs)
+ if resp.ok():
+ data = resp.contents
+ if not resp.contents.startswith(header):
+ if cmdline_name == 'cloud-config-url':
+ level = logging.WARN
+ else:
+ level = logging.INFO
+ return (
+ level,
+ "contents of '%s' did not start with %s" % (url, header))
+ else:
+ return (level,
+ "url '%s' returned code %s. Ignoring." % (url, resp.code))
+
+ except url_helper.UrlError as e:
+ return (level, "retrieving url '%s' failed: %s" % (url, e))
+
+ util.write_file(path, data, mode=0o600)
+ return (logging.INFO,
+ "wrote cloud-config data from %s='%s' to %s" %
+ (cmdline_name, url, path))
+
+
def main_init(name, args):
deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
if args.local:
deps = [sources.DEP_FILESYSTEM]
- if not args.local:
- # See doc/kernel-cmdline.txt
- #
- # This is used in maas datasource, in "ephemeral" (read-only root)
- # environment where the instance netboots to iscsi ro root.
- # and the entity that controls the pxe config has to configure
- # the maas datasource.
- #
- # Could be used elsewhere, only works on network based (not local).
- root_name = "%s.d" % (CLOUD_CONFIG)
- target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg")
- util.read_write_cmdline_url(target_fn)
+ early_logs = []
+ early_logs.append(
+ attempt_cmdline_url(
+ path=os.path.join("%s.d" % CLOUD_CONFIG,
+ "91_kernel_cmdline_url.cfg"),
+ network=not args.local))
# Cloud-init 'init' stage is broken up into the following sub-stages
# 1. Ensure that the init object fetches its config without errors
@@ -171,12 +254,14 @@ def main_init(name, args):
outfmt = None
errfmt = None
try:
- LOG.debug("Closing stdin")
+ early_logs.append((logging.DEBUG, "Closing stdin."))
util.close_stdin()
(outfmt, errfmt) = util.fixup_output(init.cfg, name)
except Exception:
- util.logexc(LOG, "Failed to setup output redirection!")
- print_exc("Failed to setup output redirection!")
+ msg = "Failed to setup output redirection!"
+ util.logexc(LOG, msg)
+ print_exc(msg)
+ early_logs.append((logging.WARN, msg))
if args.debug:
# Reset so that all the debug handlers are closed out
LOG.debug(("Logging being reset, this logger may no"
@@ -190,6 +275,10 @@ def main_init(name, args):
# been redirected and log now configured.
welcome(name, msg=w_msg)
+ # re-play early log messages before logging was setup
+ for lvl, msg in early_logs:
+ LOG.log(lvl, msg)
+
# Stage 3
try:
init.initialize()
@@ -224,8 +313,15 @@ def main_init(name, args):
" would allow us to stop early.")
else:
existing = "check"
- if util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False):
+ mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False)
+ if mcfg:
+ LOG.debug("manual cache clean set from config")
existing = "trust"
+ else:
+ mfile = path_helper.get_ipath_cur("manual_clean_marker")
+ if os.path.exists(mfile):
+ LOG.debug("manual cache clean found from marker: %s", mfile)
+ existing = "trust"
init.purge_cache()
# Delete the non-net file as well
@@ -318,10 +414,48 @@ def main_init(name, args):
# give the activated datasource a chance to adjust
init.activate_datasource()
+ di_report_warn(datasource=init.datasource, cfg=init.cfg)
+
# Stage 10
return (init.datasource, run_module_section(mods, name, name))
+def di_report_warn(datasource, cfg):
+ if 'di_report' not in cfg:
+ LOG.debug("no di_report found in config.")
+ return
+
+ dicfg = cfg.get('di_report', {})
+ if not isinstance(dicfg, dict):
+ LOG.warn("di_report config not a dictionary: %s", dicfg)
+ return
+
+ dslist = dicfg.get('datasource_list')
+ if dslist is None:
+ LOG.warn("no 'datasource_list' found in di_report.")
+ return
+ elif not isinstance(dslist, list):
+ LOG.warn("di_report/datasource_list not a list: %s", dslist)
+ return
+
+ # ds.__module__ is like cloudinit.sources.DataSourceName
+ # where Name is the thing that shows up in datasource_list.
+ modname = datasource.__module__.rpartition(".")[2]
+ if modname.startswith(sources.DS_PREFIX):
+ modname = modname[len(sources.DS_PREFIX):]
+ else:
+ LOG.warn("Datasource '%s' came from unexpected module '%s'.",
+ datasource, modname)
+
+ if modname in dslist:
+ LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s",
+ datasource, modname, dslist)
+ return
+
+ warnings.show_warning('dsid_missing_source', cfg,
+ source=modname, dslist=str(dslist))
+
+
def main_modules(action_name, args):
name = args.mode
# Cloud-init 'modules' stages are broken up into the following sub-stages
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index e42799f9..aa3dfe5f 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -27,7 +27,7 @@ will be used.
**Config keys**::
- perserve_hostname: <true/false>
+ preserve_hostname: <true/false>
fqdn: <fqdn>
hostname: <fqdn/hostname>
"""
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index aa558381..7498c63a 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -190,13 +190,18 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- cmd = ['yum']
- # If enabled, then yum will be tolerant of errors on the command line
- # with regard to packages.
- # For example: if you request to install foo, bar and baz and baz is
- # installed; yum won't error out complaining that baz is already
- # installed.
- cmd.append("-t")
+ if util.which('dnf'):
+ LOG.debug('Using DNF for package management')
+ cmd = ['dnf']
+ else:
+ LOG.debug('Using YUM for package management')
+ # the '-t' argument makes yum tolerant of errors on the command
+ # line with regard to packages.
+ #
+ # For example: if you request to install foo, bar and baz and baz
+ # is installed; yum won't error out complaining that baz is already
+ # installed.
+ cmd = ['yum', '-t']
# Determines whether or not yum prompts for confirmation
# of critical actions. We don't want to prompt...
cmd.append("-y")
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index c656ef14..13691549 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -28,7 +28,7 @@ class MetadataLeafDecoder(object):
def __call__(self, field, blob):
if not blob:
- return blob
+ return ''
try:
blob = util.decode_binary(blob)
except UnicodeDecodeError:
@@ -82,6 +82,9 @@ class MetadataMaterializer(object):
field_name = get_name(field)
if not field or not field_name:
continue
+ # Don't materialize credentials
+ if field_name == 'security-credentials':
+ continue
if has_children(field):
if field_name not in children:
children.append(field_name)
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 4528fb01..7435d58d 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -339,6 +339,8 @@ class Paths(object):
"vendordata_raw": "vendor-data.txt",
"vendordata": "vendor-data.txt.i",
"instance_id": ".instance-id",
+ "manual_clean_marker": "manual-clean",
+ "warnings": "warnings",
}
# Set when a datasource becomes active
self.datasource = ds
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index b06ffac9..5b249f1f 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -90,8 +90,6 @@ def _iface_add_attrs(iface, index):
def _iface_start_entry(iface, index, render_hwaddress=False):
fullname = iface['name']
- if index != 0:
- fullname += ":%s" % index
control = iface['control']
if control == "auto":
@@ -113,6 +111,16 @@ def _iface_start_entry(iface, index, render_hwaddress=False):
return lines
+def _subnet_is_ipv6(subnet):
+ # 'static6' or 'dhcp6'
+ if subnet['type'].endswith('6'):
+ # This is a request for DHCPv6.
+ return True
+ elif subnet['type'] == 'static' and ":" in subnet['address']:
+ return True
+ return False
+
+
def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
"""Parses the file contents, placing result into ifaces.
@@ -354,21 +362,23 @@ class Renderer(renderer.Renderer):
sections = []
subnets = iface.get('subnets', {})
if subnets:
- for index, subnet in zip(range(0, len(subnets)), subnets):
+ for index, subnet in enumerate(subnets):
iface['index'] = index
iface['mode'] = subnet['type']
iface['control'] = subnet.get('control', 'auto')
subnet_inet = 'inet'
- if iface['mode'].endswith('6'):
- # This is a request for DHCPv6.
- subnet_inet += '6'
- elif iface['mode'] == 'static' and ":" in subnet['address']:
- # This is a static IPv6 address.
+ if _subnet_is_ipv6(subnet):
subnet_inet += '6'
iface['inet'] = subnet_inet
- if iface['mode'].startswith('dhcp'):
+ if subnet['type'].startswith('dhcp'):
iface['mode'] = 'dhcp'
+ # do not emit multiple 'auto $IFACE' lines as older (precise)
+ # ifupdown complains
+ if True in ["auto %s" % (iface['name']) in line
+ for line in sections]:
+ iface['control'] = 'alias'
+
lines = list(
_iface_start_entry(
iface, index, render_hwaddress=render_hwaddress) +
@@ -378,11 +388,6 @@ class Renderer(renderer.Renderer):
for route in subnet.get('routes', []):
lines.extend(self._render_route(route, indent=" "))
- if len(subnets) > 1 and index == 0:
- tmpl = " post-up ifup %s:%s\n"
- for i in range(1, len(subnets)):
- lines.append(tmpl % (iface['name'], i))
-
sections.append(lines)
else:
# ifenslave docs say to auto the slave devices
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 9be74070..6e7739fb 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -282,12 +282,12 @@ class Renderer(renderer.Renderer):
if len(iface_subnets) == 1:
cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0])
elif len(iface_subnets) > 1:
- for i, iface_subnet in enumerate(iface_subnets,
- start=len(iface.children)):
+ for i, isubnet in enumerate(iface_subnets,
+ start=len(iface_cfg.children)):
iface_sub_cfg = iface_cfg.copy()
iface_sub_cfg.name = "%s:%s" % (iface_name, i)
- iface.children.append(iface_sub_cfg)
- cls._render_subnet(iface_sub_cfg, route_cfg, iface_subnet)
+ iface_cfg.children.append(iface_sub_cfg)
+ cls._render_subnet(iface_sub_cfg, route_cfg, isubnet)
@classmethod
def _render_bond_interfaces(cls, network_state, iface_contents):
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index b1fdd31f..692ff5e5 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -14,6 +14,8 @@ CFG_ENV_NAME = "CLOUD_CFG"
# This is expected to be a yaml formatted file
CLOUD_CONFIG = '/etc/cloud/cloud.cfg'
+RUN_CLOUD_CONFIG = '/run/cloud-init/cloud.cfg'
+
# What u get if no config is provided
CFG_BUILTIN = {
'datasource_list': [
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 2d00255c..9debe947 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -22,6 +22,10 @@ class DataSourceAliYun(EC2.DataSourceEc2):
def get_public_ssh_keys(self):
return parse_public_keys(self.metadata.get('public-keys', {}))
+ @property
+ def cloud_platform(self):
+ return EC2.Platforms.ALIYUN
+
def parse_public_keys(public_keys):
keys = []
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index c657fd09..6f01a139 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -16,18 +16,31 @@ from cloudinit import log as logging
from cloudinit import sources
from cloudinit import url_helper as uhelp
from cloudinit import util
+from cloudinit import warnings
LOG = logging.getLogger(__name__)
# Which version we are requesting of the ec2 metadata apis
DEF_MD_VERSION = '2009-04-04'
+STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
+STRICT_ID_DEFAULT = "warn"
+
+
+class Platforms(object):
+ ALIYUN = "AliYun"
+ AWS = "AWS"
+ BRIGHTBOX = "Brightbox"
+ SEEDED = "Seeded"
+ UNKNOWN = "Unknown"
+
class DataSourceEc2(sources.DataSource):
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
+ _cloud_platform = None
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -41,8 +54,18 @@ class DataSourceEc2(sources.DataSource):
self.userdata_raw = seed_ret['user-data']
self.metadata = seed_ret['meta-data']
LOG.debug("Using seeded ec2 data from %s", self.seed_dir)
+ self._cloud_platform = Platforms.SEEDED
return True
+ strict_mode, _sleep = read_strict_mode(
+ util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH,
+ STRICT_ID_DEFAULT), ("warn", None))
+
+ LOG.debug("strict_mode: %s, cloud_platform=%s",
+ strict_mode, self.cloud_platform)
+ if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN:
+ return False
+
try:
if not self.wait_for_metadata_service():
return False
@@ -51,8 +74,8 @@ class DataSourceEc2(sources.DataSource):
ec2.get_instance_userdata(self.api_ver, self.metadata_address)
self.metadata = ec2.get_instance_metadata(self.api_ver,
self.metadata_address)
- LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
+ LOG.debug("Crawl of metadata service took %.3f seconds",
+ time.time() - start_time)
return True
except Exception:
util.logexc(LOG, "Failed reading from metadata address %s",
@@ -190,6 +213,126 @@ class DataSourceEc2(sources.DataSource):
return az[:-1]
return None
+ @property
+ def cloud_platform(self):
+ if self._cloud_platform is None:
+ self._cloud_platform = identify_platform()
+ return self._cloud_platform
+
+ def activate(self, cfg, is_new_instance):
+ if not is_new_instance:
+ return
+ if self.cloud_platform == Platforms.UNKNOWN:
+ warn_if_necessary(
+ util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT),
+ cfg)
+
+
+def read_strict_mode(cfgval, default):
+ try:
+ return parse_strict_mode(cfgval)
+ except ValueError as e:
+ LOG.warn(e)
+ return default
+
+
+def parse_strict_mode(cfgval):
+ # given a mode like:
+ # true, false, warn,[sleep]
+ # return tuple with string mode (true|false|warn) and sleep.
+ if cfgval is True:
+ return 'true', None
+ if cfgval is False:
+ return 'false', None
+
+ if not cfgval:
+ return 'warn', 0
+
+ mode, _, sleep = cfgval.partition(",")
+ if mode not in ('true', 'false', 'warn'):
+ raise ValueError(
+ "Invalid mode '%s' in strict_id setting '%s': "
+ "Expected one of 'true', 'false', 'warn'." % (mode, cfgval))
+
+ if sleep:
+ try:
+ sleep = int(sleep)
+ except ValueError:
+ raise ValueError("Invalid sleep '%s' in strict_id setting '%s': "
+ "not an integer" % (sleep, cfgval))
+ else:
+ sleep = None
+
+ return mode, sleep
+
+
+def warn_if_necessary(cfgval, cfg):
+ try:
+ mode, sleep = parse_strict_mode(cfgval)
+ except ValueError as e:
+ LOG.warn(e)
+ return
+
+ if mode == "false":
+ return
+
+ warnings.show_warning('non_ec2_md', cfg, mode=True, sleep=sleep)
+
+
+def identify_aws(data):
+ # data is a dictionary returned by _collect_platform_data.
+ if (data['uuid'].startswith('ec2') and
+ (data['uuid_source'] == 'hypervisor' or
+ data['uuid'] == data['serial'])):
+ return Platforms.AWS
+
+ return None
+
+
+def identify_brightbox(data):
+ if data['serial'].endswith('brightbox.com'):
+ return Platforms.BRIGHTBOX
+
+
+def identify_platform():
+ # identify the platform and return an entry in Platforms.
+ data = _collect_platform_data()
+ checks = (identify_aws, identify_brightbox, lambda x: Platforms.UNKNOWN)
+ for checker in checks:
+ try:
+ result = checker(data)
+ if result:
+ return result
+ except Exception as e:
+ LOG.warn("calling %s with %s raised exception: %s",
+ checker, data, e)
+
+
+def _collect_platform_data():
+ # returns a dictionary with all lower case values:
+ # uuid: system-uuid from dmi or /sys/hypervisor
+ # uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi'
+ # serial: dmi 'system-serial-number' (/sys/.../product_serial)
+ data = {}
+ try:
+ uuid = util.load_file("/sys/hypervisor/uuid").strip()
+ data['uuid_source'] = 'hypervisor'
+ except Exception:
+ uuid = util.read_dmi_data('system-uuid')
+ data['uuid_source'] = 'dmi'
+
+ if uuid is None:
+ uuid = ''
+ data['uuid'] = uuid.lower()
+
+ serial = util.read_dmi_data('system-serial-number')
+ if serial is None:
+ serial = ''
+
+ data['serial'] = serial.lower()
+
+ return data
+
# Used to match classes to dependencies
datasources = [
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 78928c77..d70784ac 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -48,6 +48,7 @@ class DataSourceOVF(sources.DataSource):
self.environment = None
self.cfg = {}
self.supported_seed_starts = ("/", "file://")
+ self.vmware_customization_supported = True
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -78,7 +79,10 @@ class DataSourceOVF(sources.DataSource):
found.append(seed)
elif system_type and 'vmware' in system_type.lower():
LOG.debug("VMware Virtualization Platform found")
- if not util.get_cfg_option_bool(
+ if not self.vmware_customization_supported:
+ LOG.debug("Skipping the check for "
+ "VMware Customization support")
+ elif not util.get_cfg_option_bool(
self.sys_cfg, "disable_vmware_customization", True):
deployPkgPluginPath = search_file("/usr/lib/vmware-tools",
"libdeployPkgPlugin.so")
@@ -90,17 +94,18 @@ class DataSourceOVF(sources.DataSource):
# copies the customization specification file to
# /var/run/vmware-imc directory. cloud-init code needs
# to search for the file in that directory.
+ max_wait = get_max_wait_from_cfg(self.ds_cfg)
vmwareImcConfigFilePath = util.log_time(
logfunc=LOG.debug,
msg="waiting for configuration file",
func=wait_for_imc_cfg_file,
- args=("/var/run/vmware-imc", "cust.cfg"))
+ args=("/var/run/vmware-imc", "cust.cfg", max_wait))
if vmwareImcConfigFilePath:
- LOG.debug("Found VMware DeployPkg Config File at %s" %
+ LOG.debug("Found VMware Customization Config File at %s",
vmwareImcConfigFilePath)
else:
- LOG.debug("Did not find VMware DeployPkg Config File Path")
+ LOG.debug("Did not find VMware Customization Config File")
else:
LOG.debug("Customization for VMware platform is disabled.")
@@ -206,6 +211,29 @@ class DataSourceOVFNet(DataSourceOVF):
DataSourceOVF.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
self.supported_seed_starts = ("http://", "https://", "ftp://")
+ self.vmware_customization_supported = False
+
+
+def get_max_wait_from_cfg(cfg):
+ default_max_wait = 90
+ max_wait_cfg_option = 'vmware_cust_file_max_wait'
+ max_wait = default_max_wait
+
+ if not cfg:
+ return max_wait
+
+ try:
+ max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait))
+ except ValueError:
+ LOG.warn("Failed to get '%s', using %s",
+ max_wait_cfg_option, default_max_wait)
+
+ if max_wait <= 0:
+ LOG.warn("Invalid value '%s' for '%s', using '%s' instead",
+ max_wait, max_wait_cfg_option, default_max_wait)
+ max_wait = default_max_wait
+
+ return max_wait
def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
@@ -215,6 +243,7 @@ def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
fileFullPath = search_file(dirpath, filename)
if fileFullPath:
return fileFullPath
+ LOG.debug("Waiting for VMware Customization Config File")
time.sleep(naplen)
waited += naplen
return None
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 2a58f1cd..e1ea21f8 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -45,6 +45,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
# max_wait < 0 indicates do not wait
max_wait = -1
timeout = 10
+ retries = 5
try:
max_wait = int(self.ds_cfg.get("max_wait", max_wait))
@@ -55,7 +56,13 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
timeout = max(0, int(self.ds_cfg.get("timeout", timeout)))
except Exception:
util.logexc(LOG, "Failed to get timeout, using %s", timeout)
- return (max_wait, timeout)
+
+ try:
+ retries = int(self.ds_cfg.get("retries", retries))
+ except Exception:
+ util.logexc(LOG, "Failed to get max wait. using %s", retries)
+
+ return (max_wait, timeout, retries)
def wait_for_metadata_service(self):
urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
@@ -76,7 +83,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
md_urls.append(md_url)
url2base[md_url] = url
- (max_wait, timeout) = self._get_url_settings()
+ (max_wait, timeout, retries) = self._get_url_settings()
start_time = time.time()
avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait,
timeout=timeout)
@@ -89,13 +96,15 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
self.metadata_address = url2base.get(avail_url)
return bool(avail_url)
- def get_data(self, retries=5, timeout=5):
+ def get_data(self):
try:
if not self.wait_for_metadata_service():
return False
except IOError:
return False
+ (max_wait, timeout, retries) = self._get_url_settings()
+
try:
results = util.log_time(LOG.debug,
'Crawl of openstack metadata service',
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index d5a7c346..67ac21db 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -101,7 +101,11 @@ class NicConfigurator(object):
return lines
# Static Ipv4
- v4 = nic.staticIpv4
+ addrs = nic.staticIpv4
+ if not addrs:
+ return lines
+
+ v4 = addrs[0]
if v4.ip:
lines.append(' address %s' % v4.ip)
if v4.netmask:
@@ -197,22 +201,6 @@ class NicConfigurator(object):
util.subp(["pkill", "dhclient"], rcs=[0, 1])
util.subp(["rm", "-f", "/var/lib/dhcp/*"])
- def if_down_up(self):
- names = []
- for nic in self.nics:
- name = self.mac2Name.get(nic.mac.lower())
- names.append(name)
-
- for name in names:
- logger.info('Bring down interface %s' % name)
- util.subp(["ifdown", "%s" % name])
-
- self.clear_dhcp()
-
- for name in names:
- logger.info('Bring up interface %s' % name)
- util.subp(["ifup", "%s" % name])
-
def configure(self):
"""
Configure the /etc/network/intefaces
@@ -232,6 +220,6 @@ class NicConfigurator(object):
for line in lines:
fp.write('%s\n' % line)
- self.if_down_up()
+ self.clear_dhcp()
# vi: ts=4 expandtab
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index be8a49e8..b95b956f 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -22,8 +22,11 @@ DEF_SSHD_CFG = "/etc/ssh/sshd_config"
VALID_KEY_TYPES = (
"dsa",
"ecdsa",
+ "ecdsa-sha2-nistp256",
"ecdsa-sha2-nistp256-cert-v01@openssh.com",
+ "ecdsa-sha2-nistp384",
"ecdsa-sha2-nistp384-cert-v01@openssh.com",
+ "ecdsa-sha2-nistp521",
"ecdsa-sha2-nistp521-cert-v01@openssh.com",
"ed25519",
"rsa",
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index b0552dde..5bed9032 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -11,7 +11,8 @@ import sys
import six
from six.moves import cPickle as pickle
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG)
+from cloudinit.settings import (
+ FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG)
from cloudinit import handlers
@@ -188,6 +189,12 @@ class Init(object):
def _write_to_cache(self):
if self.datasource is NULL_DATA_SOURCE:
return False
+ if util.get_cfg_option_bool(self.cfg, 'manual_cache_clean', False):
+ # The empty file in instance/ dir indicates manual cleaning,
+ # and can be read by ds-identify.
+ util.write_file(
+ self.paths.get_ipath_cur("manual_clean_marker"),
+ omode="w", content="")
return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl"))
def _get_datasources(self):
@@ -828,6 +835,10 @@ class Modules(object):
return self._run_modules(mostly_mods)
+def read_runtime_config():
+ return util.read_conf(RUN_CLOUD_CONFIG)
+
+
def fetch_base_config():
return util.mergemanydict(
[
@@ -835,6 +846,8 @@ def fetch_base_config():
util.get_builtin_cfg(),
# Anything in your conf.d or 'default' cloud.cfg location.
util.read_conf_with_confd(CLOUD_CONFIG),
+ # runtime config
+ read_runtime_config(),
# Kernel/cmdline parameters override system config
util.read_conf_from_cmdline(),
], reverse=True)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 5725129e..7196a7ca 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1089,31 +1089,6 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
return fqdn
-def get_cmdline_url(names=('cloud-config-url', 'url'),
- starts=b"#cloud-config", cmdline=None):
- if cmdline is None:
- cmdline = get_cmdline()
-
- data = keyval_str_to_dict(cmdline)
- url = None
- key = None
- for key in names:
- if key in data:
- url = data[key]
- break
-
- if not url:
- return (None, None, None)
-
- resp = read_file_or_url(url)
- # allow callers to pass starts as text when comparing to bytes contents
- starts = encode_text(starts)
- if resp.ok() and resp.contents.startswith(starts):
- return (key, url, resp.contents)
-
- return (key, url, None)
-
-
def is_resolvable(name):
"""determine if a url is resolvable, return a boolean
This also attempts to be resilent against dns redirection.
@@ -1475,25 +1450,6 @@ def ensure_dirs(dirlist, mode=0o755):
ensure_dir(d, mode)
-def read_write_cmdline_url(target_fn):
- if not os.path.exists(target_fn):
- try:
- (key, url, content) = get_cmdline_url()
- except Exception:
- logexc(LOG, "Failed fetching command line url")
- return
- try:
- if key and content:
- write_file(target_fn, content, mode=0o600)
- LOG.debug(("Wrote to %s with contents of command line"
- " url %s (len=%s)"), target_fn, url, len(content))
- elif key and not content:
- LOG.debug(("Command line key %s with url"
- " %s had no contents"), key, url)
- except Exception:
- logexc(LOG, "Failed writing url content to %s", target_fn)
-
-
def yaml_dumps(obj, explicit_start=True, explicit_end=True):
return yaml.safe_dump(obj,
line_break="\n",
diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py
new file mode 100644
index 00000000..3206d4e9
--- /dev/null
+++ b/cloudinit/warnings.py
@@ -0,0 +1,139 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import util
+
+import os
+import time
+
+LOG = logging.getLogger()
+
+WARNINGS = {
+ 'non_ec2_md': """
+This system is using the EC2 Metadata Service, but does not appear to
+be running on Amazon EC2 or one of cloud-init's known platforms that
+provide an EC2 Metadata service. In the future, cloud-init may stop
+reading metadata from the EC2 Metadata Service unless the platform can
+be identified.
+
+If you are seeing this message, please file a bug against
+cloud-init at
+ https://bugs.launchpad.net/cloud-init/+filebug?field.tags=dsid
+Make sure to include the cloud provider your instance is
+running on.
+
+For more information see
+ https://bugs.launchpad.net/bugs/1660385
+
+After you have filed a bug, you can disable this warning by
+launching your instance with the cloud-config below, or
+putting that content into
+ /etc/cloud/cloud.cfg.d/99-ec2-datasource.cfg
+
+#cloud-config
+datasource:
+ Ec2:
+ strict_id: false""",
+ 'dsid_missing_source': """
+A new feature in cloud-init identified possible datasources for
+this system as:
+ {dslist}
+However, the datasource used was: {source}
+
+In the future, cloud-init will only attempt to use datasources that
+are identified or specifically configured.
+For more information see
+ https://bugs.launchpad.net/bugs/1669675
+
+If you are seeing this message, please file a bug against
+cloud-init at
+ https://bugs.launchpad.net/cloud-init/+filebug?field.tags=dsid
+Make sure to include the cloud provider your instance is
+running on.
+
+After you have filed a bug, you can disable this warning by launching
+your instance with the cloud-config below, or putting that content
+into /etc/cloud/cloud.cfg.d/99-warnings.cfg
+
+#cloud-config
+warnings:
+ dsid_missing_source: off""",
+}
+
+
+def _get_warn_dir(cfg):
+ paths = helpers.Paths(
+ path_cfgs=cfg.get('system_info', {}).get('paths', {}))
+ return paths.get_ipath_cur('warnings')
+
+
+def _load_warn_cfg(cfg, name, mode=True, sleep=None):
+ # parse cfg['warnings']['name'] returning boolean, sleep
+ # expected value is form of:
+ # (on|off|true|false|sleep)[,sleeptime]
+ # boolean True == on, False == off
+ default = (mode, sleep)
+ if not cfg or not isinstance(cfg, dict):
+ return default
+
+ ncfg = util.get_cfg_by_path(cfg, ('warnings', name))
+ if ncfg is None:
+ return default
+
+ if ncfg in ("on", "true", True):
+ return True, None
+
+ if ncfg in ("off", "false", False):
+ return False, None
+
+ mode, _, csleep = ncfg.partition(",")
+ if mode != "sleep":
+ return default
+
+ if csleep:
+ try:
+ sleep = int(csleep)
+ except ValueError:
+ return default
+
+ return True, sleep
+
+
+def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs):
+ # kwargs are used for .format of the message.
+ # sleep and mode are default values used if
+ # cfg['warnings']['name'] is not present.
+ if cfg is None:
+ cfg = {}
+
+ mode, sleep = _load_warn_cfg(cfg, name, mode=mode, sleep=sleep)
+ if not mode:
+ return
+
+ msg = WARNINGS[name].format(**kwargs)
+ msgwidth = 70
+ linewidth = msgwidth + 4
+
+ fmt = "# %%-%ds #" % msgwidth
+ topline = "*" * linewidth + "\n"
+ fmtlines = []
+ for line in msg.strip("\n").splitlines():
+ fmtlines.append(fmt % line)
+
+ closeline = topline
+ if sleep:
+ sleepmsg = " [sleeping for %d seconds] " % sleep
+ closeline = sleepmsg.center(linewidth, "*") + "\n"
+
+ util.write_file(
+ os.path.join(_get_warn_dir(cfg), name),
+ topline + "\n".join(fmtlines) + "\n" + topline)
+
+ LOG.warn(topline + "\n".join(fmtlines) + "\n" + closeline)
+
+ if sleep:
+ LOG.debug("sleeping %d seconds for warning '%s'" % (sleep, name))
+ time.sleep(sleep)
+
+# vi: ts=4 expandtab
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index c5f84b13..c03f1026 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -200,7 +200,7 @@ ssh_import_id: [smoser]
#
# Default: none
#
-debconf_selections: | # Need to perserve newlines
+debconf_selections: | # Need to preserve newlines
# Force debconf priority to critical.
debconf debconf/priority select critical
diff --git a/doc/rtd/topics/datasources/altcloud.rst b/doc/rtd/topics/datasources/altcloud.rst
index 8646e77e..202b0a4a 100644
--- a/doc/rtd/topics/datasources/altcloud.rst
+++ b/doc/rtd/topics/datasources/altcloud.rst
@@ -66,7 +66,7 @@ NOTE: The file name on the ISO must be: ``user-data.txt``
.. sourcecode:: sh
- % cp simple_scirpt.bash my-iso/user-data.txt
+ % cp simple_script.bash my-iso/user-data.txt
% genisoimage -o user-data.iso -r my-iso
Verify the ISO
@@ -75,7 +75,7 @@ Verify the ISO
.. sourcecode:: sh
% sudo mkdir /media/vsphere_iso
- % sudo mount -o loop JoeV_CI_02.iso /media/vsphere_iso
+ % sudo mount -o loop user-data.iso /media/vsphere_iso
% cat /media/vsphere_iso/user-data.txt
% sudo umount /media/vsphere_iso
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index ea47ea85..164b0e0c 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -1,7 +1,41 @@
OpenStack
=========
-*TODO*
+This datasource supports reading data from the
+`OpenStack Metadata Service
+<http://docs.openstack.org/admin-guide/compute-networking-nova.html#metadata-service>`_.
+
+Configuration
+-------------
+The following configuration can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+ * **metadata_urls**: This list of urls will be searched for an OpenStack
+ metadata service. The first entry that successfully returns a 200 response
+ for <url>/openstack will be selected. (default: ['http://169.254.169.254']).
+ * **max_wait**: the maximum amount of clock time in seconds that should be
+ spent searching metadata_urls. A value less than zero will result in only
+ one request being made, to the first in the list. (default: -1)
+ * **timeout**: the timeout value provided to urlopen for each individual http
+ request. This is used both when selecting a metadata_url and when crawling
+ the metadata service. (default: 10)
+ * **retries**: The number of retries that should be done for an http request.
+ This value is used only after metadata_url is selected. (default: 5)
+
+An example configuration with the default values is provided as example below:
+
+.. sourcecode:: yaml
+
+ #cloud-config
+ datasource:
+ OpenStack:
+ metadata_urls: ["http://169.254.169.254"]
+ max_wait: -1
+ timeout: 10
+ retries: 5
+
Vendor Data
-----------
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index ed87d3ed..436eb00f 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -127,11 +127,11 @@ Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when u
Part Handler
============
-This is a ``part-handler``. It will be written to a file in ``/var/lib/cloud/data`` based on its filename (which is generated).
-This must be python code that contains a ``list_types`` method and a ``handle_type`` method.
-Once the section is read the ``list_types`` method will be called. It must return a list of mime-types that this part-handler handles.
+This is a ``part-handler``: It contains custom code for either supporting new mime-types in multi-part user data, or overriding the existing handlers for supported mime-types. It will be written to a file in ``/var/lib/cloud/data`` based on its filename (which is generated).
+This must be python code that contains a ``list_types`` function and a ``handle_part`` function.
+Once the section is read the ``list_types`` method will be called. It must return a list of mime-types that this part-handler handles. Because mime parts are processed in order, a ``part-handler`` part must precede any parts with mime-types it is expected to handle in the same user data.
-The ``handle_type`` method must be like:
+The ``handle_part`` function must be defined like:
.. code-block:: python
@@ -141,8 +141,9 @@ The ``handle_type`` method must be like:
# filename = the filename of the part (or a generated filename if none is present in mime data)
# payload = the parts' content
-Cloud-init will then call the ``handle_type`` method once at begin, once per part received, and once at end.
-The ``begin`` and ``end`` calls are to allow the part handler to do initialization or teardown.
+Cloud-init will then call the ``handle_part`` function once before it handles any parts, once per part received, and once after all parts have been handled.
+The ``'__begin__'`` and ``'__end__'`` sentinels allow the part handler to do initialization or teardown before or after
+receiving any parts.
Begins with: ``#part-handler`` or ``Content-Type: text/part-handler`` when using a MIME archive.
diff --git a/packages/debian/rules.in b/packages/debian/rules.in
index 9b004357..053b7649 100755
--- a/packages/debian/rules.in
+++ b/packages/debian/rules.in
@@ -11,6 +11,8 @@ override_dh_install:
dh_install
install -d debian/cloud-init/etc/rsyslog.d
cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf
+ install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh
+ install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh
override_dh_auto_test:
ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS)))
diff --git a/setup.py b/setup.py
index 04036078..e6693c90 100755
--- a/setup.py
+++ b/setup.py
@@ -168,7 +168,8 @@ else:
(ETC + '/cloud/templates', glob('templates/*')),
(ETC + '/NetworkManager/dispatcher.d/', ['tools/hook-network-manager']),
(ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']),
- (USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init',
+ (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify',
+ 'tools/uncloud-init',
'tools/write-ssh-key-fingerprints']),
(USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]),
(USR + '/share/doc/cloud-init/examples',
diff --git a/systemd/cloud-init-generator b/systemd/cloud-init-generator
index fedb6309..bd9f2678 100755
--- a/systemd/cloud-init-generator
+++ b/systemd/cloud-init-generator
@@ -6,6 +6,8 @@ DEBUG_LEVEL=1
LOG_D="/run/cloud-init"
ENABLE="enabled"
DISABLE="disabled"
+FOUND="found"
+NOTFOUND="notfound"
RUN_ENABLED_FILE="$LOG_D/$ENABLE"
CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target"
CLOUD_TARGET_NAME="cloud-init.target"
@@ -74,10 +76,30 @@ default() {
_RET="$ENABLE"
}
+check_for_datasource() {
+ local ds_rc="" dsidentify="/usr/lib/cloud-init/ds-identify"
+ if [ ! -x "$dsidentify" ]; then
+ debug 1 "no ds-identify in $dsidentify. _RET=$FOUND"
+ return 0
+ fi
+ $dsidentify
+ ds_rc=$?
+ debug 1 "ds-identify rc=$ds_rc"
+ if [ "$ds_rc" = "0" ]; then
+ _RET="$FOUND"
+ debug 1 "ds-identify _RET=$_RET"
+ return 0
+ fi
+ _RET="$NOTFOUND"
+ debug 1 "ds-identify _RET=$_RET"
+ return 1
+}
+
main() {
local normal_d="$1" early_d="$2" late_d="$3"
local target_name="multi-user.target" gen_d="$early_d"
local link_path="$gen_d/${target_name}.wants/${CLOUD_TARGET_NAME}"
+ local ds="$NOTFOUND"
debug 1 "$0 normal=$normal_d early=$early_d late=$late_d"
debug 2 "$0 $*"
@@ -93,7 +115,20 @@ main() {
debug 0 "search $search returned $ret"
fi
done
-
+
+ # enable AND ds=found == enable
+ # enable AND ds=notfound == disable
+ # disable || <any> == disabled
+ if [ "$result" = "$ENABLE" ]; then
+ debug 1 "checking for datasource"
+ check_for_datasource
+ ds=$_RET
+ if [ "$ds" = "$NOTFOUND" ]; then
+ debug 1 "cloud-init is enabled but no datasource found, disabling"
+ result="$DISABLE"
+ fi
+ fi
+
if [ "$result" = "$ENABLE" ]; then
if [ -e "$link_path" ]; then
debug 1 "already enabled: no change needed"
@@ -124,7 +159,7 @@ main() {
rm -f "$RUN_ENABLED_FILE"
fi
else
- debug 0 "unexpected result '$result'"
+ debug 0 "unexpected result '$result' 'ds=$ds'"
ret=3
fi
return $ret
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index cf3b46d2..90e2431f 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -29,7 +29,6 @@ PY2 = False
PY26 = False
PY27 = False
PY3 = False
-FIX_HTTPRETTY = False
_PY_VER = sys.version_info
_PY_MAJOR, _PY_MINOR, _PY_MICRO = _PY_VER[0:3]
@@ -44,8 +43,6 @@ else:
PY2 = True
if (_PY_MAJOR, _PY_MINOR) >= (3, 0):
PY3 = True
- if _PY_MINOR == 4 and _PY_MICRO < 3:
- FIX_HTTPRETTY = True
# Makes the old path start
@@ -86,6 +83,28 @@ class TestCase(unittest2.TestCase):
pass
+class CiTestCase(TestCase):
+ """This is the preferred test case base class unless user
+ needs other test case classes below."""
+ def tmp_dir(self, dir=None, cleanup=True):
+ # return a full path to a temporary directory that will be cleaned up.
+ if dir is None:
+ tmpd = tempfile.mkdtemp(
+ prefix="ci-%s." % self.__class__.__name__)
+ else:
+ tmpd = tempfile.mkdtemp(dir=dir)
+ self.addCleanup(functools.partial(shutil.rmtree, tmpd))
+ return tmpd
+
+ def tmp_path(self, path, dir=None):
+ # return an absolute path to 'path' under dir.
+ # if dir is None, one will be created with tmp_dir()
+ # the file is not created or modified.
+ if dir is None:
+ dir = self.tmp_dir()
+ return os.path.normpath(os.path.abspath(os.path.join(dir, path)))
+
+
class ResourceUsingTestCase(TestCase):
def setUp(self):
super(ResourceUsingTestCase, self).setUp()
@@ -216,37 +235,6 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
return root
-def import_httpretty():
- """Import HTTPretty and monkey patch Python 3.4 issue.
- See https://github.com/gabrielfalcao/HTTPretty/pull/193 and
- as well as https://github.com/gabrielfalcao/HTTPretty/issues/221.
-
- Lifted from
- https://github.com/inveniosoftware/datacite/blob/master/tests/helpers.py
- """
- if not FIX_HTTPRETTY:
- import httpretty
- else:
- import socket
- old_SocketType = socket.SocketType
-
- import httpretty
- from httpretty import core
-
- def sockettype_patch(f):
- @functools.wraps(f)
- def inner(*args, **kwargs):
- f(*args, **kwargs)
- socket.SocketType = old_SocketType
- socket.__dict__['SocketType'] = old_SocketType
- return inner
-
- core.httpretty.disable = sockettype_patch(
- httpretty.httpretty.disable
- )
- return httpretty
-
-
class HttprettyTestCase(TestCase):
# necessary as http_proxy gets in the way of httpretty
# https://github.com/gabrielfalcao/HTTPretty/issues/122
@@ -262,23 +250,10 @@ class HttprettyTestCase(TestCase):
super(HttprettyTestCase, self).tearDown()
-class TempDirTestCase(TestCase):
- # provide a tempdir per class, not per test.
- def setUp(self):
- super(TempDirTestCase, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def tmp_path(self, path):
- if path.startswith(os.path.sep):
- path = "." + path
-
- return os.path.normpath(os.path.join(self.tmp, path))
-
-
def populate_dir(path, files):
if not os.path.exists(path):
os.makedirs(path)
+ ret = []
for (name, content) in files.items():
p = os.path.join(path, name)
util.ensure_dir(os.path.dirname(p))
@@ -288,6 +263,9 @@ def populate_dir(path, files):
else:
fp.write(content.encode('utf-8'))
fp.close()
+ ret.append(p)
+
+ return ret
def dir2dict(startdir, prefix=None):
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index 7b6f8c4e..781f6d54 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -1,16 +1,18 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import logging
import os
import shutil
import tempfile
+from cloudinit.cmd import main
from cloudinit import handlers
from cloudinit import helpers
from cloudinit import settings
from cloudinit import url_helper
from cloudinit import util
-from .helpers import TestCase, ExitStack, mock
+from .helpers import TestCase, CiTestCase, ExitStack, mock
class FakeModule(handlers.Handler):
@@ -170,44 +172,68 @@ class TestHandlerHandlePart(TestCase):
self.data, self.ctype, self.filename, self.payload)
-class TestCmdlineUrl(TestCase):
- def test_invalid_content(self):
- url = "http://example.com/foo"
- key = "mykey"
- payload = b"0"
- cmdline = "ro %s=%s bar=1" % (key, url)
+class TestCmdlineUrl(CiTestCase):
+ def test_parse_cmdline_url_nokey_raises_keyerror(self):
+ self.assertRaises(
+ KeyError, main.parse_cmdline_url, 'root=foo bar single')
- with mock.patch('cloudinit.url_helper.readurl',
- return_value=url_helper.StringResponse(payload)):
- self.assertEqual(
- util.get_cmdline_url(names=[key], starts="xxxxxx",
- cmdline=cmdline),
- (key, url, None))
+ def test_parse_cmdline_url_found(self):
+ cmdline = 'root=foo bar single url=http://example.com arg1 -v'
+ self.assertEqual(
+ ('url', 'http://example.com'), main.parse_cmdline_url(cmdline))
- def test_valid_content(self):
- url = "http://example.com/foo"
- key = "mykey"
- payload = b"xcloud-config\nmydata: foo\nbar: wark\n"
+ @mock.patch('cloudinit.cmd.main.util.read_file_or_url')
+ def test_invalid_content(self, m_read):
+ key = "cloud-config-url"
+ url = 'http://example.com/foo'
cmdline = "ro %s=%s bar=1" % (key, url)
+ m_read.return_value = url_helper.StringResponse(b"unexpected blob")
- with mock.patch('cloudinit.url_helper.readurl',
- return_value=url_helper.StringResponse(payload)):
- self.assertEqual(
- util.get_cmdline_url(names=[key], starts=b"xcloud-config",
- cmdline=cmdline),
- (key, url, payload))
+ fpath = self.tmp_path("ccfile")
+ lvl, msg = main.attempt_cmdline_url(
+ fpath, network=True, cmdline=cmdline)
+ self.assertEqual(logging.WARN, lvl)
+ self.assertIn(url, msg)
+ self.assertFalse(os.path.exists(fpath))
- def test_no_key_found(self):
+ @mock.patch('cloudinit.cmd.main.util.read_file_or_url')
+ def test_valid_content(self, m_read):
url = "http://example.com/foo"
- key = "mykey"
- cmdline = "ro %s=%s bar=1" % (key, url)
-
- with mock.patch('cloudinit.url_helper.readurl',
- return_value=url_helper.StringResponse(b'')):
- self.assertEqual(
- util.get_cmdline_url(names=["does-not-appear"],
- starts="#cloud-config", cmdline=cmdline),
- (None, None, None))
+ payload = b"#cloud-config\nmydata: foo\nbar: wark\n"
+ cmdline = "ro %s=%s bar=1" % ('cloud-config-url', url)
+
+ m_read.return_value = url_helper.StringResponse(payload)
+ fpath = self.tmp_path("ccfile")
+ lvl, msg = main.attempt_cmdline_url(
+ fpath, network=True, cmdline=cmdline)
+ self.assertEqual(util.load_file(fpath, decode=False), payload)
+ self.assertEqual(logging.INFO, lvl)
+ self.assertIn(url, msg)
+
+ @mock.patch('cloudinit.cmd.main.util.read_file_or_url')
+ def test_no_key_found(self, m_read):
+ cmdline = "ro mykey=http://example.com/foo root=foo"
+ fpath = self.tmp_path("ccpath")
+ lvl, msg = main.attempt_cmdline_url(
+ fpath, network=True, cmdline=cmdline)
+
+ m_read.assert_not_called()
+ self.assertFalse(os.path.exists(fpath))
+ self.assertEqual(logging.DEBUG, lvl)
+
+ @mock.patch('cloudinit.cmd.main.util.read_file_or_url')
+ def test_exception_warns(self, m_read):
+ url = "http://example.com/foo"
+ cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url
+ fpath = self.tmp_path("ccfile")
+ m_read.side_effect = url_helper.UrlError(
+ cause="Unexpected Error", url="http://example.com/foo")
+
+ lvl, msg = main.attempt_cmdline_url(
+ fpath, network=True, cmdline=cmdline)
+ self.assertEqual(logging.WARN, lvl)
+ self.assertIn(url, msg)
+ self.assertFalse(os.path.exists(fpath))
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py
index e170c7c3..515919d8 100644
--- a/tests/unittests/test_atomic_helper.py
+++ b/tests/unittests/test_atomic_helper.py
@@ -6,10 +6,10 @@ import stat
from cloudinit import atomic_helper
-from . import helpers
+from .helpers import CiTestCase
-class TestAtomicHelper(helpers.TempDirTestCase):
+class TestAtomicHelper(CiTestCase):
def test_basic_usage(self):
"""write_file takes bytes if no omode."""
path = self.tmp_path("test_basic_usage")
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 4092d9ca..4ad86bb6 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -564,12 +564,12 @@ class TestConvertString(helpers.TestCase):
class TestFetchBaseConfig(helpers.TestCase):
-
- def test_only_builtin_gets_builtin2(self):
+ def test_only_builtin_gets_builtin(self):
ret = helpers.wrap_and_call(
- 'cloudinit.stages.util',
- {'read_conf_with_confd': None,
- 'read_conf_from_cmdline': None},
+ 'cloudinit.stages',
+ {'util.read_conf_with_confd': None,
+ 'util.read_conf_from_cmdline': None,
+ 'read_runtime_config': {'return_value': {}}},
stages.fetch_base_config)
self.assertEqual(util.get_builtin_cfg(), ret)
@@ -578,9 +578,11 @@ class TestFetchBaseConfig(helpers.TestCase):
test_key = sorted(builtin)[0]
test_value = 'test'
ret = helpers.wrap_and_call(
- 'cloudinit.stages.util',
- {'read_conf_with_confd': {'return_value': {test_key: test_value}},
- 'read_conf_from_cmdline': None},
+ 'cloudinit.stages',
+ {'util.read_conf_with_confd':
+ {'return_value': {test_key: test_value}},
+ 'util.read_conf_from_cmdline': None,
+ 'read_runtime_config': {'return_value': {}}},
stages.fetch_base_config)
self.assertEqual(ret.get(test_key), test_value)
builtin[test_key] = test_value
@@ -592,25 +594,44 @@ class TestFetchBaseConfig(helpers.TestCase):
test_value = 'test'
cmdline = {test_key: test_value}
ret = helpers.wrap_and_call(
- 'cloudinit.stages.util',
- {'read_conf_from_cmdline': {'return_value': cmdline},
- 'read_conf_with_confd': None},
+ 'cloudinit.stages',
+ {'util.read_conf_from_cmdline': {'return_value': cmdline},
+ 'util.read_conf_with_confd': None,
+ 'read_runtime_config': None},
stages.fetch_base_config)
self.assertEqual(ret.get(test_key), test_value)
builtin[test_key] = test_value
self.assertEqual(ret, builtin)
- def test_cmdline_overrides_conf_d_and_defaults(self):
+ def test_cmdline_overrides_confd_runtime_and_defaults(self):
builtin = {'key1': 'value0', 'key3': 'other2'}
conf_d = {'key1': 'value1', 'key2': 'other1'}
cmdline = {'key3': 'other3', 'key2': 'other2'}
+ runtime = {'key3': 'runtime3'}
ret = helpers.wrap_and_call(
- 'cloudinit.stages.util',
- {'read_conf_with_confd': {'return_value': conf_d},
- 'get_builtin_cfg': {'return_value': builtin},
- 'read_conf_from_cmdline': {'return_value': cmdline}},
+ 'cloudinit.stages',
+ {'util.read_conf_with_confd': {'return_value': conf_d},
+ 'util.get_builtin_cfg': {'return_value': builtin},
+ 'read_runtime_config': {'return_value': runtime},
+ 'util.read_conf_from_cmdline': {'return_value': cmdline}},
stages.fetch_base_config)
self.assertEqual(ret, {'key1': 'value1', 'key2': 'other2',
'key3': 'other3'})
+ def test_order_precedence_is_builtin_system_runtime_cmdline(self):
+ builtin = {'key1': 'builtin0', 'key3': 'builtin3'}
+ conf_d = {'key1': 'confd1', 'key2': 'confd2', 'keyconfd1': 'kconfd1'}
+ runtime = {'key1': 'runtime1', 'key2': 'runtime2'}
+ cmdline = {'key1': 'cmdline1'}
+ ret = helpers.wrap_and_call(
+ 'cloudinit.stages',
+ {'util.read_conf_with_confd': {'return_value': conf_d},
+ 'util.get_builtin_cfg': {'return_value': builtin},
+ 'util.read_conf_from_cmdline': {'return_value': cmdline},
+ 'read_runtime_config': {'return_value': runtime},
+ },
+ stages.fetch_base_config)
+ self.assertEqual(ret, {'key1': 'cmdline1', 'key2': 'runtime2',
+ 'key3': 'builtin3', 'keyconfd1': 'kconfd1'})
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 4f667678..4f83454e 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -4,6 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import httpretty
import re
from base64 import b64encode, b64decode
@@ -15,7 +16,6 @@ from cloudinit.sources import DataSourceGCE
from .. import helpers as test_helpers
-httpretty = test_helpers.import_httpretty()
GCE_META = {
'instance/id': '123',
@@ -59,6 +59,8 @@ def _set_mock_metadata(gce_meta=None):
else:
return (404, headers, '')
+ # reset is needed. https://github.com/gabrielfalcao/HTTPretty/issues/316
+ httpretty.reset()
httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback)
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index e5b6fcc6..7bf55084 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -5,6 +5,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import copy
+import httpretty as hp
import json
import re
@@ -20,8 +21,6 @@ from cloudinit.sources import DataSourceOpenStack as ds
from cloudinit.sources.helpers import openstack
from cloudinit import util
-hp = test_helpers.import_httpretty()
-
BASE_URL = "http://169.254.169.254"
PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
EC2_META = {
@@ -232,7 +231,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
None,
helpers.Paths({}))
self.assertIsNone(ds_os.version)
- found = ds_os.get_data(timeout=0.1, retries=0)
+ found = ds_os.get_data()
self.assertTrue(found)
self.assertEqual(2, ds_os.version)
md = dict(ds_os.metadata)
@@ -256,7 +255,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
None,
helpers.Paths({}))
self.assertIsNone(ds_os.version)
- found = ds_os.get_data(timeout=0.1, retries=0)
+ found = ds_os.get_data()
self.assertFalse(found)
self.assertIsNone(ds_os.version)
@@ -275,7 +274,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
'timeout': 0,
}
self.assertIsNone(ds_os.version)
- found = ds_os.get_data(timeout=0.1, retries=0)
+ found = ds_os.get_data()
self.assertFalse(found)
self.assertIsNone(ds_os.version)
@@ -298,7 +297,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
'timeout': 0,
}
self.assertIsNone(ds_os.version)
- found = ds_os.get_data(timeout=0.1, retries=0)
+ found = ds_os.get_data()
self.assertFalse(found)
self.assertIsNone(ds_os.version)
diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py
index 88746e0a..88746e0a 100755..100644
--- a/tests/unittests/test_distros/test_user_data_normalize.py
+++ b/tests/unittests/test_distros/test_user_data_normalize.py
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py
index 4a33d747..65fdb519 100644
--- a/tests/unittests/test_ec2_util.py
+++ b/tests/unittests/test_ec2_util.py
@@ -1,12 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import httpretty as hp
+
from . import helpers
from cloudinit import ec2_utils as eu
from cloudinit import url_helper as uh
-hp = helpers.import_httpretty()
-
class TestEc2Util(helpers.HttprettyTestCase):
VERSION = 'latest'
@@ -140,4 +140,49 @@ class TestEc2Util(helpers.HttprettyTestCase):
self.assertEqual(bdm['ami'], 'sdb')
self.assertEqual(bdm['ephemeral0'], 'sdc')
+ @hp.activate
+ def test_metadata_no_security_credentials(self):
+ base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
+ hp.register_uri(hp.GET, base_url, status=200,
+ body="\n".join(['instance-id',
+ 'iam/']))
+ hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
+ status=200, body='i-0123451689abcdef0')
+ hp.register_uri(hp.GET,
+ uh.combine_url(base_url, 'iam/'),
+ status=200,
+ body="\n".join(['info/', 'security-credentials/']))
+ hp.register_uri(hp.GET,
+ uh.combine_url(base_url, 'iam/info/'),
+ status=200,
+ body='LastUpdated')
+ hp.register_uri(hp.GET,
+ uh.combine_url(base_url, 'iam/info/LastUpdated'),
+ status=200, body='2016-10-27T17:29:39Z')
+ hp.register_uri(hp.GET,
+ uh.combine_url(base_url, 'iam/security-credentials/'),
+ status=200,
+ body='ReadOnly/')
+ hp.register_uri(hp.GET,
+ uh.combine_url(base_url,
+ 'iam/security-credentials/ReadOnly/'),
+ status=200,
+ body="\n".join(['LastUpdated', 'Expiration']))
+ hp.register_uri(hp.GET,
+ uh.combine_url(
+ base_url,
+ 'iam/security-credentials/ReadOnly/LastUpdated'),
+ status=200, body='2016-10-27T17:28:17Z')
+ hp.register_uri(hp.GET,
+ uh.combine_url(
+ base_url,
+ 'iam/security-credentials/ReadOnly/Expiration'),
+ status=200, body='2016-10-28T00:00:34Z')
+ md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
+ self.assertEqual(md['instance-id'], 'i-0123451689abcdef0')
+ iam = md['iam']
+ self.assertEqual(1, len(iam))
+ self.assertEqual(iam['info']['LastUpdated'], '2016-10-27T17:29:39Z')
+ self.assertNotIn('security-credentials', iam)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 1090282a..4b03ff72 100755..100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -8,11 +8,10 @@ from cloudinit.net import sysconfig
from cloudinit.sources.helpers import openstack
from cloudinit import util
+from .helpers import CiTestCase
from .helpers import dir2dict
from .helpers import mock
from .helpers import populate_dir
-from .helpers import TempDirTestCase
-from .helpers import TestCase
import base64
import copy
@@ -20,8 +19,6 @@ import gzip
import io
import json
import os
-import shutil
-import tempfile
import textwrap
import yaml
@@ -166,6 +163,91 @@ nameserver 172.19.0.12
('etc/udev/rules.d/70-persistent-net.rules',
"".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
+ },
+ {
+ 'in_data': {
+ "services": [{"type": "dns", "address": "172.19.0.12"}],
+ "networks": [{
+ "network_id": "public-ipv4",
+ "type": "ipv4", "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [{
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }],
+ "ip_address": "172.19.1.34", "id": "network0"
+ }, {
+ "network_id": "private-ipv4",
+ "type": "ipv4", "netmask": "255.255.255.0",
+ "link": "tap1a81968a-79",
+ "routes": [],
+ "ip_address": "10.0.0.10", "id": "network1"
+ }],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ "mtu": None, "type": "bridge", "id":
+ "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ },
+ ],
+ },
+ 'in_macs': {
+ 'fa:16:3e:ed:9a:59': 'eth0',
+ },
+ 'out_sysconfig': [
+ ('etc/sysconfig/network-scripts/ifcfg-eth0',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=none
+DEVICE=eth0
+HWADDR=fa:16:3e:ed:9a:59
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+""".lstrip()),
+ ('etc/sysconfig/network-scripts/ifcfg-eth0:0',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=static
+DEFROUTE=yes
+DEVICE=eth0:0
+GATEWAY=172.19.3.254
+HWADDR=fa:16:3e:ed:9a:59
+IPADDR=172.19.1.34
+NETMASK=255.255.252.0
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+""".lstrip()),
+ ('etc/sysconfig/network-scripts/ifcfg-eth0:1',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=static
+DEVICE=eth0:1
+HWADDR=fa:16:3e:ed:9a:59
+IPADDR=10.0.0.10
+NETMASK=255.255.255.0
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+""".lstrip()),
+ ('etc/resolv.conf',
+ """
+; Created by cloud-init on instance boot automatically, do not edit.
+;
+nameserver 172.19.0.12
+""".lstrip()),
+ ('etc/udev/rules.d/70-persistent-net.rules',
+ "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
}
]
@@ -222,11 +304,9 @@ NETWORK_CONFIGS = {
auto eth99
iface eth99 inet dhcp
- post-up ifup eth99:1
-
- auto eth99:1
- iface eth99:1 inet static
+ # control-alias eth99
+ iface eth99 inet static
address 192.168.21.3/24
dns-nameservers 8.8.8.8 8.8.4.4
dns-search barley.maas sach.maas
@@ -264,6 +344,27 @@ NETWORK_CONFIGS = {
- wark.maas
"""),
},
+ 'v4_and_v6': {
+ 'expected_eni': textwrap.dedent("""\
+ auto lo
+ iface lo inet loopback
+
+ auto iface0
+ iface iface0 inet dhcp
+
+ # control-alias iface0
+ iface iface0 inet6 dhcp
+ """).rstrip(' '),
+ 'yaml': textwrap.dedent("""\
+ version: 1
+ config:
+ - type: 'physical'
+ name: 'iface0'
+ subnets:
+ - {'type': 'dhcp4'}
+ - {'type': 'dhcp6'}
+ """).rstrip(' '),
+ },
'all': {
'expected_eni': ("""\
auto lo
@@ -301,11 +402,9 @@ iface br0 inet static
address 192.168.14.2/24
bridge_ports eth3 eth4
bridge_stp off
- post-up ifup br0:1
-
-auto br0:1
-iface br0:1 inet6 static
+# control-alias br0
+iface br0 inet6 static
address 2001:1::1/64
auto bond0.200
@@ -322,11 +421,9 @@ iface eth0.101 inet static
mtu 1500
vlan-raw-device eth0
vlan_id 101
- post-up ifup eth0.101:1
-
-auto eth0.101:1
-iface eth0.101:1 inet static
+# control-alias eth0.101
+iface eth0.101 inet static
address 192.168.2.10/24
post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
@@ -478,7 +575,7 @@ def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net,
mock_sys_dev_path.side_effect = sys_dev_path
-class TestSysConfigRendering(TestCase):
+class TestSysConfigRendering(CiTestCase):
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@@ -486,8 +583,7 @@ class TestSysConfigRendering(TestCase):
def test_default_generation(self, mock_get_devicelist,
mock_read_sys_net,
mock_sys_dev_path):
- tmp_dir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmp_dir)
+ tmp_dir = self.tmp_dir()
_setup_test(tmp_dir, mock_get_devicelist,
mock_read_sys_net, mock_sys_dev_path)
@@ -518,10 +614,8 @@ USERCTL=no
self.assertEqual(expected_content, content)
def test_openstack_rendering_samples(self):
- tmp_dir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmp_dir)
- render_dir = os.path.join(tmp_dir, "render")
for os_sample in OS_SAMPLES:
+ render_dir = self.tmp_dir()
ex_input = os_sample['in_data']
ex_mac_addrs = os_sample['in_macs']
network_cfg = openstack.convert_net_json(
@@ -535,7 +629,7 @@ USERCTL=no
self.assertEqual(expected_content, fh.read())
-class TestEniNetRendering(TestCase):
+class TestEniNetRendering(CiTestCase):
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@@ -543,8 +637,7 @@ class TestEniNetRendering(TestCase):
def test_default_generation(self, mock_get_devicelist,
mock_read_sys_net,
mock_sys_dev_path):
- tmp_dir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmp_dir)
+ tmp_dir = self.tmp_dir()
_setup_test(tmp_dir, mock_get_devicelist,
mock_read_sys_net, mock_sys_dev_path)
@@ -576,7 +669,7 @@ iface eth1000 inet dhcp
self.assertEqual(expected.lstrip(), contents.lstrip())
-class TestEniNetworkStateToEni(TestCase):
+class TestEniNetworkStateToEni(CiTestCase):
mycfg = {
'config': [{"type": "physical", "name": "eth0",
"mac_address": "c0:d6:9f:2c:e8:80",
@@ -607,7 +700,7 @@ class TestEniNetworkStateToEni(TestCase):
self.assertNotIn("hwaddress", rendered)
-class TestCmdlineConfigParsing(TestCase):
+class TestCmdlineConfigParsing(CiTestCase):
simple_cfg = {
'config': [{"type": "physical", "name": "eth0",
"mac_address": "c0:d6:9f:2c:e8:80",
@@ -665,7 +758,7 @@ class TestCmdlineConfigParsing(TestCase):
self.assertEqual(found, self.simple_cfg)
-class TestCmdlineReadKernelConfig(TempDirTestCase):
+class TestCmdlineReadKernelConfig(CiTestCase):
macs = {
'eth0': '14:02:ec:42:48:00',
'eno1': '14:02:ec:42:48:01',
@@ -673,8 +766,7 @@ class TestCmdlineReadKernelConfig(TempDirTestCase):
def test_ip_cmdline_read_kernel_cmdline_ip(self):
content = {'net-eth0.conf': DHCP_CONTENT_1}
- populate_dir(self.tmp, content)
- files = [os.path.join(self.tmp, k) for k in content.keys()]
+ files = sorted(populate_dir(self.tmp_dir(), content))
found = cmdline.read_kernel_cmdline_config(
files=files, cmdline='foo ip=dhcp', mac_addrs=self.macs)
exp1 = copy.deepcopy(DHCP_EXPECTED_1)
@@ -684,8 +776,7 @@ class TestCmdlineReadKernelConfig(TempDirTestCase):
def test_ip_cmdline_read_kernel_cmdline_ip6(self):
content = {'net6-eno1.conf': DHCP6_CONTENT_1}
- populate_dir(self.tmp, content)
- files = [os.path.join(self.tmp, k) for k in content.keys()]
+ files = sorted(populate_dir(self.tmp_dir(), content))
found = cmdline.read_kernel_cmdline_config(
files=files, cmdline='foo ip6=dhcp root=/dev/sda',
mac_addrs=self.macs)
@@ -701,8 +792,7 @@ class TestCmdlineReadKernelConfig(TempDirTestCase):
def test_ip_cmdline_read_kernel_cmdline_none(self):
# if there is no ip= or ip6= on cmdline, return value should be None
content = {'net6-eno1.conf': DHCP6_CONTENT_1}
- populate_dir(self.tmp, content)
- files = [os.path.join(self.tmp, k) for k in content.keys()]
+ files = sorted(populate_dir(self.tmp_dir(), content))
found = cmdline.read_kernel_cmdline_config(
files=files, cmdline='foo root=/dev/sda', mac_addrs=self.macs)
self.assertEqual(found, None)
@@ -710,8 +800,7 @@ class TestCmdlineReadKernelConfig(TempDirTestCase):
def test_ip_cmdline_both_ip_ip6(self):
content = {'net-eth0.conf': DHCP_CONTENT_1,
'net6-eth0.conf': DHCP6_CONTENT_1.replace('eno1', 'eth0')}
- populate_dir(self.tmp, content)
- files = [os.path.join(self.tmp, k) for k in sorted(content.keys())]
+ files = sorted(populate_dir(self.tmp_dir(), content))
found = cmdline.read_kernel_cmdline_config(
files=files, cmdline='foo ip=dhcp ip6=dhcp', mac_addrs=self.macs)
@@ -725,14 +814,12 @@ class TestCmdlineReadKernelConfig(TempDirTestCase):
self.assertEqual(found['config'], expected)
-class TestEniRoundTrip(TestCase):
- def setUp(self):
- super(TestCase, self).setUp()
- self.tmp_dir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp_dir)
-
+class TestEniRoundTrip(CiTestCase):
def _render_and_read(self, network_config=None, state=None, eni_path=None,
- links_prefix=None, netrules_path=None):
+ links_prefix=None, netrules_path=None, dir=None):
+ if dir is None:
+ dir = self.tmp_dir()
+
if network_config:
ns = network_state.parse_net_config_data(network_config)
elif state:
@@ -747,8 +834,8 @@ class TestEniRoundTrip(TestCase):
config={'eni_path': eni_path, 'links_path_prefix': links_prefix,
'netrules_path': netrules_path})
- renderer.render_network_state(self.tmp_dir, ns)
- return dir2dict(self.tmp_dir)
+ renderer.render_network_state(dir, ns)
+ return dir2dict(dir)
def testsimple_convert_and_render(self):
network_config = eni.convert_eni_data(EXAMPLE_ENI)
@@ -771,6 +858,13 @@ class TestEniRoundTrip(TestCase):
entry['expected_eni'].splitlines(),
files['/etc/network/interfaces'].splitlines())
+ def testsimple_render_v4_and_v6(self):
+ entry = NETWORK_CONFIGS['v4_and_v6']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self.assertEqual(
+ entry['expected_eni'].splitlines(),
+ files['/etc/network/interfaces'].splitlines())
+
def test_routes_rendered(self):
# as reported in bug 1649652
conf = [
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index 55971b5e..991f45a6 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -32,6 +32,22 @@ VALID_CONTENT = {
"YWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07"
"/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw=="
),
+ 'ecdsa-sha2-nistp256': (
+ "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMy/WuXq5MF"
+ "r5hVQ9EEKKUTF7vUaOkgxUh6bNsCs9SFMVslIm1zM/WJYwUv52LdEePjtDYiV4A"
+ "l2XthJ9/bs7Pc="
+ ),
+ 'ecdsa-sha2-nistp521': (
+ "AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBABOdNTkh9F"
+ "McK4hZRLs5LTXBEXwNr0+Yg9uvJYRFcz2ZlnjYX9tM4Z3QQFjqogU4pU+zpKLqZ"
+ "5VE4Jcnb1T608UywBIdXkSFZT8trGJqBv9nFWGgmTX3KP8kiBbihpuv1cGwglPl"
+ "Hxs50A42iP0JiT7auGtEAGsu/uMql323GTGb4171Q=="
+ ),
+ 'ecdsa-sha2-nistp384': (
+ "AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBAnoqFU9Gnl"
+ "LcsEuCJnobs/c6whzvjCgouaOO61kgXNtIxyF4Wkutg6xaGYgBBt/phb7a2TurI"
+ "bcIBuzJ/mP22UyUAbNnBfStAEBmYbrTf1EfiMCYUAr1XnL0UdYmZ8HFg=="
+ ),
}
TEST_OPTIONS = (
@@ -44,7 +60,13 @@ class TestAuthKeyLineParser(test_helpers.TestCase):
def test_simple_parse(self):
# test key line with common 3 fields (keytype, base64, comment)
parser = ssh_util.AuthKeyLineParser()
- for ktype in ['rsa', 'ecdsa', 'dsa']:
+ ecdsa_types = [
+ 'ecdsa-sha2-nistp256',
+ 'ecdsa-sha2-nistp384',
+ 'ecdsa-sha2-nistp521',
+ ]
+
+ for ktype in ['rsa', 'ecdsa', 'dsa'] + ecdsa_types:
content = VALID_CONTENT[ktype]
comment = 'user-%s@host' % ktype
line = ' '.join((ktype, content, comment,))
diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh
index 5912bae2..4978d87e 100755..100644
--- a/tools/Z99-cloud-locale-test.sh
+++ b/tools/Z99-cloud-locale-test.sh
@@ -11,90 +11,90 @@
# of how to fix them.
locale_warn() {
- local bad_names="" bad_lcs="" key="" val="" var="" vars="" bad_kv=""
- local w1 w2 w3 w4 remain
+ local bad_names="" bad_lcs="" key="" val="" var="" vars="" bad_kv=""
+ local w1 w2 w3 w4 remain
- # if shell is zsh, act like sh only for this function (-L).
- # The behavior change will not permenently affect user's shell.
- [ "${ZSH_NAME+zsh}" = "zsh" ] && emulate -L sh
+ # if shell is zsh, act like sh only for this function (-L).
+    # The behavior change will not permanently affect user's shell.
+ [ "${ZSH_NAME+zsh}" = "zsh" ] && emulate -L sh
- # locale is expected to output either:
- # VARIABLE=
- # VARIABLE="value"
- # locale: Cannot set LC_SOMETHING to default locale
- while read -r w1 w2 w3 w4 remain; do
- case "$w1" in
- locale:) bad_names="${bad_names} ${w4}";;
- *)
- key=${w1%%=*}
- val=${w1#*=}
- val=${val#\"}
- val=${val%\"}
- vars="${vars} $key=$val";;
- esac
- done
- for bad in $bad_names; do
- for var in ${vars}; do
- [ "${bad}" = "${var%=*}" ] || continue
- val=${var#*=}
- [ "${bad_lcs#* ${val}}" = "${bad_lcs}" ] &&
- bad_lcs="${bad_lcs} ${val}"
- bad_kv="${bad_kv} $bad=$val"
- break
- done
- done
- bad_lcs=${bad_lcs# }
- bad_kv=${bad_kv# }
- [ -n "$bad_lcs" ] || return 0
+ # locale is expected to output either:
+ # VARIABLE=
+ # VARIABLE="value"
+ # locale: Cannot set LC_SOMETHING to default locale
+ while read -r w1 w2 w3 w4 remain; do
+ case "$w1" in
+ locale:) bad_names="${bad_names} ${w4}";;
+ *)
+ key=${w1%%=*}
+ val=${w1#*=}
+ val=${val#\"}
+ val=${val%\"}
+ vars="${vars} $key=$val";;
+ esac
+ done
+ for bad in $bad_names; do
+ for var in ${vars}; do
+ [ "${bad}" = "${var%=*}" ] || continue
+ val=${var#*=}
+ [ "${bad_lcs#* ${val}}" = "${bad_lcs}" ] &&
+ bad_lcs="${bad_lcs} ${val}"
+ bad_kv="${bad_kv} $bad=$val"
+ break
+ done
+ done
+ bad_lcs=${bad_lcs# }
+ bad_kv=${bad_kv# }
+ [ -n "$bad_lcs" ] || return 0
- printf "_____________________________________________________________________\n"
- printf "WARNING! Your environment specifies an invalid locale.\n"
- printf " The unknown environment variables are:\n %s\n" "$bad_kv"
- printf " This can affect your user experience significantly, including the\n"
- printf " ability to manage packages. You may install the locales by running:\n\n"
+ printf "_____________________________________________________________________\n"
+ printf "WARNING! Your environment specifies an invalid locale.\n"
+ printf " The unknown environment variables are:\n %s\n" "$bad_kv"
+ printf " This can affect your user experience significantly, including the\n"
+ printf " ability to manage packages. You may install the locales by running:\n\n"
- local bad invalid="" to_gen="" sfile="/usr/share/i18n/SUPPORTED"
- local pkgs=""
- if [ -e "$sfile" ]; then
- for bad in ${bad_lcs}; do
- grep -q -i "${bad}" "$sfile" &&
- to_gen="${to_gen} ${bad}" ||
- invalid="${invalid} ${bad}"
- done
- else
- printf " sudo apt-get install locales\n"
- to_gen=$bad_lcs
- fi
- to_gen=${to_gen# }
+ local bad invalid="" to_gen="" sfile="/usr/share/i18n/SUPPORTED"
+ local pkgs=""
+ if [ -e "$sfile" ]; then
+ for bad in ${bad_lcs}; do
+ grep -q -i "${bad}" "$sfile" &&
+ to_gen="${to_gen} ${bad}" ||
+ invalid="${invalid} ${bad}"
+ done
+ else
+ printf " sudo apt-get install locales\n"
+ to_gen=$bad_lcs
+ fi
+ to_gen=${to_gen# }
- local pkgs=""
- for bad in ${to_gen}; do
- pkgs="${pkgs} language-pack-${bad%%_*}"
- done
- pkgs=${pkgs# }
+ local pkgs=""
+ for bad in ${to_gen}; do
+ pkgs="${pkgs} language-pack-${bad%%_*}"
+ done
+ pkgs=${pkgs# }
- if [ -n "${pkgs}" ]; then
- printf " sudo apt-get install ${pkgs# }\n"
- printf " or\n"
- printf " sudo locale-gen ${to_gen# }\n"
- printf "\n"
- fi
- for bad in ${invalid}; do
- printf "WARNING: '${bad}' is an invalid locale\n"
- done
+ if [ -n "${pkgs}" ]; then
+ printf " sudo apt-get install ${pkgs# }\n"
+ printf " or\n"
+ printf " sudo locale-gen ${to_gen# }\n"
+ printf "\n"
+ fi
+ for bad in ${invalid}; do
+ printf "WARNING: '${bad}' is an invalid locale\n"
+ done
- printf "To see all available language packs, run:\n"
- printf " apt-cache search \"^language-pack-[a-z][a-z]$\"\n"
- printf "To disable this message for all users, run:\n"
- printf " sudo touch /var/lib/cloud/instance/locale-check.skip\n"
- printf "_____________________________________________________________________\n\n"
+ printf "To see all available language packs, run:\n"
+ printf " apt-cache search \"^language-pack-[a-z][a-z]$\"\n"
+ printf "To disable this message for all users, run:\n"
+ printf " sudo touch /var/lib/cloud/instance/locale-check.skip\n"
+ printf "_____________________________________________________________________\n\n"
- # only show the message once
- : > ~/.cloud-locale-test.skip 2>/dev/null || :
+ # only show the message once
+ : > ~/.cloud-locale-test.skip 2>/dev/null || :
}
[ -f ~/.cloud-locale-test.skip -o -f /var/lib/cloud/instance/locale-check.skip ] ||
- locale 2>&1 | locale_warn
+ locale 2>&1 | locale_warn
unset locale_warn
-# vi: ts=4 noexpandtab
+# vi: ts=4 expandtab
diff --git a/tools/Z99-cloudinit-warnings.sh b/tools/Z99-cloudinit-warnings.sh
new file mode 100644
index 00000000..b237786b
--- /dev/null
+++ b/tools/Z99-cloudinit-warnings.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Purpose: show user warnings on login.
+
+cloud_init_warnings() {
+    local warning="" idir="/var/lib/cloud/instance" n=0
+    local warndir="$idir/warnings"
+    local ufile="$HOME/.cloud-warnings.skip" sfile="$warndir/.skip"
+    [ -d "$warndir" ] || return 0
+    [ ! -f "$ufile" ] || return 0
+    [ ! -f "$sfile" ] || return 0  # was "$skipf" (always empty): skip never honored
+
+    for warning in "$warndir"/*; do
+        [ -f "$warning" ] || continue
+        cat "$warning"
+        n=$((n+1))
+    done
+    [ $n -eq 0 ] && return 0
+    echo ""
+    echo "Disable the warnings above by:"
+    echo "  touch $ufile"
+    echo "or"
+    echo "  touch $sfile"
+}
+
+cloud_init_warnings 1>&2
+unset cloud_init_warnings
+
+# vi: syntax=sh ts=4 expandtab
diff --git a/tools/ds-identify b/tools/ds-identify
new file mode 100755
index 00000000..d7b2a0b2
--- /dev/null
+++ b/tools/ds-identify
@@ -0,0 +1,1240 @@
+#!/bin/sh
+#
+# ds-identify is configured via /etc/cloud/ds-identify.cfg
+# or on the kernel command line. It takes primarily 2 inputs:
+# datasource: can specify the datasource that should be used.
+# kernel command line option: ci.datasource=<dsname>
+#
+# policy: a string that indicates how ds-identify should operate.
+# kernel command line option: ci.di.policy=<policy>
+# default setting is:
+# search,found=all,maybe=all,notfound=disable
+#
+# report: write config to /run/cloud-init/cloud.cfg, but
+# namespaced under 'di_report'. Thus cloud-init can still see
+#   the result, but has no effect.
+# enable: do nothing
+# ds-identify writes no config and just exits success
+# the caller (cloud-init-generator) then enables cloud-init to run
+# just without any aid from ds-identify.
+# disable: disable cloud-init
+#
+# [report,]found=value,maybe=value,notfound=value
+# found: (default=first)
+#    first: use the first found, do no further checking
+# all: enable all DS_FOUND
+#
+# maybe: (default=all)
+# if nothing returned 'found', then how to handle maybe.
+# no network sources are allowed to return 'maybe'.
+# all: enable all DS_MAYBE
+# none: ignore any DS_MAYBE
+#
+# notfound: (default=disabled)
+# disabled: disable cloud-init
+# enabled: enable cloud-init
+#
+# ci.datasource.ec2.strict_id: (true|false|warn[,0-9])
+# if ec2 datasource does not strictly match,
+# return not_found if true
+# return maybe if false or warn*.
+#
+
+set -u
+set -f
+UNAVAILABLE="unavailable"
+CR="
+"
+ERROR="error"
+DI_ENABLED="enabled"
+DI_DISABLED="disabled"
+
+DI_DEBUG_LEVEL="${DEBUG_LEVEL:-1}"
+
+PATH_ROOT=${PATH_ROOT:-""}
+PATH_RUN=${PATH_RUN:-"${PATH_ROOT}/run"}
+PATH_SYS_CLASS_DMI_ID=${PATH_SYS_CLASS_DMI_ID:-${PATH_ROOT}/sys/class/dmi/id}
+PATH_SYS_HYPERVISOR=${PATH_SYS_HYPERVISOR:-${PATH_ROOT}/sys/hypervisor}
+PATH_SYS_CLASS_BLOCK=${PATH_SYS_CLASS_BLOCK:-${PATH_ROOT}/sys/class/block}
+PATH_DEV_DISK="${PATH_DEV_DISK:-${PATH_ROOT}/dev/disk}"
+PATH_VAR_LIB_CLOUD="${PATH_VAR_LIB_CLOUD:-${PATH_ROOT}/var/lib/cloud}"
+PATH_DI_CONFIG="${PATH_DI_CONFIG:-${PATH_ROOT}/etc/cloud/ds-identify.cfg}"
+PATH_PROC_CMDLINE="${PATH_PROC_CMDLINE:-${PATH_ROOT}/proc/cmdline}"
+PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}"
+PATH_PROC_1_ENVIRON="${PATH_PROC_1_ENVIRON:-${PATH_ROOT}/proc/1/environ}"
+PATH_PROC_UPTIME=${PATH_PROC_UPTIME:-${PATH_ROOT}/proc/uptime}
+PATH_CLOUD_CONFD="${PATH_CLOUD_CONFD:-${PATH_ROOT}/etc/cloud}"
+PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}"
+PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg}
+PATH_RUN_DI_RESULT=${PATH_RUN_DI_RESULT:-${PATH_RUN_CI}/.ds-identify.result}
+
+DI_LOG="${DI_LOG:-${PATH_RUN_CI}/ds-identify.log}"
+_DI_LOGGED=""
+
+# set DI_MAIN='noop' in environment to source this file with no main called.
+DI_MAIN=${DI_MAIN:-main}
+
+DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}"
+DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}"
+DI_DMI_PRODUCT_NAME=""
+DI_DMI_SYS_VENDOR=""
+DI_DMI_PRODUCT_SERIAL=""
+DI_DMI_PRODUCT_UUID=""
+DI_FS_LABELS=""
+DI_KERNEL_CMDLINE=""
+DI_VIRT=""
+DI_PID_1_PLATFORM=""
+
+DI_UNAME_KERNEL_NAME=""
+DI_UNAME_KERNEL_RELEASE=""
+DI_UNAME_KERNEL_VERSION=""
+DI_UNAME_MACHINE=""
+DI_UNAME_NODENAME=""
+DI_UNAME_OPERATING_SYSTEM=""
+DI_UNAME_CMD_OUT=""
+
+DS_FOUND=0
+DS_NOT_FOUND=1
+DS_MAYBE=2
+
+DI_DSNAME=""
+# this has to match the builtin list in cloud-init, it is what will
+# be searched if there is no setting found in config.
+DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
+CloudSigma CloudStack DigitalOcean Ec2 OpenNebula OpenStack OVF SmartOS"
+DI_DSLIST=""
+DI_MODE=""
+DI_REPORT=""
+DI_ON_FOUND=""
+DI_ON_MAYBE=""
+DI_ON_NOTFOUND=""
+
+DI_EC2_STRICT_ID_DEFAULT="true"
+
+error() {
+ set -- "ERROR:" "$@";
+ debug 0 "$@"
+ stderr "$@"
+}
+warn() {
+ set -- "WARN:" "$@"
+ debug 0 "$@"
+ stderr "$@"
+}
+
+stderr() { echo "$@" 1>&2; }
+
+debug() {
+ local lvl="$1"
+ shift
+ [ "$lvl" -gt "${DI_DEBUG_LEVEL}" ] && return
+
+ if [ "$_DI_LOGGED" != "$DI_LOG" ]; then
+ # first time here, open file descriptor for append
+ case "$DI_LOG" in
+ stderr) :;;
+ ?*/*)
+ if [ ! -d "${DI_LOG%/*}" ]; then
+ mkdir -p "${DI_LOG%/*}" || {
+ stderr "ERROR:" "cannot write to $DI_LOG"
+ DI_LOG="stderr"
+ }
+ fi
+ esac
+ if [ "$DI_LOG" = "stderr" ]; then
+ exec 3>&2
+ else
+ ( exec 3>>"$DI_LOG" ) && exec 3>>"$DI_LOG" || {
+ stderr "ERROR: failed writing to $DI_LOG. logging to stderr.";
+ exec 3>&2
+ DI_LOG="stderr"
+ }
+ fi
+ _DI_LOGGED="$DI_LOG"
+ fi
+ echo "$@" 1>&3
+}
+
+get_dmi_field() {
+ local path="${PATH_SYS_CLASS_DMI_ID}/$1"
+ if [ ! -f "$path" ] || [ ! -r "$path" ]; then
+ _RET="$UNAVAILABLE"
+ return
+ fi
+ read _RET < "${path}" || _RET="$ERROR"
+}
+
+block_dev_with_label() {
+ local p="${PATH_DEV_DISK}/by-label/$1"
+ [ -b "$p" ] || return 1
+ _RET=$p
+ return 0
+}
+
+read_fs_labels() {
+ cached "${DI_FS_LABELS}" && return 0
+ # do not rely on links in /dev/disk which might not be present yet.
+ # note that older blkid versions do not report DEVNAME in 'export' output.
+ local out="" ret=0 oifs="$IFS" line="" delim=","
+ local labels=""
+ if is_container; then
+        # blkid, in a container (or at least currently in lxd), will
+ # not provide useful information.
+ DI_FS_LABELS="$UNAVAILABLE:container"
+ else
+ out=$(blkid -c /dev/null -o export) || {
+ ret=$?
+ error "failed running [$ret]: blkid -c /dev/null -o export"
+ return $ret
+ }
+ IFS="$CR"
+ set -- $out
+ IFS="$oifs"
+ for line in "$@"; do
+ case "${line}" in
+ LABEL=*) labels="${labels}${line#LABEL=}${delim}";;
+ esac
+ done
+ DI_FS_LABELS="${labels%${delim}}"
+ fi
+}
+
+cached() {
+ [ -n "$1" ] && _RET="$1" && return || return 1
+}
+
+
+has_cdrom() {
+ [ -e "${PATH_ROOT}/dev/cdrom" ]
+}
+
+read_virt() {
+ cached "$DI_VIRT" && return 0
+ local out="" r="" virt="${UNAVAILABLE}"
+ if [ -d /run/systemd ]; then
+ out=$(systemd-detect-virt 2>&1)
+ r=$?
+ if [ $r -eq 0 ] || { [ $r -ne 0 ] && [ "$out" = "none" ]; }; then
+ virt="$out"
+ fi
+ fi
+ DI_VIRT=$virt
+}
+
+is_container() {
+ case "${DI_VIRT}" in
+ lxc|lxc-libvirt|systemd-nspawn|docker|rkt) return 0;;
+ *) return 1;;
+ esac
+}
+
+read_kernel_cmdline() {
+ cached "${DI_KERNEL_CMDLINE}" && return
+ local cmdline="" fpath="${PATH_PROC_CMDLINE}"
+ if is_container; then
+ local p1path="${PATH_PROC_1_CMDLINE}" x=""
+ cmdline="${UNAVAILABLE}:container"
+ if [ -f "$p1path" ] && x=$(tr '\0' ' ' < "$p1path"); then
+ cmdline=$x
+ fi
+ elif [ -f "$fpath" ]; then
+ read cmdline <"$fpath"
+ else
+ cmdline="${UNAVAILABLE}:no-cmdline"
+ fi
+ DI_KERNEL_CMDLINE="$cmdline"
+}
+
+read_dmi_sys_vendor() {
+ cached "${DI_DMI_SYS_VENDOR}" && return
+ get_dmi_field sys_vendor
+ DI_DMI_SYS_VENDOR="$_RET"
+}
+
+read_dmi_product_name() {
+ cached "${DI_DMI_PRODUCT_NAME}" && return
+ get_dmi_field product_name
+ DI_DMI_PRODUCT_NAME="$_RET"
+}
+
+read_dmi_product_uuid() {
+ cached "${DI_DMI_PRODUCT_UUID}" && return
+ get_dmi_field product_uuid
+ DI_DMI_PRODUCT_UUID="$_RET"
+}
+
+read_dmi_product_serial() {
+ cached "${DI_DMI_PRODUCT_SERIAL}" && return
+ get_dmi_field product_serial
+ DI_DMI_PRODUCT_SERIAL="$_RET"
+}
+
+read_uname_info() {
+ # run uname, and parse output.
+    # uname is tricky to parse as it always outputs in a given order
+ # independent of option order. kernel-version is known to have spaces.
+ # 1 -s kernel-name
+ # 2 -n nodename
+ # 3 -r kernel-release
+ # 4.. -v kernel-version(whitespace)
+ # N-2 -m machine
+ # N-1 -o operating-system
+ cached "${DI_UNAME_CMD_OUT}" && return
+ local out="${1:-}" ret=0 buf=""
+ if [ -z "$out" ]; then
+ out=$(uname -snrvmo) || {
+ ret=$?
+ error "failed reading uname with 'uname -snrvmo'"
+ return $ret
+ }
+ fi
+ set -- $out
+ DI_UNAME_KERNEL_NAME="$1"
+ DI_UNAME_NODENAME="$2"
+ DI_UNAME_KERNEL_RELEASE="$3"
+ shift 3
+ while [ $# -gt 2 ]; do
+ buf="$buf $1"
+ shift
+ done
+ DI_UNAME_KERNEL_VERSION="${buf# }"
+ DI_UNAME_MACHINE="$1"
+ DI_UNAME_OPERATING_SYSTEM="$2"
+ DI_UNAME_CMD_OUT="$out"
+ return 0
+}
+
+parse_yaml_array() {
+ # parse a yaml single line array value ([1,2,3], not key: [1,2,3]).
+ # supported with or without leading and closing brackets
+ # ['1'] or [1]
+ # '1', '2'
+ local val="$1" oifs="$IFS" ret="" tok=""
+ val=${val#[}
+ val=${val%]}
+ IFS=","; set -- $val; IFS="$oifs"
+ for tok in "$@"; do
+ trim "$tok"
+ unquote "$_RET"
+ ret="${ret} $_RET"
+ done
+ _RET="${ret# }"
+}
+
+read_datasource_list() {
+ cached "$DI_DSLIST" && return
+ local dslist=""
+ # if DI_DSNAME is set directly, then avoid parsing config.
+ if [ -n "${DI_DSNAME}" ]; then
+ dslist="${DI_DSNAME}"
+ fi
+
+ # LP: #1582323. cc:{'datasource_list': ['name']}
+ # more generically cc:<yaml>[end_cc]
+ local cb="]" ob="["
+ case "$DI_KERNEL_CMDLINE" in
+ *cc:*datasource_list*)
+ t=${DI_KERNEL_CMDLINE##*datasource_list}
+ t=${t%%$cb*}
+ t=${t##*$ob}
+ parse_yaml_array "$t"
+ dslist=${_RET}
+ ;;
+ esac
+ if [ -z "$dslist" ] && check_config datasource_list; then
+ debug 1 "$_RET_fname set datasource_list: $_RET"
+ parse_yaml_array "$_RET"
+ dslist=${_RET}
+ fi
+ if [ -z "$dslist" ]; then
+ dslist=${DI_DSLIST_DEFAULT}
+ debug 1 "no datasource_list found, using default:" $dslist
+ fi
+ DI_DSLIST=$dslist
+ return 0
+}
+
+read_pid1_platform() {
+ local oifs="$IFS" out="" tok="" key="" val="" platform="${UNAVAILABLE}"
+ cached "${DI_PID_1_PLATFORM}" && return
+ [ -r "${PATH_PROC_1_ENVIRON}" ] || return
+ out=$(tr '\0' '\n' <"${PATH_PROC_1_ENVIRON}")
+ IFS="$CR"; set -- $out; IFS="$oifs"
+ for tok in "$@"; do
+ key=${tok%%=*}
+ [ "$key" != "$tok" ] || continue
+ val=${tok#*=}
+ [ "$key" = "platform" ] && platform="$val" && break
+ done
+ DI_PID_1_PLATFORM="$platform"
+}
+
+dmi_product_name_matches() {
+ is_container && return 1
+ case "${DI_DMI_PRODUCT_NAME}" in
+ $1) return 0;;
+ esac
+ return 1
+}
+
+dmi_product_name_is() {
+ is_container && return 1
+ [ "${DI_DMI_PRODUCT_NAME}" = "$1" ]
+}
+
+dmi_sys_vendor_is() {
+ is_container && return 1
+ [ "${DI_DMI_SYS_VENDOR}" = "$1" ]
+}
+
+has_fs_with_label() {
+ local label="$1"
+ case ",${DI_FS_LABELS}," in
+ *,$label,*) return 0;;
+ esac
+ return 1
+}
+
+nocase_equal() {
+ # nocase_equal(a, b)
+    # return 0 if case insensitive comparison a.lower() == b.lower()
+ # different lengths
+ [ "${#1}" = "${#2}" ] || return 1
+ # case sensitive equal
+ [ "$1" = "$2" ] && return 0
+
+ local delim="-delim-"
+ out=$(echo "$1${delim}$2" | tr A-Z a-z)
+ [ "${out#*${delim}}" = "${out%${delim}*}" ]
+}
+
+check_seed_dir() {
+ # check_seed_dir(name, [required])
+ # check the seed dir /var/lib/cloud/seed/<name> for 'required'
+ # required defaults to 'meta-data'
+ local name="$1"
+ local dir="${PATH_VAR_LIB_CLOUD}/seed/$name"
+ [ -d "$dir" ] || return 1
+ shift
+ if [ $# -eq 0 ]; then
+ set -- meta-data
+ fi
+ local f=""
+ for f in "$@"; do
+ [ -f "$dir/$f" ] || return 1
+ done
+ return 0
+}
+
+probe_floppy() {
+ cached "${STATE_FLOPPY_PROBED}" && return "${STATE_FLOPPY_PROBED}"
+ local fpath=/dev/floppy
+
+ [ -b "$fpath" ] ||
+ { STATE_FLOPPY_PROBED=1; return 1; }
+
+ modprobe --use-blacklist floppy >/dev/null 2>&1 ||
+ { STATE_FLOPPY_PROBED=1; return 1; }
+
+ udevadm settle "--exit-if-exists=$fpath" ||
+ { STATE_FLOPPY_PROBED=1; return 1; }
+
+ [ -b "$fpath" ]
+ STATE_FLOPPY_PROBED=$?
+ return "${STATE_FLOPPY_PROBED}"
+}
+
+
+dscheck_CloudStack() {
+ is_container && return ${DS_NOT_FOUND}
+ dmi_product_name_matches "CloudStack*" && return $DS_FOUND
+ return $DS_NOT_FOUND
+}
+
+dscheck_CloudSigma() {
+ # http://paste.ubuntu.com/23624795/
+ dmi_product_name_is "CloudSigma" && return $DS_FOUND
+ return $DS_NOT_FOUND
+}
+
+check_config() {
+    # somewhat hackily read config for 'key' in files matching 'files'
+    # currently does not respect any hierarchy.
+    local key="$1" files="" bp="${PATH_CLOUD_CONFD}/cloud.cfg"
+    if [ $# -eq 1 ]; then
+        files="$bp ${bp}.d/*.cfg"
+    else
+        files="$*"
+    fi
+    shift
+    set +f; set -- $files; set -f;  # restore noglob (script runs with set -f)
+    if [ "$1" = "$files" -a ! -f "$1" ]; then
+        return 1
+    fi
+    local fname="" line="" ret="" found=0 found_fn=""
+    for fname in "$@"; do
+        [ -f "$fname" ] || continue
+        while read line; do
+            line=${line%%#*}
+            case "$line" in
+                $key:\ *|$key:)
+                    ret=${line#*:};
+                    ret=${ret# };
+                    found=$((found+1))
+                    found_fn="$fname";;
+            esac
+        done <"$fname"
+    done
+    if [ $found -ne 0 ]; then
+        _RET="$ret"
+        _RET_fname="$found_fn"
+        return 0
+    fi
+    return 1
+}
+
+dscheck_MAAS() {
+ is_container && return "${DS_NOT_FOUND}"
+ # heuristic check for ephemeral boot environment
+ # for maas that do not set 'ci.dsname=' in the ephemeral environment
+ # these have iscsi root and cloud-config-url on the cmdline.
+ local maasiqn="iqn.2004-05.com.ubuntu:maas"
+ case "${DI_KERNEL_CMDLINE}" in
+ *cloud-config-url=*${maasiqn}*|*${maasiqn}*cloud-config-url=*)
+ return ${DS_FOUND}
+ ;;
+ esac
+
+ # check config files written by maas for installed system.
+ local confd="${PATH_CLOUD_CONFD}"
+ local fnmatch="$confd/*maas*.cfg $confd/*kernel_cmdline*.cfg"
+ if check_config "MAAS" "$fnmatch"; then
+ return "${DS_FOUND}"
+ fi
+ return ${DS_NOT_FOUND}
+}
+
+dscheck_NoCloud() {
+ local fslabel="cidata" d=""
+ case " ${DI_KERNEL_CMDLINE} " in
+ *\ ds=nocloud*) return ${DS_FOUND};;
+ esac
+ for d in nocloud nocloud-net; do
+ check_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
+ done
+ if has_fs_with_label "${fslabel}"; then
+ return ${DS_FOUND}
+ fi
+ return ${DS_NOT_FOUND}
+}
+
+check_configdrive_v2() {
+ if has_fs_with_label "config-2"; then
+ return ${DS_FOUND}
+ fi
+ return ${DS_NOT_FOUND}
+}
+
+check_configdrive_v1() {
+ # FIXME: this has to check any file system that is vfat...
+ # for now, just return not found.
+ return ${DS_NOT_FOUND}
+}
+
+dscheck_ConfigDrive() {
+ local ret=""
+ check_configdrive_v2
+ ret=$?
+ [ $DS_FOUND -eq $ret ] && return $ret
+
+ check_configdrive_v1
+}
+
+dscheck_DigitalOcean() {
+ dmi_sys_vendor_is DigitalOcean && return ${DS_FOUND}
+ return ${DS_NOT_FOUND}
+}
+
+dscheck_OpenNebula() {
+ check_seed_dir opennebula && return ${DS_FOUND}
+ has_fs_with_label "CONTEXT" && return ${DS_FOUND}
+ return ${DS_NOT_FOUND}
+}
+
+ovf_vmware_guest_customization() {
+ # vmware guest customization
+
+ # virt provider must be vmware
+ [ "${DI_VIRT}" = "vmware" ] || return 1
+
+ # we have to have the plugin to do vmware customization
+ local found="" pkg="" pre="/usr/lib"
+ for pkg in vmware-tools open-vm-tools; do
+ if [ -f "$pre/$pkg/plugins/vmsvc/libdeployPkgPlugin.so" ]; then
+ found="$pkg"; break;
+ fi
+ done
+ [ -n "$found" ] || return 1
+
+ # vmware customization is disabled by default
+ # (disable_vmware_customization=true). If it is set to false, then
+ # user has requested customization.
+ local key="disable_vmware_customization"
+ local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg"
+ match="$bp $bp.d/*[Oo][Vv][Ff]*.cfg"
+ if check_config "$key" "$match"; then
+ debug 2 "${_RET_fname} set $key to $_RET"
+ case "$_RET" in
+ 0|false|False) return 0;;
+ *) return 1;;
+ esac
+ fi
+
+ return 1
+}
+
+dscheck_OVF() {
+ local p=""
+ check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}"
+
+ if ovf_vmware_guest_customization; then
+ return ${DS_FOUND}
+ fi
+
+ has_cdrom || return ${DS_NOT_FOUND}
+
+ # FIXME: currently just return maybe if there is a cdrom
+ # ovf iso9660 transport does not specify an fs label.
+    # better would be to check whether the cdrom actually carries OVF data.
+ return ${DS_MAYBE}
+}
+
+dscheck_Azure() {
+ # http://paste.ubuntu.com/23630873/
+ # $ grep /sr0 /run/blkid/blkid.tab
+ # <device DEVNO="0x0b00" TIME="1481737655.543841"
+ # UUID="112D211272645f72" LABEL="rd_rdfe_stable.161212-1209"
+ # TYPE="udf">/dev/sr0</device>
+ #
+ check_seed_dir azure ovf-env.xml && return ${DS_FOUND}
+
+ [ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND}
+
+ has_fs_with_label "rd_rdfe_*" && return ${DS_FOUND}
+
+ return ${DS_NOT_FOUND}
+}
+
+dscheck_Bigstep() {
+    # bigstep is activated by presence of seed file 'url'
+ check_seed_dir "bigstep" url && return ${DS_FOUND}
+ return ${DS_NOT_FOUND}
+}
+
+ec2_read_strict_setting() {
+ # the 'strict_id' setting for Ec2 controls behavior when
+ # the platform does not identify itself directly as Ec2.
+ # order of precedence is:
+ # 1. builtin setting here cloud-init/ds-identify builtin
+ # 2. ds-identify config
+ # 3. system config (/etc/cloud/cloud.cfg.d/*Ec2*.cfg)
+ # 4. kernel command line (undocumented)
+ # 5. user-data or vendor-data (not available here)
+ local default="$1" key="ci.datasource.ec2.strict_id" val=""
+
+ # 4. kernel command line
+ case " ${DI_KERNEL_CMDLINE} " in
+ *\ $key=*\ )
+ val=${DI_KERNEL_CMDLINE##*$key=}
+ val=${val%% *};
+ _RET=${val:-$default}
+ return 0
+ esac
+
+ # 3. look for the key 'strict_id' (datasource/Ec2/strict_id)
+ local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg"
+ match="$bp $bp.d/*[Ee][Cc]2*.cfg"
+ if check_config strict_id "$match"; then
+ debug 2 "${_RET_fname} set strict_id to $_RET"
+ return 0
+ fi
+
+ # 2. ds-identify config (datasource.ec2.strict)
+ local config="${PATH_DI_CONFIG}"
+ if [ -f "$config" ]; then
+ if _read_config "$key" < "$config"; then
+ _RET=${_RET:-$default}
+ return 0
+ fi
+ fi
+
+ # 1. Default
+ _RET=$default
+ return 0
+}
+
+ec2_identify_platform() {
+ # ec2_identify_platform(default): set _RET to the specific ec2-like
+ # platform ("Brightbox", "AWS") or to the given default. returns 0.
+ local default="$1"
+ local serial="${DI_DMI_PRODUCT_SERIAL}"
+
+ # brightbox https://bugs.launchpad.net/cloud-init/+bug/1661693
+ case "$serial" in
+ *brightbox.com) _RET="Brightbox"; return 0;;
+ esac
+
+ # AWS http://docs.aws.amazon.com/AWSEC2/
+ # latest/UserGuide/identify_ec2_instances.html
+ local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid"
+ # if the (basically) xen specific /sys/hypervisor/uuid starts with 'ec2'
+ if [ -r "$hvuuid" ] && read uuid < "$hvuuid" &&
+ [ "${uuid#ec2}" != "$uuid" ]; then
+ _RET="AWS"
+ return 0
+ fi
+
+ # AWS if dmi product uuid and product serial both start with 'ec2'
+ # (case insensitive) and are equal ignoring case.
+ # note: serial pattern must be [Ee][Cc]2* (was [Ee][Cc]2, which only
+ # matched a serial that was exactly "ec2").
+ uuid="${DI_DMI_PRODUCT_UUID}"
+ case "$uuid:$serial" in
+ [Ee][Cc]2*:[Ee][Cc]2*)
+ # both start with ec2, now check for case insensitive equal
+ nocase_equal "$uuid" "$serial" &&
+ { _RET="AWS"; return 0; };;
+ esac
+
+ _RET="$default"
+ return 0
+}
+
+dscheck_Ec2() {
+ # check for the Ec2 datasource. DS_FOUND on a positively identified
+ # platform; otherwise behavior is governed by the 'strict_id' setting:
+ # "true" -> DS_NOT_FOUND, anything else -> DS_MAYBE (with the
+ # effective setting passed through in _RET_excfg).
+ check_seed_dir "ec2" meta-data user-data && return ${DS_FOUND}
+ is_container && return ${DS_NOT_FOUND}
+
+ # 'strict' and 'default' are now declared local (previously 'strict'
+ # leaked into the global scope).
+ local unknown="Unknown" platform="" strict="" default=""
+ if ec2_identify_platform "$unknown"; then
+ platform="$_RET"
+ else
+ warn "Failed to identify ec2 platform. Using '$unknown'."
+ platform=$unknown
+ fi
+
+ debug 1 "ec2 platform is '$platform'."
+ if [ "$platform" != "$unknown" ]; then
+ return $DS_FOUND
+ fi
+
+ # platform not positively identified; consult strict_id.
+ default="${DI_EC2_STRICT_ID_DEFAULT}"
+ if ec2_read_strict_setting "$default"; then
+ strict="$_RET"
+ else
+ debug 1 "ec2_read_strict returned non-zero: $?. using '$default'."
+ strict="$default"
+ fi
+
+ # validate the setting; valid values: true, false, warn, warn,N.
+ local key="datasource/Ec2/strict_id"
+ case "$strict" in
+ true|false|warn|warn,[0-9]*) :;;
+ *)
+ warn "$key was set to invalid '$strict'. using '$default'"
+ strict="$default";;
+ esac
+
+ # hand the effective setting back to cloud-init as extra config.
+ _RET_excfg="datasource: {Ec2: {strict_id: \"$strict\"}}"
+ if [ "$strict" = "true" ]; then
+ return $DS_NOT_FOUND
+ else
+ return $DS_MAYBE
+ fi
+}
+
+dscheck_GCE() {
+ if dmi_product_name_is "Google Compute Engine"; then
+ return ${DS_FOUND}
+ fi
+ return ${DS_NOT_FOUND}
+}
+
+dscheck_OpenStack() {
+ # the openstack metadata http service
+
+ # if there is a config drive, then do not check metadata
+ # FIXME: if config drive not in the search list, then we should not
+ # do this check.
+ check_configdrive_v2
+ if [ $? -eq ${DS_FOUND} ]; then
+ return ${DS_NOT_FOUND}
+ fi
+ # dmi product name set by nova for its guests.
+ if dmi_product_name_is "OpenStack Nova"; then
+ return ${DS_FOUND}
+ fi
+ # platform as reported for pid 1 (DI_PID_1_PLATFORM), for
+ # environments where dmi data is unavailable.
+ if [ "${DI_PID_1_PLATFORM}" = "OpenStack Nova" ]; then
+ return ${DS_FOUND}
+ fi
+
+ return ${DS_NOT_FOUND}
+}
+
+dscheck_AliYun() {
+ # aliyun is not enabled by default (LP: #1638931)
+ # so if we are here, it is because the datasource_list was
+ # set to include it. Thus, 'maybe'.
+ # (no platform probe exists here yet; trust the configuration.)
+ return $DS_MAYBE
+}
+
+dscheck_AltCloud() {
+ # ctype: either the dmi product name, or contents of
+ # /etc/sysconfig/cloud-info
+ # if ctype == "vsphere"
+ # device = device with label 'CDROM'
+ # elif ctype == "rhev"
+ # device = /dev/floppy
+ # then, filesystem on that device must have
+ # user-data.txt or deltacloud-user-data.txt
+ local ctype="" dev=""
+ # case-insensitive glob patterns for the two supported platforms.
+ local match_rhev="[Rr][Hh][Ee][Vv]"
+ local match_vsphere="[Vv][Ss][Pp][Hh][Ee][Rr][Ee]"
+ local cinfo="${PATH_ROOT}/etc/sysconfig/cloud-info"
+ if [ -f "$cinfo" ]; then
+ read ctype < "$cinfo"
+ else
+ ctype="${DI_DMI_PRODUCT_NAME}"
+ fi
+ case "$ctype" in
+ ${match_rhev})
+ probe_floppy || return ${DS_NOT_FOUND}
+ dev="/dev/floppy"
+ ;;
+ ${match_vsphere})
+ block_dev_with_label CDROM || return ${DS_NOT_FOUND}
+ dev="$_RET"
+ ;;
+ *) return ${DS_NOT_FOUND};;
+ esac
+
+ # FIXME: need to check $dev for user-data.txt or deltacloud-user-data.txt
+ # until then, ':' consumes $dev and the answer is only 'maybe'.
+ : "$dev"
+ return $DS_MAYBE
+}
+
+dscheck_SmartOS() {
+ # joyent cloud has two virt types: kvm and container.
+ # kvm guests on the joyent public cloud carry product name 'SmartDC HVM';
+ # container guests carry "BrandZ virtual linux" in uname's version.
+ if dmi_product_name_matches "SmartDC*"; then
+ return $DS_FOUND
+ fi
+ [ "${DI_UNAME_KERNEL_VERSION}" = "BrandZ virtual linux" ] &&
+ [ "${DI_VIRT}" = "container-other" ] && return ${DS_FOUND}
+ return ${DS_NOT_FOUND}
+}
+
+dscheck_None() {
+ # 'None' is never positively detected; found() always appends it
+ # as the final fallback entry instead.
+ return ${DS_NOT_FOUND}
+}
+
+collect_info() {
+ # populate the DI_* global facts consumed by the dscheck_* functions.
+ read_virt
+ read_pid1_platform
+ read_kernel_cmdline
+ read_uname_info
+ read_config
+ read_datasource_list
+ read_dmi_sys_vendor
+ read_dmi_product_name
+ read_dmi_product_serial
+ read_dmi_product_uuid
+ read_fs_labels
+}
+
+print_info() {
+ # debugging entry point (DI_MAIN=print_info): gather and dump facts.
+ collect_info
+ _print_info
+}
+
+_print_info() {
+ # dump the collected DI_* variables, one NAME=value per line,
+ # followed by pid info and container status.
+ local n="" v="" vars=""
+ vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL"
+ vars="$vars DMI_PRODUCT_UUID PID_1_PLATFORM"
+ vars="$vars FS_LABELS KERNEL_CMDLINE VIRT"
+ vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION"
+ vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM"
+ vars="$vars DSNAME DSLIST"
+ vars="$vars MODE REPORT ON_FOUND ON_MAYBE ON_NOTFOUND"
+ for v in ${vars}; do
+ # eval indirection: expand the DI_-prefixed variable named by $v.
+ eval n='${DI_'"$v"'}'
+ echo "$v=$n"
+ done
+ echo "pid=$$ ppid=$PPID"
+ is_container && echo "is_container=true" || echo "is_container=false"
+}
+
+write_result() {
+ # write_result(lines...): write the given lines to the cloud-init
+ # runtime config file. in report mode they are nested (indented)
+ # under a 'di_report:' key.
+ local runcfg="${PATH_RUN_CI_CFG}" ret="" line="" pre=""
+ {
+ if [ "$DI_REPORT" = "true" ]; then
+ echo "di_report:"
+ pre=" "
+ fi
+ for line in "$@"; do
+ echo "${pre}$line";
+ done
+ } > "$runcfg"
+ ret=$?
+ [ $ret -eq 0 ] || {
+ error "failed to write to ${runcfg}"
+ return $ret
+ }
+ return 0
+}
+
+record_notfound() {
+ # in report mode, report nothing was found.
+ # if not report mode: only report the negative result.
+ # reporting an empty list would mean cloud-init would not search
+ # any datasources.
+ if [ "$DI_REPORT" = "true" ]; then
+ found --
+ else
+ local msg="# reporting not found result. notfound=${DI_ON_NOTFOUND}."
+ # temporarily force report mode (function-local DI_REPORT) so the
+ # write lands under 'di_report:' rather than as a real result.
+ local DI_REPORT="true"
+ found -- "$msg"
+ fi
+}
+
+found() {
+ # found(ds1, [ds2 ...], [-- [extra lines]])
+ # write the result: a datasource_list of the given names plus any
+ # extra config lines supplied after '--'.
+ local list="" ds=""
+ while [ $# -ne 0 ]; do
+ if [ "$1" = "--" ]; then
+ shift
+ break
+ fi
+ list="${list:+${list}, }$1"
+ shift
+ done
+ if [ $# -eq 1 ] && [ -z "$1" ]; then
+ # do not pass an empty line through.
+ shift
+ fi
+ # always write the None datasource last.
+ list="${list:+${list}, }None"
+ write_result "datasource_list: [ $list ]" "$@"
+ return
+}
+
+trim() {
+ # set _RET to the arguments with leading/trailing whitespace removed
+ # (the unquoted 'set -- $*' word-split is intentional).
+ set -- $*
+ _RET="$*"
+}
+
+unquote() {
+ # set _RET to $1 with one matching pair of surrounding quotes
+ # (single or double) stripped, if present.
+ local val="$1"
+ case "$val" in
+ \"*\"|\'*\')
+ val=${val#?}; val=${val%?};;
+ esac
+ _RET="$val"
+}
+
+_read_config() {
+ # reads simple 'key: value' config from stdin.
+ # if no parameters are set, modifies _rc scoped environment vars
+ # (_rc_dsname, _rc_policy) and returns 1.
+ # if keyname is provided, then returns found value of that key:
+ # 0 with _RET set on first match, 0 with _RET="" if never matched.
+ local keyname="${1:-_unset}"
+ local line="" hash="#" ckey="" key="" val=""
+ while read line; do
+ # strip trailing comments.
+ line=${line%%${hash}*}
+ key="${line%%:*}"
+
+ # no : in the line.
+ [ "$key" = "$line" ] && continue
+ trim "$key"
+ key=${_RET}
+
+ # when looking for one key, skip every other key.
+ [ "$keyname" != "_unset" ] && [ "$keyname" != "$key" ] &&
+ continue
+
+ val="${line#*:}"
+ trim "$val"
+ unquote "${_RET}"
+ val=${_RET}
+
+ if [ "$keyname" = "$key" ]; then
+ _RET="$val"
+ return 0
+ fi
+
+ case "$key" in
+ datasource) _rc_dsname="$val";;
+ policy) _rc_policy="$val";;
+ esac
+ done
+ if [ "$keyname" = "_unset" ]; then
+ return 1
+ fi
+ _RET=""
+ return 0
+}
+
+parse_warn() {
+ # parse_warn(key, invalid_value, default): warn on stderr that an
+ # invalid policy token was seen and which default will be used.
+ echo "WARN: invalid value '$2' for key '$1'. Using $1=$3." 1>&2
+}
+
+parse_def_policy() {
+ # run parse_policy but store its results into the caller-scoped
+ # _def_* variables; the locals here shield the caller's _rc_* values.
+ local _rc_mode="" _rc_report="" _rc_found="" _rc_maybe="" _rc_notfound=""
+ local ret=""
+ parse_policy "$@"
+ ret=$?
+ _def_mode=$_rc_mode
+ _def_report=$_rc_report
+ _def_found=$_rc_found
+ _def_maybe=$_rc_maybe
+ _def_notfound=$_rc_notfound
+ return $ret
+}
+
+parse_policy() {
+ # parse_policy(policy, default)
+ # parse a policy string. sets
+ # _rc_mode (enable|disable,search)
+ # _rc_report true|false
+ # _rc_found first|all
+ # _rc_maybe all|none
+ # _rc_notfound enable|disable
+ # fields not present in 'policy' fall back to the parsed default
+ # policy (arg 2, or the architecture-dependent builtin).
+ local def=""
+ case "$DI_UNAME_MACHINE" in
+ # these have dmi data
+ i?86|x86_64) def=${DI_DEFAULT_POLICY};;
+ # aarch64 has dmi, but not currently used (LP: #1663304)
+ aarch64) def=${DI_DEFAULT_POLICY_NO_DMI};;
+ *) def=${DI_DEFAULT_POLICY_NO_DMI};;
+ esac
+ local policy="$1"
+ local _def_mode="" _def_report="" _def_found="" _def_maybe=""
+ local _def_notfound=""
+ if [ $# -eq 1 ] || [ "$2" != "-" ]; then
+ # fill the _def_* fallbacks by parsing the default policy; a
+ # second argument of "-" suppresses this (stops the recursion).
+ def=${2:-${def}}
+ parse_def_policy "$def" -
+ fi
+
+ local mode="" report="" found="" maybe="" notfound=""
+ local oifs="$IFS" tok="" val=""
+ # split the policy string on commas into the positional params.
+ IFS=","; set -- $policy; IFS="$oifs"
+ for tok in "$@"; do
+ val=${tok#*=}
+ case "$tok" in
+ report) report=true;;
+ $DI_ENABLED|$DI_DISABLED|search) mode=$tok;;
+ found=all|found=first) found=$val;;
+ maybe=all|maybe=none) maybe=$val;;
+ notfound=$DI_ENABLED|notfound=$DI_DISABLED) notfound=$val;;
+ # the remaining patterns catch known keys with invalid values;
+ # order matters: valid values matched above.
+ found=*)
+ parse_warn found "$val" "${_def_found}"
+ found=${_def_found};;
+ maybe=*)
+ parse_warn maybe "$val" "${_def_maybe}"
+ maybe=${_def_maybe};;
+ notfound=*)
+ parse_warn notfound "$val" "${_def_notfound}"
+ notfound=${_def_notfound};;
+ esac
+ done
+ report=${report:-${_def_report:-false}}
+ _rc_report=${report}
+ _rc_mode=${mode:-${_def_mode}}
+ _rc_found=${found:-${_def_found}}
+ _rc_maybe=${maybe:-${_def_maybe}}
+ _rc_notfound=${notfound:-${_def_notfound}}
+}
+
+read_config() {
+ # load ds-identify configuration into the DI_* globals:
+ # config file first, then kernel command line overrides, then
+ # parse the resulting policy string.
+ local config="${PATH_DI_CONFIG}"
+ local _rc_dsname="" _rc_policy="" ret=""
+ if [ -f "$config" ]; then
+ _read_config < "$config"
+ ret=$?
+ elif [ -e "$config" ]; then
+ error "$config exists but is not a file!"
+ ret=1
+ fi
+ # kernel command line settings override the config file.
+ local tok="" key="" val=""
+ for tok in ${DI_KERNEL_CMDLINE}; do
+ key=${tok%%=*}
+ val=${tok#*=}
+ case "$key" in
+ ci.ds) _rc_dsname="$val";;
+ ci.datasource) _rc_dsname="$val";;
+ ci.di.policy) _rc_policy="$val";;
+ esac
+ done
+
+ local _rc_mode _rc_report _rc_found _rc_maybe _rc_notfound
+ parse_policy "${_rc_policy}"
+ debug 1 "policy loaded: mode=${_rc_mode} report=${_rc_report}" \
+ "found=${_rc_found} maybe=${_rc_maybe} notfound=${_rc_notfound}"
+ DI_MODE=${_rc_mode}
+ DI_REPORT=${_rc_report}
+ DI_ON_FOUND=${_rc_found}
+ DI_ON_MAYBE=${_rc_maybe}
+ DI_ON_NOTFOUND=${_rc_notfound}
+
+ DI_DSNAME="${_rc_dsname}"
+ return $ret
+}
+
+
+manual_clean_and_existing() {
+ # true when an instance/manual-clean marker file exists, indicating
+ # the operator manages the cloud-init instance data manually.
+ [ -f "${PATH_VAR_LIB_CLOUD}/instance/manual-clean" ]
+}
+
+read_uptime() {
+ local up idle
+ _RET="${UNAVAILABLE}"
+ [ -f "$PATH_PROC_UPTIME" ] &&
+ read up idle < "$PATH_PROC_UPTIME" && _RET="$up"
+ return
+}
+
+_main() {
+ # core logic: collect facts, honor mode/overrides, run each
+ # datasource's dscheck_* function, and write the resulting
+ # datasource_list. return 0 to enable cloud-init, 1 to disable.
+ # fix: declare 'dscheck_fn' local (the previous 'local dscheck=""'
+ # was unused while dscheck_fn leaked into the global scope).
+ local dscheck_fn="" ret_dis=1 ret_en=0
+
+ read_uptime
+ debug 1 "[up ${_RET}s]" "ds-identify $*"
+ collect_info
+
+ if [ "$DI_LOG" = "stderr" ]; then
+ _print_info 1>&2
+ else
+ _print_info >> "$DI_LOG"
+ fi
+
+ # explicit enable/disable modes short-circuit all detection.
+ case "$DI_MODE" in
+ $DI_DISABLED)
+ debug 1 "mode=$DI_DISABLED. returning $ret_dis"
+ return $ret_dis
+ ;;
+ $DI_ENABLED)
+ debug 1 "mode=$DI_ENABLED. returning $ret_en"
+ return $ret_en;;
+ search) :;;
+ esac
+
+ # an explicitly named datasource wins without any checking.
+ if [ -n "${DI_DSNAME}" ]; then
+ debug 1 "datasource '$DI_DSNAME' specified."
+ found "$DI_DSNAME"
+ return
+ fi
+
+ if manual_clean_and_existing; then
+ debug 1 "manual_cache_clean enabled. Not writing datasource_list."
+ write_result "# manual_cache_clean."
+ return
+ fi
+
+ # if there is only a single entry in $DI_DSLIST
+ set -- $DI_DSLIST
+ if [ $# -eq 1 ] || [ $# -eq 2 -a "$2" = "None" ] ; then
+ debug 1 "single entry in datasource_list ($DI_DSLIST) use that."
+ found "$@"
+ return
+ fi
+
+ # run every datasource check, bucketing results into found/maybe
+ # and accumulating any extra config (_RET_excfg) each check emits.
+ local found="" ret="" ds="" maybe="" _RET_excfg=""
+ local exfound_cfg="" exmaybe_cfg=""
+ for ds in ${DI_DSLIST}; do
+ dscheck_fn="dscheck_${ds}"
+ debug 2 "Checking for datasource '$ds' via '$dscheck_fn'"
+ if ! type "$dscheck_fn" >/dev/null 2>&1; then
+ warn "No check method '$dscheck_fn' for datasource '$ds'"
+ continue
+ fi
+ _RET_excfg=""
+ $dscheck_fn
+ ret="$?"
+ case "$ret" in
+ $DS_FOUND)
+ debug 1 "check for '$ds' returned found";
+ exfound_cfg="${exfound_cfg:+${exfound_cfg}${CR}}${_RET_excfg}"
+ found="${found} $ds";;
+ $DS_MAYBE)
+ debug 1 "check for '$ds' returned maybe";
+ exmaybe_cfg="${exmaybe_cfg:+${exmaybe_cfg}${CR}}${_RET_excfg}"
+ maybe="${maybe} $ds";;
+ *) debug 2 "check for '$ds' returned not-found[$ret]";;
+ esac
+ done
+
+ debug 2 "found=${found# } maybe=${maybe# }"
+ set -- $found
+ if [ $# -ne 0 ]; then
+ if [ $# -eq 1 ]; then
+ debug 1 "Found single datasource: $1"
+ else
+ # found=all
+ debug 1 "Found $# datasources found=${DI_ON_FOUND}: $*"
+ if [ "${DI_ON_FOUND}" = "first" ]; then
+ set -- "$1"
+ fi
+ fi
+ found "$@" -- "${exfound_cfg}"
+ return
+ fi
+
+ # no definite hits: fall back to the 'maybe' results if allowed.
+ set -- $maybe
+ if [ $# -ne 0 -a "${DI_ON_MAYBE}" != "none" ]; then
+ debug 1 "$# datasources returned maybe: $*"
+ found "$@" -- "${exmaybe_cfg}"
+ return
+ fi
+
+ # record the empty result.
+ record_notfound
+ case "$DI_ON_NOTFOUND" in
+ $DI_DISABLED)
+ debug 1 "No result. notfound=$DI_DISABLED. returning $ret_dis."
+ return $ret_dis
+ ;;
+ $DI_ENABLED)
+ debug 1 "No result. notfound=$DI_ENABLED. returning $ret_en"
+ return $ret_en;;
+ esac
+
+ error "Unexpected result"
+ return 3
+}
+
+main() {
+ # entry point: reuse a cached result from a prior run unless --force
+ # is given; otherwise run _main and cache its exit code.
+ local ret=""
+ [ -d "$PATH_RUN_CI" ] || mkdir -p "$PATH_RUN_CI"
+ if [ "${1:+$1}" != "--force" ] && [ -f "$PATH_RUN_CI_CFG" ] &&
+ [ -f "$PATH_RUN_DI_RESULT" ]; then
+ if read ret < "$PATH_RUN_DI_RESULT"; then
+ # only 0 (enable) and 1 (disable) are valid cached results.
+ if [ "$ret" = "0" ] || [ "$ret" = "1" ]; then
+ debug 2 "used cached result $ret. pass --force to re-run."
+ return $ret;
+ fi
+ debug 1 "previous run returned unexpected '$ret'. Re-running."
+ else
+ error "failed to read result from $PATH_RUN_DI_RESULT!"
+ fi
+ fi
+ _main "$@"
+ ret=$?
+ echo "$ret" > "$PATH_RUN_DI_RESULT"
+ read_uptime
+ debug 1 "[up ${_RET}s]" "returning $ret"
+ return $ret
+}
+
+noop() {
+ # do-nothing entry point (DI_MAIN=noop disables ds-identify).
+ :
+}
+
+# dispatch: DI_MAIN selects the function to run. "main" is the normal
+# path; "print_info" and "noop" exist for debugging and testing.
+case "${DI_MAIN}" in
+ main|print_info|noop) "${DI_MAIN}" "$@";;
+ *) error "unexpected value for DI_MAIN"; exit 1;;
+esac
+
+# vi: syntax=sh ts=4 expandtab
diff --git a/tools/make-mime.py b/tools/make-mime.py
index 12727126..f6a72044 100755
--- a/tools/make-mime.py
+++ b/tools/make-mime.py
@@ -22,7 +22,7 @@ def file_content_type(text):
try:
filename, content_type = text.split(":", 1)
return (open(filename, 'r'), filename, content_type.strip())
- except:
+ except ValueError:
raise argparse.ArgumentError("Invalid value for %r" % (text))
diff --git a/tools/make-tarball b/tools/make-tarball
index c150dd2f..91c45624 100755
--- a/tools/make-tarball
+++ b/tools/make-tarball
@@ -35,7 +35,7 @@ while [ $# -ne 0 ]; do
done
rev=${1:-HEAD}
-version=$(git describe ${long_opt} $rev)
+version=$(git describe "--match=[0-9]*" ${long_opt} $rev)
archive_base="cloud-init-$version"
if [ -z "$output" ]; then
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index d74f9e31..95fc4659 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -18,10 +18,10 @@ Then:
"""
import functools
-import httplib
import json
import logging
import os
+import socket
import random
import string
import sys
@@ -29,7 +29,13 @@ import yaml
from optparse import OptionParser
-from BaseHTTPServer import (HTTPServer, BaseHTTPRequestHandler)
+try:
+ from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
+ import httplib as hclient
+except ImportError:
+ from http.server import HTTPServer, BaseHTTPRequestHandler
+ from http import client as hclient
+
log = logging.getLogger('meta-server')
@@ -183,6 +189,10 @@ def get_ssh_keys():
return keys
+class HTTPServerV6(HTTPServer):
+ address_family = socket.AF_INET6
+
+
class MetaDataHandler(object):
def __init__(self, opts):
@@ -249,8 +259,11 @@ class MetaDataHandler(object):
try:
key_id = int(mybe_key)
key_name = key_ids[key_id]
- except:
- raise WebException(httplib.BAD_REQUEST,
+ except ValueError:
+ raise WebException(hclient.BAD_REQUEST,
+ "%s: not an integer" % mybe_key)
+ except KeyError:
+ raise WebException(hclient.BAD_REQUEST,
"Unknown key id %r" % mybe_key)
# Extract the possible sub-params
result = traverse(nparams[1:], {
@@ -342,13 +355,13 @@ class Ec2Handler(BaseHTTPRequestHandler):
return self._get_versions
date = segments[0].strip().lower()
if date not in self._get_versions():
- raise WebException(httplib.BAD_REQUEST,
+ raise WebException(hclient.BAD_REQUEST,
"Unknown version format %r" % date)
if len(segments) < 2:
- raise WebException(httplib.BAD_REQUEST, "No action provided")
+ raise WebException(hclient.BAD_REQUEST, "No action provided")
look_name = segments[1].lower()
if look_name not in func_mapping:
- raise WebException(httplib.BAD_REQUEST,
+ raise WebException(hclient.BAD_REQUEST,
"Unknown requested data %r" % look_name)
base_func = func_mapping[look_name]
who = self.address_string()
@@ -371,16 +384,16 @@ class Ec2Handler(BaseHTTPRequestHandler):
data = func()
if not data:
data = ''
- self.send_response(httplib.OK)
+ self.send_response(hclient.OK)
self.send_header("Content-Type", "binary/octet-stream")
self.send_header("Content-Length", len(data))
log.info("Sending data (len=%s):\n%s", len(data),
format_text(data))
self.end_headers()
- self.wfile.write(data)
+ self.wfile.write(data.encode())
except RuntimeError as e:
log.exception("Error somewhere in the server.")
- self.send_error(httplib.INTERNAL_SERVER_ERROR, message=str(e))
+ self.send_error(hclient.INTERNAL_SERVER_ERROR, message=str(e))
except WebException as e:
code = e.code
log.exception(str(e))
@@ -408,7 +421,7 @@ def extract_opts():
help=("port from which to serve traffic"
" (default: %default)"))
parser.add_option("-a", "--addr", dest="address", action="store", type=str,
- default='0.0.0.0', metavar="ADDRESS",
+ default='::', metavar="ADDRESS",
help=("address from which to serve traffic"
" (default: %default)"))
parser.add_option("-f", '--user-data-file', dest='user_data_file',
@@ -444,7 +457,7 @@ def run_server():
setup_fetchers(opts)
log.info("CLI opts: %s", opts)
server_address = (opts['address'], opts['port'])
- server = HTTPServer(server_address, Ec2Handler)
+ server = HTTPServerV6(server_address, Ec2Handler)
sa = server.socket.getsockname()
log.info("Serving ec2 metadata on %s using port %s ...", sa[0], sa[1])
server.serve_forever()
diff --git a/tools/read-version b/tools/read-version
index 3b30b497..ddb28383 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -56,7 +56,7 @@ if os.path.isdir(os.path.join(_tdir, ".git")) and which("git"):
flags = []
if use_tags:
flags = ['--tags']
- cmd = ['git', 'describe'] + flags
+ cmd = ['git', 'describe', '--match=[0-9]*'] + flags
version = tiny_p(cmd).strip()
diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py
index d8bbcfcb..a57ea847 100755
--- a/tools/validate-yaml.py
+++ b/tools/validate-yaml.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
"""Try to read a YAML file and report any errors.
"""
diff --git a/tox.ini b/tox.ini
index e79ea6aa..ca5d8b87 100644
--- a/tox.ini
+++ b/tox.ini
@@ -79,3 +79,11 @@ deps =
jsonpatch==1.2
six==1.9.0
-r{toxinidir}/test-requirements.txt
+
+[testenv:tip-pycodestyle]
+commands = {envpython} -m pycodestyle {posargs:cloudinit/ tests/ tools/}
+deps = pycodestyle
+
+[testenv:tip-pyflakes]
+commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
+deps = pyflakes