From a1b185d0cce5064e9b36b4db7b55564e2ab1d7a8 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 20 Oct 2016 22:53:17 -0400 Subject: Get early logging logged, including failures of cmdline url. Failures to load the kernel command line's url (cloud-config-url=) would previously get swallowed. This should make it much more obvious when that happens. With logging going to expected places at sane levels (WARN will go to stderr by default). --- cloudinit/cmd/main.py | 118 +++++++++++++++++++++++++++++++++++----- cloudinit/util.py | 44 --------------- tests/unittests/helpers.py | 16 ++++-- tests/unittests/test__init__.py | 92 ++++++++++++++++++++----------- 4 files changed, 173 insertions(+), 97 deletions(-) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index c83496c5..65b15edc 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -26,6 +26,7 @@ from cloudinit import signal_handler from cloudinit import sources from cloudinit import stages from cloudinit import templater +from cloudinit import url_helper from cloudinit import util from cloudinit import version @@ -129,23 +130,104 @@ def apply_reporting_cfg(cfg): reporting.update_configuration(cfg.get('reporting')) +def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')): + data = util.keyval_str_to_dict(cmdline) + for key in names: + if key in data: + return key, data[key] + raise KeyError("No keys (%s) found in string '%s'" % + (cmdline, names)) + + +def attempt_cmdline_url(path, network=True, cmdline=None): + """Write data from url referenced in command line to path. + + path: a file to write content to if downloaded. + network: should network access be assumed. + cmdline: the cmdline to parse for cloud-config-url. + + This is used in MAAS datasource, in "ephemeral" (read-only root) + environment where the instance netboots to iscsi ro root. + and the entity that controls the pxe config has to configure + the maas datasource. 
+ + An attempt is made on network urls even in local datasource + for case of network set up in initramfs. + + Return value is a tuple of a logger function (logging.DEBUG) + and a message indicating what happened. + """ + + if cmdline is None: + cmdline = util.get_cmdline() + + try: + cmdline_name, url = parse_cmdline_url(cmdline) + except KeyError: + return (logging.DEBUG, "No kernel command line url found.") + + path_is_local = url.startswith("file://") or url.startswith("/") + + if path_is_local and os.path.exists(path): + if network: + m = ("file '%s' existed, possibly from local stage download" + " of command line url '%s'. Not re-writing." % (path, url)) + level = logging.INFO + if path_is_local: + level = logging.DEBUG + else: + m = ("file '%s' existed, possibly from previous boot download" + " of command line url '%s'. Not re-writing." % (path, url)) + level = logging.WARN + + return (level, m) + + kwargs = {'url': url, 'timeout': 10, 'retries': 2} + if network or path_is_local: + level = logging.WARN + kwargs['sec_between'] = 1 + else: + level = logging.DEBUG + kwargs['sec_between'] = .1 + + data = None + header = b'#cloud-config' + try: + resp = util.read_file_or_url(**kwargs) + if resp.ok(): + data = resp.contents + if not resp.contents.startswith(header): + if cmdline_name == 'cloud-config-url': + level = logging.WARN + else: + level = logging.INFO + return ( + level, + "contents of '%s' did not start with %s" % (url, header)) + else: + return (level, + "url '%s' returned code %s. Ignoring." 
% (url, resp.code)) + + except url_helper.UrlError as e: + return (level, "retrieving url '%s' failed: %s" % (url, e)) + + util.write_file(path, data, mode=0o600) + return (logging.INFO, + "wrote cloud-config data from %s='%s' to %s" % + (cmdline_name, url, path)) + + def main_init(name, args): deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] if args.local: deps = [sources.DEP_FILESYSTEM] - if not args.local: - # See doc/kernel-cmdline.txt - # - # This is used in maas datasource, in "ephemeral" (read-only root) - # environment where the instance netboots to iscsi ro root. - # and the entity that controls the pxe config has to configure - # the maas datasource. - # - # Could be used elsewhere, only works on network based (not local). - root_name = "%s.d" % (CLOUD_CONFIG) - target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg") - util.read_write_cmdline_url(target_fn) + early_logs = [] + early_logs.append( + attempt_cmdline_url( + path=os.path.join("%s.d" % CLOUD_CONFIG, + "91_kernel_cmdline_url.cfg"), + network=not args.local)) # Cloud-init 'init' stage is broken up into the following sub-stages # 1. Ensure that the init object fetches its config without errors @@ -171,12 +253,14 @@ def main_init(name, args): outfmt = None errfmt = None try: - LOG.debug("Closing stdin") + early_logs.append((logging.DEBUG, "Closing stdin.")) util.close_stdin() (outfmt, errfmt) = util.fixup_output(init.cfg, name) except Exception: - util.logexc(LOG, "Failed to setup output redirection!") - print_exc("Failed to setup output redirection!") + msg = "Failed to setup output redirection!" + util.logexc(LOG, msg) + print_exc(msg) + early_logs.append((logging.WARN, msg)) if args.debug: # Reset so that all the debug handlers are closed out LOG.debug(("Logging being reset, this logger may no" @@ -190,6 +274,10 @@ def main_init(name, args): # been redirected and log now configured. 
welcome(name, msg=w_msg) + # re-play early log messages before logging was setup + for lvl, msg in early_logs: + LOG.log(lvl, msg) + # Stage 3 try: init.initialize() diff --git a/cloudinit/util.py b/cloudinit/util.py index 5725129e..7196a7ca 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1089,31 +1089,6 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"): return fqdn -def get_cmdline_url(names=('cloud-config-url', 'url'), - starts=b"#cloud-config", cmdline=None): - if cmdline is None: - cmdline = get_cmdline() - - data = keyval_str_to_dict(cmdline) - url = None - key = None - for key in names: - if key in data: - url = data[key] - break - - if not url: - return (None, None, None) - - resp = read_file_or_url(url) - # allow callers to pass starts as text when comparing to bytes contents - starts = encode_text(starts) - if resp.ok() and resp.contents.startswith(starts): - return (key, url, resp.contents) - - return (key, url, None) - - def is_resolvable(name): """determine if a url is resolvable, return a boolean This also attempts to be resilent against dns redirection. 
@@ -1475,25 +1450,6 @@ def ensure_dirs(dirlist, mode=0o755): ensure_dir(d, mode) -def read_write_cmdline_url(target_fn): - if not os.path.exists(target_fn): - try: - (key, url, content) = get_cmdline_url() - except Exception: - logexc(LOG, "Failed fetching command line url") - return - try: - if key and content: - write_file(target_fn, content, mode=0o600) - LOG.debug(("Wrote to %s with contents of command line" - " url %s (len=%s)"), target_fn, url, len(content)) - elif key and not content: - LOG.debug(("Command line key %s with url" - " %s had no contents"), key, url) - except Exception: - logexc(LOG, "Failed writing url content to %s", target_fn) - - def yaml_dumps(obj, explicit_start=True, explicit_end=True): return yaml.safe_dump(obj, line_break="\n", diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index cf3b46d2..64e56d98 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -264,16 +264,22 @@ class HttprettyTestCase(TestCase): class TempDirTestCase(TestCase): # provide a tempdir per class, not per test. - def setUp(self): - super(TempDirTestCase, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) + @classmethod + def setUpClass(cls): + cls.tmpd = tempfile.mkdtemp(prefix="ci-%s." % cls.__name__) + return TestCase.setUpClass() + + @classmethod + def tearDownClass(cls): + shutil.rmtree(cls.tmpd) + return TestCase.tearDownClass() def tmp_path(self, path): + # if absolute path (starts with /), then make ./path if path.startswith(os.path.sep): path = "." + path - return os.path.normpath(os.path.join(self.tmp, path)) + return os.path.normpath(os.path.join(self.tmpd, path)) def populate_dir(path, files): diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 7b6f8c4e..e6f4c318 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -1,16 +1,18 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+import logging import os import shutil import tempfile +from cloudinit.cmd import main from cloudinit import handlers from cloudinit import helpers from cloudinit import settings from cloudinit import url_helper from cloudinit import util -from .helpers import TestCase, ExitStack, mock +from .helpers import TestCase, TempDirTestCase, ExitStack, mock class FakeModule(handlers.Handler): @@ -170,44 +172,68 @@ class TestHandlerHandlePart(TestCase): self.data, self.ctype, self.filename, self.payload) -class TestCmdlineUrl(TestCase): - def test_invalid_content(self): - url = "http://example.com/foo" - key = "mykey" - payload = b"0" - cmdline = "ro %s=%s bar=1" % (key, url) +class TestCmdlineUrl(TempDirTestCase): + def test_parse_cmdline_url_nokey_raises_keyerror(self): + self.assertRaises( + KeyError, main.parse_cmdline_url, 'root=foo bar single') - with mock.patch('cloudinit.url_helper.readurl', - return_value=url_helper.StringResponse(payload)): - self.assertEqual( - util.get_cmdline_url(names=[key], starts="xxxxxx", - cmdline=cmdline), - (key, url, None)) + def test_parse_cmdline_url_found(self): + cmdline = 'root=foo bar single url=http://example.com arg1 -v' + self.assertEqual( + ('url', 'http://example.com'), main.parse_cmdline_url(cmdline)) - def test_valid_content(self): - url = "http://example.com/foo" - key = "mykey" - payload = b"xcloud-config\nmydata: foo\nbar: wark\n" + @mock.patch('cloudinit.cmd.main.util.read_file_or_url') + def test_invalid_content(self, m_read): + key = "cloud-config-url" + url = 'http://example.com/foo' cmdline = "ro %s=%s bar=1" % (key, url) + m_read.return_value = url_helper.StringResponse(b"unexpected blob") - with mock.patch('cloudinit.url_helper.readurl', - return_value=url_helper.StringResponse(payload)): - self.assertEqual( - util.get_cmdline_url(names=[key], starts=b"xcloud-config", - cmdline=cmdline), - (key, url, payload)) + fpath = self.tmp_path("test_valid") + lvl, msg = main.attempt_cmdline_url( + fpath, network=True, 
cmdline=cmdline) + self.assertEqual(logging.WARN, lvl) + self.assertIn(url, msg) + self.assertFalse(os.path.exists(fpath)) - def test_no_key_found(self): + @mock.patch('cloudinit.cmd.main.util.read_file_or_url') + def test_valid_content(self, m_read): url = "http://example.com/foo" - key = "mykey" - cmdline = "ro %s=%s bar=1" % (key, url) - - with mock.patch('cloudinit.url_helper.readurl', - return_value=url_helper.StringResponse(b'')): - self.assertEqual( - util.get_cmdline_url(names=["does-not-appear"], - starts="#cloud-config", cmdline=cmdline), - (None, None, None)) + payload = b"#cloud-config\nmydata: foo\nbar: wark\n" + cmdline = "ro %s=%s bar=1" % ('cloud-config-url', url) + + m_read.return_value = url_helper.StringResponse(payload) + fpath = self.tmp_path("test_valid") + lvl, msg = main.attempt_cmdline_url( + fpath, network=True, cmdline=cmdline) + self.assertEqual(util.load_file(fpath, decode=False), payload) + self.assertEqual(logging.INFO, lvl) + self.assertIn(url, msg) + + @mock.patch('cloudinit.cmd.main.util.read_file_or_url') + def test_no_key_found(self, m_read): + cmdline = "ro mykey=http://example.com/foo root=foo" + fpath = self.tmp_path("test_no_key_found") + lvl, msg = main.attempt_cmdline_url( + fpath, network=True, cmdline=cmdline) + + m_read.assert_not_called() + self.assertFalse(os.path.exists(fpath)) + self.assertEqual(logging.DEBUG, lvl) + + @mock.patch('cloudinit.cmd.main.util.read_file_or_url') + def test_exception_warns(self, m_read): + url = "http://example.com/foo" + cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url + fpath = self.tmp_path("test_no_key_found") + m_read.side_effect = url_helper.UrlError( + cause="Unexpected Error", url="http://example.com/foo") + + lvl, msg = main.attempt_cmdline_url( + fpath, network=True, cmdline=cmdline) + self.assertEqual(logging.WARN, lvl) + self.assertIn(url, msg) + self.assertFalse(os.path.exists(fpath)) # vi: ts=4 expandtab -- cgit v1.2.3 From e55ff8f8ea9abeb7c406b2eec3e91aad8fee6f64 Mon 
Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 11 Jan 2017 16:20:10 -0500 Subject: validate-yaml: use python rather than explicitly python3 The change here is to use '/usr/bin/env python' in validate-yaml.py as all other tools/*.py do. Additionally, change the Makefile to invoke validate-yaml.py with the python that it has selected for other things (PYVER). --- Makefile | 2 +- tools/validate-yaml.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 5d35dcc0..ed631cf7 100644 --- a/Makefile +++ b/Makefile @@ -73,7 +73,7 @@ clean: clean_pyc rm -rf /var/log/cloud-init.log /var/lib/cloud/ yaml: - @$(CWD)/tools/validate-yaml.py $(YAML_FILES) + @$(PYVER) $(CWD)/tools/validate-yaml.py $(YAML_FILES) rpm: ./packages/brpm --distro $(distro) diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py index d8bbcfcb..a57ea847 100755 --- a/tools/validate-yaml.py +++ b/tools/validate-yaml.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python """Try to read a YAML file and report any errors. """ -- cgit v1.2.3 From a3daf184fd47dede8d91588281437453bd38fc1c Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Thu, 1 Dec 2016 19:40:36 -0500 Subject: Use dnf instead of yum when available Recent fedora releases use "dnf" instead of "yum" for package management. While there is a compatible "yum" cli available, there's no guarantee that it will be available. With this patch, cloud-init will check for /usr/bin/dnf and use that if it exists instead of yum. 
rhbz: https://bugzilla.redhat.com/show_bug.cgi?id=1194451 LP: #1647118 --- cloudinit/distros/rhel.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index aa558381..7498c63a 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -190,13 +190,18 @@ class Distro(distros.Distro): if pkgs is None: pkgs = [] - cmd = ['yum'] - # If enabled, then yum will be tolerant of errors on the command line - # with regard to packages. - # For example: if you request to install foo, bar and baz and baz is - # installed; yum won't error out complaining that baz is already - # installed. - cmd.append("-t") + if util.which('dnf'): + LOG.debug('Using DNF for package management') + cmd = ['dnf'] + else: + LOG.debug('Using YUM for package management') + # the '-t' argument makes yum tolerant of errors on the command + # line with regard to packages. + # + # For example: if you request to install foo, bar and baz and baz + # is installed; yum won't error out complaining that baz is already + # installed. + cmd = ['yum', '-t'] # Determines whether or not yum prompts for confirmation # of critical actions. We don't want to prompt... cmd.append("-y") -- cgit v1.2.3 From e2274393b882c723ab93189c57e7e68a46e4e10f Mon Sep 17 00:00:00 2001 From: Jeremy Bicha Date: Thu, 12 Jan 2017 20:00:55 -0500 Subject: Fix minor docs typo: perserve > preserve --- cloudinit/config/cc_set_hostname.py | 2 +- doc/examples/cloud-config.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index e42799f9..aa3dfe5f 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -27,7 +27,7 @@ will be used. 
**Config keys**:: - perserve_hostname: + preserve_hostname: fqdn: hostname: """ diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index c5f84b13..c03f1026 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -200,7 +200,7 @@ ssh_import_id: [smoser] # # Default: none # -debconf_selections: | # Need to perserve newlines +debconf_selections: | # Need to preserve newlines # Force debconf priority to critical. debconf debconf/priority select critical -- cgit v1.2.3 From 8ddb57149281ba2658696f19c1eb96e7769381e4 Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Tue, 6 Sep 2016 14:51:32 -0700 Subject: Fixed Misc issues related to VMware customization. - staticIPV4 property can be either None or a valid Array. Need to check for None before accessing the ip address. - Modified few misc. log messages. - Added a new log message while waiting for the customization config file. - Added support to configure the maximum amount of time to wait for the customization config file. - VMware Customization Support is provided only for DataSourceOVF class and not for any other child classes. Implemented a new variable vmware_customization_supported to check whether the 'VMware Customization' support is available for a specific datasource or not. - Changed the function get_vmware_cust_settings to get_max_wait_from_cfg. - Removed the code that does 'ifdown and iup' in NIC configurator. 
--- cloudinit/sources/DataSourceOVF.py | 37 +++++++++++++++++++--- cloudinit/sources/helpers/vmware/imc/config_nic.py | 24 ++++---------- 2 files changed, 39 insertions(+), 22 deletions(-) diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 78928c77..d70784ac 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -48,6 +48,7 @@ class DataSourceOVF(sources.DataSource): self.environment = None self.cfg = {} self.supported_seed_starts = ("/", "file://") + self.vmware_customization_supported = True def __str__(self): root = sources.DataSource.__str__(self) @@ -78,7 +79,10 @@ class DataSourceOVF(sources.DataSource): found.append(seed) elif system_type and 'vmware' in system_type.lower(): LOG.debug("VMware Virtualization Platform found") - if not util.get_cfg_option_bool( + if not self.vmware_customization_supported: + LOG.debug("Skipping the check for " + "VMware Customization support") + elif not util.get_cfg_option_bool( self.sys_cfg, "disable_vmware_customization", True): deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so") @@ -90,17 +94,18 @@ class DataSourceOVF(sources.DataSource): # copies the customization specification file to # /var/run/vmware-imc directory. cloud-init code needs # to search for the file in that directory. 
+ max_wait = get_max_wait_from_cfg(self.ds_cfg) vmwareImcConfigFilePath = util.log_time( logfunc=LOG.debug, msg="waiting for configuration file", func=wait_for_imc_cfg_file, - args=("/var/run/vmware-imc", "cust.cfg")) + args=("/var/run/vmware-imc", "cust.cfg", max_wait)) if vmwareImcConfigFilePath: - LOG.debug("Found VMware DeployPkg Config File at %s" % + LOG.debug("Found VMware Customization Config File at %s", vmwareImcConfigFilePath) else: - LOG.debug("Did not find VMware DeployPkg Config File Path") + LOG.debug("Did not find VMware Customization Config File") else: LOG.debug("Customization for VMware platform is disabled.") @@ -206,6 +211,29 @@ class DataSourceOVFNet(DataSourceOVF): DataSourceOVF.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net') self.supported_seed_starts = ("http://", "https://", "ftp://") + self.vmware_customization_supported = False + + +def get_max_wait_from_cfg(cfg): + default_max_wait = 90 + max_wait_cfg_option = 'vmware_cust_file_max_wait' + max_wait = default_max_wait + + if not cfg: + return max_wait + + try: + max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait)) + except ValueError: + LOG.warn("Failed to get '%s', using %s", + max_wait_cfg_option, default_max_wait) + + if max_wait <= 0: + LOG.warn("Invalid value '%s' for '%s', using '%s' instead", + max_wait, max_wait_cfg_option, default_max_wait) + max_wait = default_max_wait + + return max_wait def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5): @@ -215,6 +243,7 @@ def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5): fileFullPath = search_file(dirpath, filename) if fileFullPath: return fileFullPath + LOG.debug("Waiting for VMware Customization Config File") time.sleep(naplen) waited += naplen return None diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index d5a7c346..67ac21db 100644 --- 
a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -101,7 +101,11 @@ class NicConfigurator(object): return lines # Static Ipv4 - v4 = nic.staticIpv4 + addrs = nic.staticIpv4 + if not addrs: + return lines + + v4 = addrs[0] if v4.ip: lines.append(' address %s' % v4.ip) if v4.netmask: @@ -197,22 +201,6 @@ class NicConfigurator(object): util.subp(["pkill", "dhclient"], rcs=[0, 1]) util.subp(["rm", "-f", "/var/lib/dhcp/*"]) - def if_down_up(self): - names = [] - for nic in self.nics: - name = self.mac2Name.get(nic.mac.lower()) - names.append(name) - - for name in names: - logger.info('Bring down interface %s' % name) - util.subp(["ifdown", "%s" % name]) - - self.clear_dhcp() - - for name in names: - logger.info('Bring up interface %s' % name) - util.subp(["ifup", "%s" % name]) - def configure(self): """ Configure the /etc/network/intefaces @@ -232,6 +220,6 @@ class NicConfigurator(object): for line in lines: fp.write('%s\n' % line) - self.if_down_up() + self.clear_dhcp() # vi: ts=4 expandtab -- cgit v1.2.3 From 4cf53f1544f8f5629330eab3efef1a18255c277a Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Tue, 17 Jan 2017 08:53:22 -0500 Subject: OpenStack: Use timeout and retries from config in get_data. This modifies get_data in DataSourceOpenStack.py to get the timeout and retries values from the data source configuration, rather than from keyword arguments. This permits get_data to use the same timeout as other methods, and allows an operator to increase the timeout in environments where the metadata service takes longer than five seconds to respond. 
LP: #1657130 Resolves: rhbz#1408589 --- cloudinit/sources/DataSourceOpenStack.py | 15 ++++++++++++--- tests/unittests/test_datasource/test_openstack.py | 8 ++++---- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 2a58f1cd..e1ea21f8 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -45,6 +45,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): # max_wait < 0 indicates do not wait max_wait = -1 timeout = 10 + retries = 5 try: max_wait = int(self.ds_cfg.get("max_wait", max_wait)) @@ -55,7 +56,13 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): timeout = max(0, int(self.ds_cfg.get("timeout", timeout))) except Exception: util.logexc(LOG, "Failed to get timeout, using %s", timeout) - return (max_wait, timeout) + + try: + retries = int(self.ds_cfg.get("retries", retries)) + except Exception: + util.logexc(LOG, "Failed to get max wait. 
using %s", retries) + + return (max_wait, timeout, retries) def wait_for_metadata_service(self): urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL]) @@ -76,7 +83,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): md_urls.append(md_url) url2base[md_url] = url - (max_wait, timeout) = self._get_url_settings() + (max_wait, timeout, retries) = self._get_url_settings() start_time = time.time() avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, timeout=timeout) @@ -89,13 +96,15 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): self.metadata_address = url2base.get(avail_url) return bool(avail_url) - def get_data(self, retries=5, timeout=5): + def get_data(self): try: if not self.wait_for_metadata_service(): return False except IOError: return False + (max_wait, timeout, retries) = self._get_url_settings() + try: results = util.log_time(LOG.debug, 'Crawl of openstack metadata service', diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index e5b6fcc6..28e1833e 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -232,7 +232,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): None, helpers.Paths({})) self.assertIsNone(ds_os.version) - found = ds_os.get_data(timeout=0.1, retries=0) + found = ds_os.get_data() self.assertTrue(found) self.assertEqual(2, ds_os.version) md = dict(ds_os.metadata) @@ -256,7 +256,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): None, helpers.Paths({})) self.assertIsNone(ds_os.version) - found = ds_os.get_data(timeout=0.1, retries=0) + found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) @@ -275,7 +275,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): 'timeout': 0, } self.assertIsNone(ds_os.version) - found = ds_os.get_data(timeout=0.1, retries=0) + found = ds_os.get_data() 
self.assertFalse(found) self.assertIsNone(ds_os.version) @@ -298,7 +298,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): 'timeout': 0, } self.assertIsNone(ds_os.version) - found = ds_os.get_data(timeout=0.1, retries=0) + found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) -- cgit v1.2.3 From 47680bd623b295857dd18962523dccb33861b4e3 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 17 Jan 2017 11:00:35 -0500 Subject: doc: add some documentation on OpenStack datasource. This just fills out some of the documentation on the OpenStack datasource. --- doc/rtd/topics/datasources/openstack.rst | 36 +++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst index ea47ea85..164b0e0c 100644 --- a/doc/rtd/topics/datasources/openstack.rst +++ b/doc/rtd/topics/datasources/openstack.rst @@ -1,7 +1,41 @@ OpenStack ========= -*TODO* +This datasource supports reading data from the +`OpenStack Metadata Service +`_. + +Configuration +------------- +The following configuration can be set for the datasource in system +configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). + +The settings that may be configured are: + + * **metadata_urls**: This list of urls will be searched for an OpenStack + metadata service. The first entry that successfully returns a 200 response + for /openstack will be selected. (default: ['http://169.254.169.254']). + * **max_wait**: the maximum amount of clock time in seconds that should be + spent searching metadata_urls. A value less than zero will result in only + one request being made, to the first in the list. (default: -1) + * **timeout**: the timeout value provided to urlopen for each individual http + request. This is used both when selecting a metadata_url and when crawling + the metadata service. 
(default: 10) + * **retries**: The number of retries that should be done for an http request. + This value is used only after metadata_url is selected. (default: 5) + +An example configuration with the default values is provided as example below: + +.. sourcecode:: yaml + + #cloud-config + datasource: + OpenStack: + metadata_urls: ["http://169.254.169.254"] + max_wait: -1 + timeout: 10 + retries: 5 + Vendor Data ----------- -- cgit v1.2.3 From 145410f81c144a46cf5ce0324ff4454fa9f54ad0 Mon Sep 17 00:00:00 2001 From: "Erik M. Bray" Date: Tue, 20 Dec 2016 12:58:47 +0100 Subject: doc: Fix typos and clarify some aspects of the part-handler The existing documentation referred to a handle_type method when it really should be handle_part. It also referred to 'methods' when it really should say 'functions' to be clear (while it's true the built-in handlers are classes with methods of these names, in this context we mean module-level functions). Also clarified that a part-handler should come before the parts that it handles, and can override built-in handlers. --- doc/rtd/topics/format.rst | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst index ed87d3ed..436eb00f 100644 --- a/doc/rtd/topics/format.rst +++ b/doc/rtd/topics/format.rst @@ -127,11 +127,11 @@ Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when u Part Handler ============ -This is a ``part-handler``. It will be written to a file in ``/var/lib/cloud/data`` based on its filename (which is generated). -This must be python code that contains a ``list_types`` method and a ``handle_type`` method. -Once the section is read the ``list_types`` method will be called. It must return a list of mime-types that this part-handler handles. +This is a ``part-handler``: It contains custom code for either supporting new mime-types in multi-part user data, or overriding the existing handlers for supported mime-types. 
It will be written to a file in ``/var/lib/cloud/data`` based on its filename (which is generated). +This must be python code that contains a ``list_types`` function and a ``handle_part`` function. +Once the section is read the ``list_types`` method will be called. It must return a list of mime-types that this part-handler handles. Because mime parts are processed in order, a ``part-handler`` part must precede any parts with mime-types it is expected to handle in the same user data. -The ``handle_type`` method must be like: +The ``handle_part`` function must be defined like: .. code-block:: python @@ -141,8 +141,9 @@ The ``handle_type`` method must be like: # filename = the filename of the part (or a generated filename if none is present in mime data) # payload = the parts' content -Cloud-init will then call the ``handle_type`` method once at begin, once per part received, and once at end. -The ``begin`` and ``end`` calls are to allow the part handler to do initialization or teardown. +Cloud-init will then call the ``handle_part`` function once before it handles any parts, once per part received, and once after all parts have been handled. +The ``'__begin__'`` and ``'__end__'`` sentinels allow the part handler to do initialization or teardown before or after +receiving any parts. Begins with: ``#part-handler`` or ``Content-Type: text/part-handler`` when using a MIME archive. -- cgit v1.2.3 From b71592ce0e0a9f9f9f225315015ca57b312ad30d Mon Sep 17 00:00:00 2001 From: Andrew Jorgensen Date: Tue, 1 Nov 2016 10:54:31 -0400 Subject: EC2: Do not cache security credentials on disk On EC2, instance metadata can include credentials that remain valid for as much as 6 hours. Reading these and allowing them to be pickled represents a potential vulnerability if a snapshot of the disk is taken and shared as part of an AMI. This skips security-credentials when walking the meta-data tree. 
LP: #1638312 Reviewed-by: Ian Weller Reviewed-by: Ben Cressey Reported-by: Kyle Barnes --- cloudinit/ec2_utils.py | 3 +++ tests/unittests/test_ec2_util.py | 45 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index c656ef14..0c16ae47 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -82,6 +82,9 @@ class MetadataMaterializer(object): field_name = get_name(field) if not field or not field_name: continue + # Don't materialize credentials + if field_name == 'security-credentials': + continue if has_children(field): if field_name not in children: children.append(field_name) diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py index 4a33d747..71c2009f 100644 --- a/tests/unittests/test_ec2_util.py +++ b/tests/unittests/test_ec2_util.py @@ -140,4 +140,49 @@ class TestEc2Util(helpers.HttprettyTestCase): self.assertEqual(bdm['ami'], 'sdb') self.assertEqual(bdm['ephemeral0'], 'sdc') + @hp.activate + def test_metadata_no_security_credentials(self): + base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) + hp.register_uri(hp.GET, base_url, status=200, + body="\n".join(['instance-id', + 'iam/'])) + hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'), + status=200, body='i-0123451689abcdef0') + hp.register_uri(hp.GET, + uh.combine_url(base_url, 'iam/'), + status=200, + body="\n".join(['info/', 'security-credentials/'])) + hp.register_uri(hp.GET, + uh.combine_url(base_url, 'iam/info/'), + status=200, + body='LastUpdated') + hp.register_uri(hp.GET, + uh.combine_url(base_url, 'iam/info/LastUpdated'), + status=200, body='2016-10-27T17:29:39Z') + hp.register_uri(hp.GET, + uh.combine_url(base_url, 'iam/security-credentials/'), + status=200, + body='ReadOnly/') + hp.register_uri(hp.GET, + uh.combine_url(base_url, + 'iam/security-credentials/ReadOnly/'), + status=200, + body="\n".join(['LastUpdated', 'Expiration'])) + 
hp.register_uri(hp.GET, + uh.combine_url( + base_url, + 'iam/security-credentials/ReadOnly/LastUpdated'), + status=200, body='2016-10-27T17:28:17Z') + hp.register_uri(hp.GET, + uh.combine_url( + base_url, + 'iam/security-credentials/ReadOnly/Expiration'), + status=200, body='2016-10-28T00:00:34Z') + md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1) + self.assertEqual(md['instance-id'], 'i-0123451689abcdef0') + iam = md['iam'] + self.assertEqual(1, len(iam)) + self.assertEqual(iam['info']['LastUpdated'], '2016-10-27T17:29:39Z') + self.assertNotIn('security-credentials', iam) + # vi: ts=4 expandtab -- cgit v1.2.3 From a3376d45c83e90150d8de79a2b31282a7d760bd7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 20 Jan 2017 09:36:51 -0500 Subject: build: fix running Make on a branch with tags other than master running 'make' on a git branch other than master would fail with complaint that the tools/read-version reported a different version than the code. Change to only consider tags starting with 0-9 in read-version. 
--- Makefile | 7 ++++--- tools/make-tarball | 2 +- tools/read-version | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index ed631cf7..18ec5680 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,8 @@ ifeq ($(distro),) distro = redhat endif -READ_VERSION=$(shell $(PYVER) $(CWD)/tools/read-version) +READ_VERSION=$(shell $(PYVER) $(CWD)/tools/read-version || \ + echo read-version-failed) CODE_VERSION=$(shell $(PYVER) -c "from cloudinit import version; print(version.version_string())") @@ -62,8 +63,8 @@ test: $(unittests) check_version: @if [ "$(READ_VERSION)" != "$(CODE_VERSION)" ]; then \ - echo "Error: read-version version $(READ_VERSION)" \ - "not equal to code version $(CODE_VERSION)"; exit 2; \ + echo "Error: read-version version '$(READ_VERSION)'" \ + "not equal to code version '$(CODE_VERSION)'"; exit 2; \ else true; fi clean_pyc: diff --git a/tools/make-tarball b/tools/make-tarball index c150dd2f..91c45624 100755 --- a/tools/make-tarball +++ b/tools/make-tarball @@ -35,7 +35,7 @@ while [ $# -ne 0 ]; do done rev=${1:-HEAD} -version=$(git describe ${long_opt} $rev) +version=$(git describe "--match=[0-9]*" ${long_opt} $rev) archive_base="cloud-init-$version" if [ -z "$output" ]; then diff --git a/tools/read-version b/tools/read-version index 3b30b497..ddb28383 100755 --- a/tools/read-version +++ b/tools/read-version @@ -56,7 +56,7 @@ if os.path.isdir(os.path.join(_tdir, ".git")) and which("git"): flags = [] if use_tags: flags = ['--tags'] - cmd = ['git', 'describe'] + flags + cmd = ['git', 'describe', '--match=[0-9]*'] + flags version = tiny_p(cmd).strip() -- cgit v1.2.3 From d3fbb5df017c7a6e0eb1a146d970db260932d7e8 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Fri, 20 Jan 2017 16:19:23 -0500 Subject: reset httppretty for each test this ensures that we call httpretty.reset() before calling httppretty.register_uri(...), which ensures that we get a fresh callback with the expected version of the metadata. 
LP: #1658200 --- tests/unittests/test_datasource/test_gce.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 4f667678..a5cced07 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -59,6 +59,8 @@ def _set_mock_metadata(gce_meta=None): else: return (404, headers, '') + # reset is needed. https://github.com/gabrielfalcao/HTTPretty/issues/316 + httpretty.reset() httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback) -- cgit v1.2.3 From 853df0a0e85002582694b88db886f206f64b23c7 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Fri, 20 Jan 2017 14:32:08 -0500 Subject: Add 3 ecdsa-sha2-nistp* ssh key types now that they are standardized cloud-init adds ssh_authorized_keys to the default user and to root but for root it disables the keys with a prefix command. However, if the public_key key is of type ecdsa-sha2-nistp521, it is not parsed correctly, and the prefix command is not prepended. 
Resolves: rhbz#1151824 LP: #1658174 --- cloudinit/ssh_util.py | 3 +++ tests/unittests/test_sshutil.py | 24 +++++++++++++++++++++++- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index be8a49e8..b95b956f 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -22,8 +22,11 @@ DEF_SSHD_CFG = "/etc/ssh/sshd_config" VALID_KEY_TYPES = ( "dsa", "ecdsa", + "ecdsa-sha2-nistp256", "ecdsa-sha2-nistp256-cert-v01@openssh.com", + "ecdsa-sha2-nistp384", "ecdsa-sha2-nistp384-cert-v01@openssh.com", + "ecdsa-sha2-nistp521", "ecdsa-sha2-nistp521-cert-v01@openssh.com", "ed25519", "rsa", diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 55971b5e..991f45a6 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -32,6 +32,22 @@ VALID_CONTENT = { "YWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07" "/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw==" ), + 'ecdsa-sha2-nistp256': ( + "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMy/WuXq5MF" + "r5hVQ9EEKKUTF7vUaOkgxUh6bNsCs9SFMVslIm1zM/WJYwUv52LdEePjtDYiV4A" + "l2XthJ9/bs7Pc=" + ), + 'ecdsa-sha2-nistp521': ( + "AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBABOdNTkh9F" + "McK4hZRLs5LTXBEXwNr0+Yg9uvJYRFcz2ZlnjYX9tM4Z3QQFjqogU4pU+zpKLqZ" + "5VE4Jcnb1T608UywBIdXkSFZT8trGJqBv9nFWGgmTX3KP8kiBbihpuv1cGwglPl" + "Hxs50A42iP0JiT7auGtEAGsu/uMql323GTGb4171Q==" + ), + 'ecdsa-sha2-nistp384': ( + "AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBAnoqFU9Gnl" + "LcsEuCJnobs/c6whzvjCgouaOO61kgXNtIxyF4Wkutg6xaGYgBBt/phb7a2TurI" + "bcIBuzJ/mP22UyUAbNnBfStAEBmYbrTf1EfiMCYUAr1XnL0UdYmZ8HFg==" + ), } TEST_OPTIONS = ( @@ -44,7 +60,13 @@ class TestAuthKeyLineParser(test_helpers.TestCase): def test_simple_parse(self): # test key line with common 3 fields (keytype, base64, comment) parser = ssh_util.AuthKeyLineParser() - for ktype in ['rsa', 'ecdsa', 'dsa']: + ecdsa_types = [ + 
'ecdsa-sha2-nistp256', + 'ecdsa-sha2-nistp384', + 'ecdsa-sha2-nistp521', + ] + + for ktype in ['rsa', 'ecdsa', 'dsa'] + ecdsa_types: content = VALID_CONTENT[ktype] comment = 'user-%s@host' % ktype line = ' '.join((ktype, content, comment,)) -- cgit v1.2.3 From 8e9e47f2f06b4dcf017149699111e94ba7c1830b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 24 Jan 2017 11:37:08 -0500 Subject: tests: No longer monkey patch httpretty for python 3.4.2 No shipping ubuntu has a python 3.4 that is less than 3.4.2. Remove this workaround to avoid unnecessary complexity. This reverts 04a60cf949. --- tests/unittests/helpers.py | 35 ----------------------- tests/unittests/test_datasource/test_gce.py | 2 +- tests/unittests/test_datasource/test_openstack.py | 3 +- tests/unittests/test_ec2_util.py | 4 +-- 4 files changed, 4 insertions(+), 40 deletions(-) diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 64e56d98..a0933464 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -2,7 +2,6 @@ from __future__ import print_function -import functools import os import shutil import sys @@ -29,7 +28,6 @@ PY2 = False PY26 = False PY27 = False PY3 = False -FIX_HTTPRETTY = False _PY_VER = sys.version_info _PY_MAJOR, _PY_MINOR, _PY_MICRO = _PY_VER[0:3] @@ -44,8 +42,6 @@ else: PY2 = True if (_PY_MAJOR, _PY_MINOR) >= (3, 0): PY3 = True - if _PY_MINOR == 4 and _PY_MICRO < 3: - FIX_HTTPRETTY = True # Makes the old path start @@ -216,37 +212,6 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): return root -def import_httpretty(): - """Import HTTPretty and monkey patch Python 3.4 issue. - See https://github.com/gabrielfalcao/HTTPretty/pull/193 and - as well as https://github.com/gabrielfalcao/HTTPretty/issues/221. 
- - Lifted from - https://github.com/inveniosoftware/datacite/blob/master/tests/helpers.py - """ - if not FIX_HTTPRETTY: - import httpretty - else: - import socket - old_SocketType = socket.SocketType - - import httpretty - from httpretty import core - - def sockettype_patch(f): - @functools.wraps(f) - def inner(*args, **kwargs): - f(*args, **kwargs) - socket.SocketType = old_SocketType - socket.__dict__['SocketType'] = old_SocketType - return inner - - core.httpretty.disable = sockettype_patch( - httpretty.httpretty.disable - ) - return httpretty - - class HttprettyTestCase(TestCase): # necessary as http_proxy gets in the way of httpretty # https://github.com/gabrielfalcao/HTTPretty/issues/122 diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index a5cced07..4f83454e 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -4,6 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. +import httpretty import re from base64 import b64encode, b64decode @@ -15,7 +16,6 @@ from cloudinit.sources import DataSourceGCE from .. import helpers as test_helpers -httpretty = test_helpers.import_httpretty() GCE_META = { 'instance/id': '123', diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 28e1833e..7bf55084 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -5,6 +5,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import copy +import httpretty as hp import json import re @@ -20,8 +21,6 @@ from cloudinit.sources import DataSourceOpenStack as ds from cloudinit.sources.helpers import openstack from cloudinit import util -hp = test_helpers.import_httpretty() - BASE_URL = "http://169.254.169.254" PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' EC2_META = { diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py index 71c2009f..65fdb519 100644 --- a/tests/unittests/test_ec2_util.py +++ b/tests/unittests/test_ec2_util.py @@ -1,12 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. +import httpretty as hp + from . import helpers from cloudinit import ec2_utils as eu from cloudinit import url_helper as uh -hp = helpers.import_httpretty() - class TestEc2Util(helpers.HttprettyTestCase): VERSION = 'latest' -- cgit v1.2.3 From 65e01b463cee0bdb8c8b415e78abfcc3262aad89 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 19 Jan 2017 17:31:34 -0500 Subject: tests: remove executable bit on test_net, so it runs, and fix it. The test_user_data_normalize and test_net files had gotten the executable bit set, and thus are skipped by nose by default. We could set run with the --exe flag, but they should not have gotten this way. Other changes here: * replace TempDirTestCase with CiTestCase, which has some nice tmp_dir() and tmp_path() functions. Going forward the intent is to have CiTestCase be the base test case for tests. * test_net: switch to CiTestCase and fix usage that was silently broken, because of exe bit. * populate_dir: return the list of files that it writes rather than having no return value. * CiTestCase: * support tmp_path("foo") that returns a full path to 'foo' under a tmpdir. * add tmp_dir() to get a temp dir and clean up. 
--- tests/unittests/helpers.py | 47 +++++++++++-------- tests/unittests/test__init__.py | 12 ++--- tests/unittests/test_atomic_helper.py | 4 +- .../test_distros/test_user_data_normalize.py | 0 tests/unittests/test_net.py | 53 ++++++++-------------- 5 files changed, 55 insertions(+), 61 deletions(-) mode change 100755 => 100644 tests/unittests/test_distros/test_user_data_normalize.py mode change 100755 => 100644 tests/unittests/test_net.py diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index a0933464..90e2431f 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -2,6 +2,7 @@ from __future__ import print_function +import functools import os import shutil import sys @@ -82,6 +83,28 @@ class TestCase(unittest2.TestCase): pass +class CiTestCase(TestCase): + """This is the preferred test case base class unless user + needs other test case classes below.""" + def tmp_dir(self, dir=None, cleanup=True): + # return a full path to a temporary directory that will be cleaned up. + if dir is None: + tmpd = tempfile.mkdtemp( + prefix="ci-%s." % self.__class__.__name__) + else: + tmpd = tempfile.mkdtemp(dir=dir) + self.addCleanup(functools.partial(shutil.rmtree, tmpd)) + return tmpd + + def tmp_path(self, path, dir=None): + # return an absolute path to 'path' under dir. + # if dir is None, one will be created with tmp_dir() + # the file is not created or modified. + if dir is None: + dir = self.tmp_dir() + return os.path.normpath(os.path.abspath(os.path.join(dir, path))) + + class ResourceUsingTestCase(TestCase): def setUp(self): super(ResourceUsingTestCase, self).setUp() @@ -227,29 +250,10 @@ class HttprettyTestCase(TestCase): super(HttprettyTestCase, self).tearDown() -class TempDirTestCase(TestCase): - # provide a tempdir per class, not per test. - @classmethod - def setUpClass(cls): - cls.tmpd = tempfile.mkdtemp(prefix="ci-%s." 
% cls.__name__) - return TestCase.setUpClass() - - @classmethod - def tearDownClass(cls): - shutil.rmtree(cls.tmpd) - return TestCase.tearDownClass() - - def tmp_path(self, path): - # if absolute path (starts with /), then make ./path - if path.startswith(os.path.sep): - path = "." + path - - return os.path.normpath(os.path.join(self.tmpd, path)) - - def populate_dir(path, files): if not os.path.exists(path): os.makedirs(path) + ret = [] for (name, content) in files.items(): p = os.path.join(path, name) util.ensure_dir(os.path.dirname(p)) @@ -259,6 +263,9 @@ def populate_dir(path, files): else: fp.write(content.encode('utf-8')) fp.close() + ret.append(p) + + return ret def dir2dict(startdir, prefix=None): diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index e6f4c318..781f6d54 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -12,7 +12,7 @@ from cloudinit import settings from cloudinit import url_helper from cloudinit import util -from .helpers import TestCase, TempDirTestCase, ExitStack, mock +from .helpers import TestCase, CiTestCase, ExitStack, mock class FakeModule(handlers.Handler): @@ -172,7 +172,7 @@ class TestHandlerHandlePart(TestCase): self.data, self.ctype, self.filename, self.payload) -class TestCmdlineUrl(TempDirTestCase): +class TestCmdlineUrl(CiTestCase): def test_parse_cmdline_url_nokey_raises_keyerror(self): self.assertRaises( KeyError, main.parse_cmdline_url, 'root=foo bar single') @@ -189,7 +189,7 @@ class TestCmdlineUrl(TempDirTestCase): cmdline = "ro %s=%s bar=1" % (key, url) m_read.return_value = url_helper.StringResponse(b"unexpected blob") - fpath = self.tmp_path("test_valid") + fpath = self.tmp_path("ccfile") lvl, msg = main.attempt_cmdline_url( fpath, network=True, cmdline=cmdline) self.assertEqual(logging.WARN, lvl) @@ -203,7 +203,7 @@ class TestCmdlineUrl(TempDirTestCase): cmdline = "ro %s=%s bar=1" % ('cloud-config-url', url) m_read.return_value = 
url_helper.StringResponse(payload) - fpath = self.tmp_path("test_valid") + fpath = self.tmp_path("ccfile") lvl, msg = main.attempt_cmdline_url( fpath, network=True, cmdline=cmdline) self.assertEqual(util.load_file(fpath, decode=False), payload) @@ -213,7 +213,7 @@ class TestCmdlineUrl(TempDirTestCase): @mock.patch('cloudinit.cmd.main.util.read_file_or_url') def test_no_key_found(self, m_read): cmdline = "ro mykey=http://example.com/foo root=foo" - fpath = self.tmp_path("test_no_key_found") + fpath = self.tmp_path("ccpath") lvl, msg = main.attempt_cmdline_url( fpath, network=True, cmdline=cmdline) @@ -225,7 +225,7 @@ class TestCmdlineUrl(TempDirTestCase): def test_exception_warns(self, m_read): url = "http://example.com/foo" cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url - fpath = self.tmp_path("test_no_key_found") + fpath = self.tmp_path("ccfile") m_read.side_effect = url_helper.UrlError( cause="Unexpected Error", url="http://example.com/foo") diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py index e170c7c3..515919d8 100644 --- a/tests/unittests/test_atomic_helper.py +++ b/tests/unittests/test_atomic_helper.py @@ -6,10 +6,10 @@ import stat from cloudinit import atomic_helper -from . 
import helpers +from .helpers import CiTestCase -class TestAtomicHelper(helpers.TempDirTestCase): +class TestAtomicHelper(CiTestCase): def test_basic_usage(self): """write_file takes bytes if no omode.""" path = self.tmp_path("test_basic_usage") diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py old mode 100755 new mode 100644 diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py old mode 100755 new mode 100644 index 1090282a..2c2bde96 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -8,11 +8,10 @@ from cloudinit.net import sysconfig from cloudinit.sources.helpers import openstack from cloudinit import util +from .helpers import CiTestCase from .helpers import dir2dict from .helpers import mock from .helpers import populate_dir -from .helpers import TempDirTestCase -from .helpers import TestCase import base64 import copy @@ -20,8 +19,6 @@ import gzip import io import json import os -import shutil -import tempfile import textwrap import yaml @@ -478,7 +475,7 @@ def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path.side_effect = sys_dev_path -class TestSysConfigRendering(TestCase): +class TestSysConfigRendering(CiTestCase): @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @@ -486,8 +483,7 @@ class TestSysConfigRendering(TestCase): def test_default_generation(self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path): - tmp_dir = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, tmp_dir) + tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path) @@ -518,9 +514,7 @@ USERCTL=no self.assertEqual(expected_content, content) def test_openstack_rendering_samples(self): - tmp_dir = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, tmp_dir) - render_dir = os.path.join(tmp_dir, "render") + render_dir = self.tmp_dir() for os_sample in 
OS_SAMPLES: ex_input = os_sample['in_data'] ex_mac_addrs = os_sample['in_macs'] @@ -535,7 +529,7 @@ USERCTL=no self.assertEqual(expected_content, fh.read()) -class TestEniNetRendering(TestCase): +class TestEniNetRendering(CiTestCase): @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @@ -543,8 +537,7 @@ class TestEniNetRendering(TestCase): def test_default_generation(self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path): - tmp_dir = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, tmp_dir) + tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path) @@ -576,7 +569,7 @@ iface eth1000 inet dhcp self.assertEqual(expected.lstrip(), contents.lstrip()) -class TestEniNetworkStateToEni(TestCase): +class TestEniNetworkStateToEni(CiTestCase): mycfg = { 'config': [{"type": "physical", "name": "eth0", "mac_address": "c0:d6:9f:2c:e8:80", @@ -607,7 +600,7 @@ class TestEniNetworkStateToEni(TestCase): self.assertNotIn("hwaddress", rendered) -class TestCmdlineConfigParsing(TestCase): +class TestCmdlineConfigParsing(CiTestCase): simple_cfg = { 'config': [{"type": "physical", "name": "eth0", "mac_address": "c0:d6:9f:2c:e8:80", @@ -665,7 +658,7 @@ class TestCmdlineConfigParsing(TestCase): self.assertEqual(found, self.simple_cfg) -class TestCmdlineReadKernelConfig(TempDirTestCase): +class TestCmdlineReadKernelConfig(CiTestCase): macs = { 'eth0': '14:02:ec:42:48:00', 'eno1': '14:02:ec:42:48:01', @@ -673,8 +666,7 @@ class TestCmdlineReadKernelConfig(TempDirTestCase): def test_ip_cmdline_read_kernel_cmdline_ip(self): content = {'net-eth0.conf': DHCP_CONTENT_1} - populate_dir(self.tmp, content) - files = [os.path.join(self.tmp, k) for k in content.keys()] + files = sorted(populate_dir(self.tmp_dir(), content)) found = cmdline.read_kernel_cmdline_config( files=files, cmdline='foo ip=dhcp', mac_addrs=self.macs) exp1 = copy.deepcopy(DHCP_EXPECTED_1) @@ -684,8 +676,7 @@ class 
TestCmdlineReadKernelConfig(TempDirTestCase): def test_ip_cmdline_read_kernel_cmdline_ip6(self): content = {'net6-eno1.conf': DHCP6_CONTENT_1} - populate_dir(self.tmp, content) - files = [os.path.join(self.tmp, k) for k in content.keys()] + files = sorted(populate_dir(self.tmp_dir(), content)) found = cmdline.read_kernel_cmdline_config( files=files, cmdline='foo ip6=dhcp root=/dev/sda', mac_addrs=self.macs) @@ -701,8 +692,7 @@ class TestCmdlineReadKernelConfig(TempDirTestCase): def test_ip_cmdline_read_kernel_cmdline_none(self): # if there is no ip= or ip6= on cmdline, return value should be None content = {'net6-eno1.conf': DHCP6_CONTENT_1} - populate_dir(self.tmp, content) - files = [os.path.join(self.tmp, k) for k in content.keys()] + files = sorted(populate_dir(self.tmp_dir(), content)) found = cmdline.read_kernel_cmdline_config( files=files, cmdline='foo root=/dev/sda', mac_addrs=self.macs) self.assertEqual(found, None) @@ -710,8 +700,7 @@ class TestCmdlineReadKernelConfig(TempDirTestCase): def test_ip_cmdline_both_ip_ip6(self): content = {'net-eth0.conf': DHCP_CONTENT_1, 'net6-eth0.conf': DHCP6_CONTENT_1.replace('eno1', 'eth0')} - populate_dir(self.tmp, content) - files = [os.path.join(self.tmp, k) for k in sorted(content.keys())] + files = sorted(populate_dir(self.tmp_dir(), content)) found = cmdline.read_kernel_cmdline_config( files=files, cmdline='foo ip=dhcp ip6=dhcp', mac_addrs=self.macs) @@ -725,14 +714,12 @@ class TestCmdlineReadKernelConfig(TempDirTestCase): self.assertEqual(found['config'], expected) -class TestEniRoundTrip(TestCase): - def setUp(self): - super(TestCase, self).setUp() - self.tmp_dir = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp_dir) - +class TestEniRoundTrip(CiTestCase): def _render_and_read(self, network_config=None, state=None, eni_path=None, - links_prefix=None, netrules_path=None): + links_prefix=None, netrules_path=None, dir=None): + if dir is None: + dir = self.tmp_dir() + if network_config: ns = 
network_state.parse_net_config_data(network_config) elif state: @@ -747,8 +734,8 @@ class TestEniRoundTrip(TestCase): config={'eni_path': eni_path, 'links_path_prefix': links_prefix, 'netrules_path': netrules_path}) - renderer.render_network_state(self.tmp_dir, ns) - return dir2dict(self.tmp_dir) + renderer.render_network_state(dir, ns) + return dir2dict(dir) def testsimple_convert_and_render(self): network_config = eni.convert_eni_data(EXAMPLE_ENI) -- cgit v1.2.3 From dc6e7b49bac8b87a38fe57ee621177a8177fa2c0 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 25 Jan 2017 14:55:38 -0500 Subject: tools/mock-meta: support python2 or python3 and ipv6 in both. Fix mock-meta to work with python2 or python3. Additionally, it will now listen to ipv6 connections, where previously it would only work with ipv4. --- tools/mock-meta.py | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/tools/mock-meta.py b/tools/mock-meta.py index d74f9e31..a0d99441 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -18,10 +18,10 @@ Then: """ import functools -import httplib import json import logging import os +import socket import random import string import sys @@ -29,7 +29,13 @@ import yaml from optparse import OptionParser -from BaseHTTPServer import (HTTPServer, BaseHTTPRequestHandler) +try: + from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler + import httplib as hclient +except ImportError: + from http.server import HTTPServer, BaseHTTPRequestHandler + from http import client as hclient + log = logging.getLogger('meta-server') @@ -183,6 +189,10 @@ def get_ssh_keys(): return keys +class HTTPServerV6(HTTPServer): + address_family = socket.AF_INET6 + + class MetaDataHandler(object): def __init__(self, opts): @@ -250,7 +260,7 @@ class MetaDataHandler(object): key_id = int(mybe_key) key_name = key_ids[key_id] except: - raise WebException(httplib.BAD_REQUEST, + raise WebException(hclient.BAD_REQUEST, "Unknown key id %r" % 
mybe_key) # Extract the possible sub-params result = traverse(nparams[1:], { @@ -342,13 +352,13 @@ class Ec2Handler(BaseHTTPRequestHandler): return self._get_versions date = segments[0].strip().lower() if date not in self._get_versions(): - raise WebException(httplib.BAD_REQUEST, + raise WebException(hclient.BAD_REQUEST, "Unknown version format %r" % date) if len(segments) < 2: - raise WebException(httplib.BAD_REQUEST, "No action provided") + raise WebException(hclient.BAD_REQUEST, "No action provided") look_name = segments[1].lower() if look_name not in func_mapping: - raise WebException(httplib.BAD_REQUEST, + raise WebException(hclient.BAD_REQUEST, "Unknown requested data %r" % look_name) base_func = func_mapping[look_name] who = self.address_string() @@ -371,16 +381,16 @@ class Ec2Handler(BaseHTTPRequestHandler): data = func() if not data: data = '' - self.send_response(httplib.OK) + self.send_response(hclient.OK) self.send_header("Content-Type", "binary/octet-stream") self.send_header("Content-Length", len(data)) log.info("Sending data (len=%s):\n%s", len(data), format_text(data)) self.end_headers() - self.wfile.write(data) + self.wfile.write(data.encode()) except RuntimeError as e: log.exception("Error somewhere in the server.") - self.send_error(httplib.INTERNAL_SERVER_ERROR, message=str(e)) + self.send_error(hclient.INTERNAL_SERVER_ERROR, message=str(e)) except WebException as e: code = e.code log.exception(str(e)) @@ -408,7 +418,7 @@ def extract_opts(): help=("port from which to serve traffic" " (default: %default)")) parser.add_option("-a", "--addr", dest="address", action="store", type=str, - default='0.0.0.0', metavar="ADDRESS", + default='::', metavar="ADDRESS", help=("address from which to serve traffic" " (default: %default)")) parser.add_option("-f", '--user-data-file', dest='user_data_file', @@ -444,7 +454,7 @@ def run_server(): setup_fetchers(opts) log.info("CLI opts: %s", opts) server_address = (opts['address'], opts['port']) - server = 
HTTPServer(server_address, Ec2Handler) + server = HTTPServerV6(server_address, Ec2Handler) sa = server.socket.getsockname() log.info("Serving ec2 metadata on %s using port %s ...", sa[0], sa[1]) server.serve_forever() -- cgit v1.2.3 From 2de1c247e285cce0b25ab70abdc56ccd41019c27 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 25 Jan 2017 15:45:40 -0600 Subject: Fix eni rendering of multiple IPs per interface The iface:alias syntax for eni rendering is brittle with ipv6. Replace it with using multiple iface stanzas with the same iface name which is supported. Side-effect is that one can no longer do 'ifup $iface:$alias' but requires instead use of ip address {add|delete} instead. LP: #1657940 --- cloudinit/net/eni.py | 33 ++++++++++++++++++-------------- tests/unittests/test_net.py | 46 +++++++++++++++++++++++++++++++++------------ 2 files changed, 53 insertions(+), 26 deletions(-) diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index b06ffac9..5b249f1f 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -90,8 +90,6 @@ def _iface_add_attrs(iface, index): def _iface_start_entry(iface, index, render_hwaddress=False): fullname = iface['name'] - if index != 0: - fullname += ":%s" % index control = iface['control'] if control == "auto": @@ -113,6 +111,16 @@ def _iface_start_entry(iface, index, render_hwaddress=False): return lines +def _subnet_is_ipv6(subnet): + # 'static6' or 'dhcp6' + if subnet['type'].endswith('6'): + # This is a request for DHCPv6. + return True + elif subnet['type'] == 'static' and ":" in subnet['address']: + return True + return False + + def _parse_deb_config_data(ifaces, contents, src_dir, src_path): """Parses the file contents, placing result into ifaces. 
@@ -354,21 +362,23 @@ class Renderer(renderer.Renderer): sections = [] subnets = iface.get('subnets', {}) if subnets: - for index, subnet in zip(range(0, len(subnets)), subnets): + for index, subnet in enumerate(subnets): iface['index'] = index iface['mode'] = subnet['type'] iface['control'] = subnet.get('control', 'auto') subnet_inet = 'inet' - if iface['mode'].endswith('6'): - # This is a request for DHCPv6. - subnet_inet += '6' - elif iface['mode'] == 'static' and ":" in subnet['address']: - # This is a static IPv6 address. + if _subnet_is_ipv6(subnet): subnet_inet += '6' iface['inet'] = subnet_inet - if iface['mode'].startswith('dhcp'): + if subnet['type'].startswith('dhcp'): iface['mode'] = 'dhcp' + # do not emit multiple 'auto $IFACE' lines as older (precise) + # ifupdown complains + if True in ["auto %s" % (iface['name']) in line + for line in sections]: + iface['control'] = 'alias' + lines = list( _iface_start_entry( iface, index, render_hwaddress=render_hwaddress) + @@ -378,11 +388,6 @@ class Renderer(renderer.Renderer): for route in subnet.get('routes', []): lines.extend(self._render_route(route, indent=" ")) - if len(subnets) > 1 and index == 0: - tmpl = " post-up ifup %s:%s\n" - for i in range(1, len(subnets)): - lines.append(tmpl % (iface['name'], i)) - sections.append(lines) else: # ifenslave docs say to auto the slave devices diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 2c2bde96..b77d277a 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -219,11 +219,9 @@ NETWORK_CONFIGS = { auto eth99 iface eth99 inet dhcp - post-up ifup eth99:1 - - auto eth99:1 - iface eth99:1 inet static + # control-alias eth99 + iface eth99 inet static address 192.168.21.3/24 dns-nameservers 8.8.8.8 8.8.4.4 dns-search barley.maas sach.maas @@ -261,6 +259,27 @@ NETWORK_CONFIGS = { - wark.maas """), }, + 'v4_and_v6': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 
inet dhcp + + # control-alias iface0 + iface iface0 inet6 dhcp + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp4'} + - {'type': 'dhcp6'} + """).rstrip(' '), + }, 'all': { 'expected_eni': ("""\ auto lo @@ -298,11 +317,9 @@ iface br0 inet static address 192.168.14.2/24 bridge_ports eth3 eth4 bridge_stp off - post-up ifup br0:1 - -auto br0:1 -iface br0:1 inet6 static +# control-alias br0 +iface br0 inet6 static address 2001:1::1/64 auto bond0.200 @@ -319,11 +336,9 @@ iface eth0.101 inet static mtu 1500 vlan-raw-device eth0 vlan_id 101 - post-up ifup eth0.101:1 - -auto eth0.101:1 -iface eth0.101:1 inet static +# control-alias eth0.101 +iface eth0.101 inet static address 192.168.2.10/24 post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true @@ -758,6 +773,13 @@ class TestEniRoundTrip(CiTestCase): entry['expected_eni'].splitlines(), files['/etc/network/interfaces'].splitlines()) + def testsimple_render_v4_and_v6(self): + entry = NETWORK_CONFIGS['v4_and_v6'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + def test_routes_rendered(self): # as reported in bug 1649652 conf = [ -- cgit v1.2.3 From 9a061c1838c3e9d2ed3f3d73e30248f7a79af7da Mon Sep 17 00:00:00 2001 From: Robin Naundorf Date: Wed, 25 Jan 2017 20:04:29 +0100 Subject: Fix small typo and change iso-filename for consistency * Fix small typo * Fix ISO-Filename for consistency --- doc/rtd/topics/datasources/altcloud.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/rtd/topics/datasources/altcloud.rst b/doc/rtd/topics/datasources/altcloud.rst index 8646e77e..202b0a4a 100644 --- a/doc/rtd/topics/datasources/altcloud.rst +++ b/doc/rtd/topics/datasources/altcloud.rst @@ -66,7 +66,7 @@ NOTE: The file name on the ISO must be: ``user-data.txt`` .. 
sourcecode:: sh - % cp simple_scirpt.bash my-iso/user-data.txt + % cp simple_script.bash my-iso/user-data.txt % genisoimage -o user-data.iso -r my-iso Verify the ISO @@ -75,7 +75,7 @@ Verify the ISO .. sourcecode:: sh % sudo mkdir /media/vsphere_iso - % sudo mount -o loop JoeV_CI_02.iso /media/vsphere_iso + % sudo mount -o loop user-data.iso /media/vsphere_iso % cat /media/vsphere_iso/user-data.txt % sudo umount /media/vsphere_iso -- cgit v1.2.3 From 9698b0ded3d7e72f54513f248d8da41e08472f68 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 4 Feb 2017 02:24:55 +0000 Subject: Add tools/ds-identify to identify datasources available. ds-identify is run here from the generator. If ds-identify does not see any datasources, it can completely disable cloud-init. The big value in this is that if there is no datasource, no python will ever be loaded, and cloud-init will be disabled.o The default policy being added here is: search,found=all,maybe=all,notfound=disabled That means: - enable (in 'datasource_list') all sources that are found. - if none are found, enable all 'maybe'. - if no maybe are found, then disable cloud-init. On platforms without DMI (everything except for aarch64 and x86), the default 'notfound' setting is 'enabled'. This is because many of the detection mechanisms rely on dmi data, which is present only on x86 and aarch64. 
--- cloudinit/settings.py | 2 + setup.py | 3 +- systemd/cloud-init-generator | 39 +- tools/ds-identify | 1015 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1056 insertions(+), 3 deletions(-) create mode 100755 tools/ds-identify diff --git a/cloudinit/settings.py b/cloudinit/settings.py index b1fdd31f..692ff5e5 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -14,6 +14,8 @@ CFG_ENV_NAME = "CLOUD_CFG" # This is expected to be a yaml formatted file CLOUD_CONFIG = '/etc/cloud/cloud.cfg' +RUN_CLOUD_CONFIG = '/run/cloud-init/cloud.cfg' + # What u get if no config is provided CFG_BUILTIN = { 'datasource_list': [ diff --git a/setup.py b/setup.py index 04036078..e6693c90 100755 --- a/setup.py +++ b/setup.py @@ -168,7 +168,8 @@ else: (ETC + '/cloud/templates', glob('templates/*')), (ETC + '/NetworkManager/dispatcher.d/', ['tools/hook-network-manager']), (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), - (USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init', + (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', + 'tools/uncloud-init', 'tools/write-ssh-key-fingerprints']), (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), (USR + '/share/doc/cloud-init/examples', diff --git a/systemd/cloud-init-generator b/systemd/cloud-init-generator index fedb6309..bd9f2678 100755 --- a/systemd/cloud-init-generator +++ b/systemd/cloud-init-generator @@ -6,6 +6,8 @@ DEBUG_LEVEL=1 LOG_D="/run/cloud-init" ENABLE="enabled" DISABLE="disabled" +FOUND="found" +NOTFOUND="notfound" RUN_ENABLED_FILE="$LOG_D/$ENABLE" CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target" CLOUD_TARGET_NAME="cloud-init.target" @@ -74,10 +76,30 @@ default() { _RET="$ENABLE" } +check_for_datasource() { + local ds_rc="" dsidentify="/usr/lib/cloud-init/ds-identify" + if [ ! -x "$dsidentify" ]; then + debug 1 "no ds-identify in $dsidentify. _RET=$FOUND" + return 0 + fi + $dsidentify + ds_rc=$? 
+ debug 1 "ds-identify rc=$ds_rc" + if [ "$ds_rc" = "0" ]; then + _RET="$FOUND" + debug 1 "ds-identify _RET=$_RET" + return 0 + fi + _RET="$NOTFOUND" + debug 1 "ds-identify _RET=$_RET" + return 1 +} + main() { local normal_d="$1" early_d="$2" late_d="$3" local target_name="multi-user.target" gen_d="$early_d" local link_path="$gen_d/${target_name}.wants/${CLOUD_TARGET_NAME}" + local ds="$NOTFOUND" debug 1 "$0 normal=$normal_d early=$early_d late=$late_d" debug 2 "$0 $*" @@ -93,7 +115,20 @@ main() { debug 0 "search $search returned $ret" fi done - + + # enable AND ds=found == enable + # enable AND ds=notfound == disable + # disable || == disabled + if [ "$result" = "$ENABLE" ]; then + debug 1 "checking for datasource" + check_for_datasource + ds=$_RET + if [ "$ds" = "$NOTFOUND" ]; then + debug 1 "cloud-init is enabled but no datasource found, disabling" + result="$DISABLE" + fi + fi + if [ "$result" = "$ENABLE" ]; then if [ -e "$link_path" ]; then debug 1 "already enabled: no change needed" @@ -124,7 +159,7 @@ main() { rm -f "$RUN_ENABLED_FILE" fi else - debug 0 "unexpected result '$result'" + debug 0 "unexpected result '$result' 'ds=$ds'" ret=3 fi return $ret diff --git a/tools/ds-identify b/tools/ds-identify new file mode 100755 index 00000000..203eac0d --- /dev/null +++ b/tools/ds-identify @@ -0,0 +1,1015 @@ +#!/bin/sh +# +# ds-identify is configured via /etc/cloud/ds-identify.cfg +# or on the kernel command line. It takes primarily 2 inputs: +# datasource: can specify the datasource that should be used. +# kernel command line option: ci.datasource= +# +# policy: a string that indicates how ds-identify should operate. +# kernel command line option: ci.di.policy= +# default setting is: +# search,found=all,maybe=all,notfound=disable + +# report: write config to /run/cloud-init/cloud.cfg.report (instead of +# /run/cloud-init/cloud.cfg, which effectively makes this dry-run). 
+# enable: do nothing +# ds-identify writes no config and just exits success +# the caller (cloud-init-generator) then enables cloud-init to run +# just without any aid from ds-identify. +# disable: disable cloud-init +# +# [report,]found=value,maybe=value,notfound=value +# found: (default=first) +# first: use the first found do no further checking +# all: enable all DS_FOUND +# +# maybe: (default=all) +# if nothing returned 'found', then how to handle maybe. +# no network sources are allowed to return 'maybe'. +# all: enable all DS_MAYBE +# none: ignore any DS_MAYBE +# +# notfound: (default=disable) +# disable: disable cloud-init +# enable: enable cloud-init +# +# +# zesty: +# policy: found=first,maybe=all,none=disable +# xenial: +# policy: found=all,maybe=all,none=enable +# and then at a later date + + +set -u +set -f +UNAVAILABLE="unavailable" +CR=" +" +ERROR="error" +DI_ENABLED="enabled" +DI_DISABLED="disabled" + +DI_DEBUG_LEVEL="${DEBUG_LEVEL:-1}" + +PATH_ROOT=${PATH_ROOT:-""} +PATH_RUN=${PATH_RUN:-"${PATH_ROOT}/run"} +PATH_SYS_CLASS_DMI_ID=${PATH_SYS_CLASS_DMI_ID:-${PATH_ROOT}/sys/class/dmi/id} +PATH_SYS_HYPERVISOR=${PATH_SYS_HYPERVISOR:-${PATH_ROOT}/sys/hypervisor} +PATH_SYS_CLASS_BLOCK=${PATH_SYS_CLASS_BLOCK:-${PATH_ROOT}/sys/class/block} +PATH_DEV_DISK="${PATH_DEV_DISK:-${PATH_ROOT}/dev/disk}" +PATH_VAR_LIB_CLOUD="${PATH_VAR_LIB_CLOUD:-${PATH_ROOT}/var/lib/cloud}" +PATH_DI_CONFIG="${PATH_DI_CONFIG:-${PATH_ROOT}/etc/cloud/ds-identify.cfg}" +PATH_PROC_CMDLINE="${PATH_PROC_CMDLINE:-${PATH_ROOT}/proc/cmdline}" +PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}" +PATH_CLOUD_CONFD="${PATH_CLOUD_CONFD:-${PATH_ROOT}/etc/cloud}" +PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}" +PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg} + +DI_LOG="${DI_LOG:-${PATH_RUN_CI}/ds-identify.log}" +_DI_LOGGED="" + +# set DI_MAIN='noop' in environment to source this file with no main called. 
+DI_MAIN=${DI_MAIN:-main} + +DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}" +DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}" +DI_DMI_PRODUCT_NAME="" +DI_DMI_SYS_VENDOR="" +DI_DMI_PRODUCT_SERIAL="" +DI_DMI_PRODUCT_UUID="" +DI_FS_LABELS="" +DI_KERNEL_CMDLINE="" +DI_VIRT="" + +DI_UNAME_KERNEL_NAME="" +DI_UNAME_KERNEL_RELEASE="" +DI_UNAME_KERNEL_VERSION="" +DI_UNAME_MACHINE="" +DI_UNAME_NODENAME="" +DI_UNAME_OPERATING_SYSTEM="" +DI_UNAME_CMD_OUT="" + +DS_FOUND=0 +DS_NOT_FOUND=1 +DS_MAYBE=2 + +DI_DSNAME="" +# this has to match the builtin list in cloud-init, it is what will +# be searched if there is no setting found in config. +DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ +CloudSigma CloudStack DigitalOcean Ec2 OpenNebula OpenStack OVF SmartOS" +DI_DSLIST="" +DI_MODE="" +DI_REPORT="" +DI_ON_FOUND="" +DI_ON_MAYBE="" +DI_ON_NOTFOUND="" + + +error() { + set -- "ERROR:" "$@"; + debug 0 "$@" + stderr "$@" +} +warn() { + set -- "WARN:" "$@" + debug 0 "$@" + stderr "$@" +} + +stderr() { echo "$@" 1>&2; } + +debug() { + local lvl="$1" + shift + [ "$lvl" -gt "${DI_DEBUG_LEVEL}" ] && return + + if [ "$_DI_LOGGED" != "$DI_LOG" ]; then + # first time here, open file descriptor for append + case "$DI_LOG" in + stderr) :;; + ?*/*) + if [ ! -d "${DI_LOG%/*}" ]; then + mkdir -p "${DI_LOG%/*}" || { + stderr "ERROR:" "cannot write to $DI_LOG" + DI_LOG="stderr" + } + fi + esac + if [ "$DI_LOG" = "stderr" ]; then + exec 3>&2 + else + ( exec 3>>"$DI_LOG" ) && exec 3>>"$DI_LOG" || { + stderr "ERROR: failed writing to $DI_LOG. logging to stderr."; + exec 3>&2 + DI_LOG="stderr" + } + fi + _DI_LOGGED="$DI_LOG" + fi + echo "$@" 1>&3 +} + +get_dmi_field() { + local path="${PATH_SYS_CLASS_DMI_ID}/$1" + if [ ! -f "$path" ] || [ ! 
-r "$path" ]; then + _RET="$UNAVAILABLE" + return + fi + read _RET < "${path}" || _RET="$ERROR" +} + +block_dev_with_label() { + local p="${PATH_DEV_DISK}/by-label/$1" + [ -b "$p" ] || return 1 + _RET=$p + return 0 +} + +read_fs_labels() { + cached "${DI_FS_LABELS}" && return 0 + # do not rely on links in /dev/disk which might not be present yet. + # note that older blkid versions do not report DEVNAME in 'export' output. + local out="" ret=0 oifs="$IFS" line="" delim="," + local labels="" + if is_container; then + # blkid will in a container, or at least currently in lxd + # not provide useful information. + DI_FS_LABELS="$UNAVAILABLE:container" + else + out=$(blkid -c /dev/null -o export) || { + ret=$? + error "failed running [$ret]: blkid -c /dev/null -o export" + return $ret + } + IFS="$CR" + set -- $out + IFS="$oifs" + for line in "$@"; do + case "${line}" in + LABEL=*) labels="${labels}${line#LABEL=}${delim}";; + esac + done + DI_FS_LABELS="${labels%${delim}}" + fi +} + +cached() { + [ -n "$1" ] && _RET="$1" && return || return 1 +} + + +has_cdrom() { + [ -e "${PATH_ROOT}/dev/cdrom" ] +} + +read_virt() { + cached "$DI_VIRT" && return 0 + local out="" r="" virt="${UNAVAILABLE}" + if [ -d /run/systemd ]; then + out=$(systemd-detect-virt 2>&1) + r=$? 
+ if [ $r -eq 0 ] || { [ $r -ne 0 ] && [ "$out" = "none" ]; }; then + virt="$out" + fi + fi + DI_VIRT=$virt +} + +is_container() { + case "${DI_VIRT}" in + lxc|lxc-libvirt|systemd-nspawn|docker|rkt) return 0;; + *) return 1;; + esac +} + +read_kernel_cmdline() { + cached "${DI_KERNEL_CMDLINE}" && return + local cmdline="" fpath="${PATH_PROC_CMDLINE}" + if is_container; then + local p1path="${PATH_PROC_1_CMDLINE}" x="" + cmdline="${UNAVAILABLE}:container" + if [ -f "$p1path" ] && x=$(tr '\0' ' ' < "$p1path"); then + cmdline=$x + fi + elif [ -f "$fpath" ]; then + read cmdline <"$fpath" + else + cmdline="${UNAVAILABLE}:no-cmdline" + fi + DI_KERNEL_CMDLINE="$cmdline" +} + +read_dmi_sys_vendor() { + cached "${DI_DMI_SYS_VENDOR}" && return + get_dmi_field sys_vendor + DI_DMI_SYS_VENDOR="$_RET" +} + +read_dmi_product_name() { + cached "${DI_DMI_PRODUCT_NAME}" && return + get_dmi_field product_name + DI_DMI_PRODUCT_NAME="$_RET" +} + +read_dmi_product_uuid() { + cached "${DI_DMI_PRODUCT_UUID}" && return + get_dmi_field product_uuid + DI_DMI_PRODUCT_UUID="$_RET" +} + +read_dmi_product_serial() { + cached "${DI_DMI_PRODUCT_SERIAL}" && return + get_dmi_field product_serial + DI_DMI_PRODUCT_SERIAL="$_RET" +} + +read_uname_info() { + # run uname, and parse output. + # uname is tricky to parse as it outputs always in a given order + # independent of option order. kernel-version is known to have spaces. + # 1 -s kernel-name + # 2 -n nodename + # 3 -r kernel-release + # 4.. -v kernel-version(whitespace) + # N-2 -m machine + # N-1 -o operating-system + cached "${DI_UNAME_CMD_OUT}" && return + local out="${1:-}" ret=0 buf="" + if [ -z "$out" ]; then + out=$(uname -snrvmo) || { + ret=$? 
+ error "failed reading uname with 'uname -snrvmo'" + return $ret + } + fi + set -- $out + DI_UNAME_KERNEL_NAME="$1" + DI_UNAME_NODENAME="$2" + DI_UNAME_KERNEL_RELEASE="$3" + shift 3 + while [ $# -gt 2 ]; do + buf="$buf $1" + shift + done + DI_UNAME_KERNEL_VERSION="${buf# }" + DI_UNAME_MACHINE="$1" + DI_UNAME_OPERATING_SYSTEM="$2" + DI_UNAME_CMD_OUT="$out" + return 0 +} + +parse_yaml_array() { + # parse a yaml single line array value ([1,2,3], not key: [1,2,3]). + # supported with or without leading and closing brackets + # ['1'] or [1] + # '1', '2' + local val="$1" oifs="$IFS" ret="" tok="" + val=${val#[} + val=${val%]} + IFS=","; set -- $val; IFS="$oifs" + for tok in "$@"; do + trim "$tok" + unquote "$_RET" + ret="${ret} $_RET" + done + _RET="${ret# }" +} + +read_datasource_list() { + cached "$DI_DSLIST" && return + local dslist="" + # if DI_DSNAME is set directly, then avoid parsing config. + if [ -n "${DI_DSNAME}" ]; then + dslist="${DI_DSNAME}" + fi + + # LP: #1582323. cc:{'datasource_list': ['name']} + # more generically cc:[end_cc] + local cb="]" ob="[" + case "$DI_KERNEL_CMDLINE" in + *cc:*datasource_list*) + t=${DI_KERNEL_CMDLINE##*datasource_list} + t=${t%%$cb*} + t=${t##*$ob} + parse_yaml_array "$t" + dslist=${_RET} + ;; + esac + if [ -z "$dslist" ] && check_config datasource_list; then + debug 1 "$_RET_fname set datasource_list: $_RET" + parse_yaml_array "$_RET" + dslist=${_RET} + fi + if [ -z "$dslist" ]; then + dslist=${DI_DSLIST_DEFAULT} + debug 1 "no datasource_list found, using default:" $dslist + fi + DI_DSLIST=$dslist + return 0 +} + +dmi_product_name_matches() { + is_container && return 1 + case "${DI_DMI_PRODUCT_NAME}" in + $1) return 0;; + esac + return 1 +} + +dmi_product_name_is() { + is_container && return 1 + [ "${DI_DMI_PRODUCT_NAME}" = "$1" ] +} + +dmi_sys_vendor_is() { + is_container && return 1 + [ "${DI_DMI_SYS_VENDOR}" = "$1" ] +} + +has_fs_with_label() { + local label=",$1," + case "${DI_FS_LABELS}" in + *,$label,*) return 0;; + 
esac + return 1 +} + +nocase_equal() { + # nocase_equal(a, b) + # return 0 if case insenstive comparision a.lower() == b.lower() + # different lengths + [ "${#1}" = "${#2}" ] || return 1 + # case sensitive equal + [ "$1" = "$2" ] && return 0 + + local delim="-delim-" + out=$(echo "$1${delim}$2" | tr A-Z a-z) + [ "${out#*${delim}}" = "${out%${delim}*}" ] +} + +check_seed_dir() { + # check_seed_dir(name, [required]) + # check the seed dir /var/lib/cloud/seed/ for 'required' + # required defaults to 'meta-data' + local name="$1" + local dir="${PATH_VAR_LIB_CLOUD}/seed/$name" + [ -d "$dir" ] || return 1 + shift + if [ $# -eq 0 ]; then + set -- meta-data + fi + local f="" + for f in "$@"; do + [ -f "$dir/$f" ] || return 1 + done + return 0 +} + +probe_floppy() { + cached "${STATE_FLOPPY_PROBED}" && return "${STATE_FLOPPY_PROBED}" + local fpath=/dev/floppy + + [ -b "$fpath" ] || + { STATE_FLOPPY_PROBED=1; return 1; } + + modprobe --use-blacklist floppy >/dev/null 2>&1 || + { STATE_FLOPPY_PROBED=1; return 1; } + + udevadm settle "--exit-if-exists=$fpath" || + { STATE_FLOPPY_PROBED=1; return 1; } + + [ -b "$fpath" ] + STATE_FLOPPY_PROBED=$? + return "${STATE_FLOPPY_PROBED}" +} + + +dscheck_CloudStack() { + is_container && return ${DS_NOT_FOUND} + dmi_product_name_matches "CloudStack*" && return $DS_FOUND + return $DS_NOT_FOUND +} + +dscheck_CloudSigma() { + # http://paste.ubuntu.com/23624795/ + dmi_product_name_is "CloudSigma" && return $DS_FOUND + return $DS_NOT_FOUND +} + +check_config() { + # somewhat hackily read config for 'key' in files matching 'files' + # currently does not respect any hierarchy. + local key="$1" files="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" + if [ $# -eq 1 ]; then + files="$bp ${bp}.d/*.cfg" + else + files="$*" + fi + shift + set +f; set -- $files; set +f; + if [ "$1" = "$files" -a ! 
-f "$1" ]; then + return 1 + fi + local fname="" line="" ret="" found=0 found_fn="" + for fname in "$@"; do + [ -f "$fname" ] || continue + while read line; do + line=${line%%#*} + case "$line" in + $key:\ *|$key:) + ret=${line#*:}; + ret=${ret# }; + found=$((found+1)) + found_fn="$fname";; + esac + done <"$fname" + done + if [ $found -ne 0 ]; then + _RET="$ret" + _RET_fname="$found_fn" + return 0 + fi + return 1 +} + +dscheck_MAAS() { + is_container && return "${DS_NOT_FOUND}" + # heuristic check for ephemeral boot environment + # for maas that do not set 'ci.dsname=' in the ephemeral environment + # these have iscsi root and cloud-config-url on the cmdline. + local maasiqn="iqn.2004-05.com.ubuntu:maas" + case "${DI_KERNEL_CMDLINE}" in + *cloud-config-url=*${maasiqn}*|*${maasiqn}*cloud-config-url=*) + return ${DS_FOUND} + ;; + esac + + # check config files written by maas for installed system. + local confd="${PATH_CLOUD_CONFD}" + local fnmatch="$confd/*maas*.cfg $confd/*kernel_cmdline*.cfg" + if check_config "MAAS" "$fnmatch"; then + return "${DS_FOUND}" + fi + return ${DS_NOT_FOUND} +} + +dscheck_NoCloud() { + local fslabel="cidata" d="" + for d in nocloud nocloud-net; do + check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} + done + if has_fs_with_label "${fslabel}"; then + return ${DS_FOUND} + fi + return ${DS_NOT_FOUND} +} + +check_configdrive_v2() { + if has_fs_with_label "config-2"; then + return ${DS_FOUND} + fi + return ${DS_NOT_FOUND} +} + +check_configdrive_v1() { + # FIXME: this has to check any file system that is vfat... + # for now, just return not found. + return ${DS_NOT_FOUND} +} + +dscheck_ConfigDrive() { + local ret="" + check_configdrive_v2 + ret=$? 
+ [ $DS_FOUND -eq $ret ] && return $ret + + check_configdrive_v1 +} + +dscheck_DigitalOcean() { + dmi_sys_vendor_is DigitalOcean && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + +dscheck_OpenNebula() { + check_seed_dir opennebula && return ${DS_FOUND} + has_fs_with_label "CONTEXT" && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + +dscheck_OVF() { + local p="" + check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}" + + has_cdrom || return ${DS_NOT_FOUND} + + # FIXME: currently just return maybe if there is a cdrom + # ovf iso9660 transport does not specify an fs label. + # better would be to check if + return ${DS_MAYBE} +} + +dscheck_Azure() { + # http://paste.ubuntu.com/23630873/ + # $ grep /sr0 /run/blkid/blkid.tab + # /dev/sr0 + # + check_seed_dir azure ovf-env.xml && return ${DS_FOUND} + + [ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND} + + has_fs_with_label "rd_rdfe_*" && return ${DS_FOUND} + + return ${DS_NOT_FOUND} +} + +dscheck_Bigstep() { + # bigstep is activated by presense of seed file 'url' + check_seed_dir "bigstep" url && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + +dscheck_Ec2() { + # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html + # http://paste.ubuntu.com/23630859/ + local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" + is_container && return ${DS_NOT_FOUND} + # if the (basically) xen specific /sys/hypervisor/uuid starts with 'ec2' + if [ -r "$hvuuid" ] && read uuid < "$hvuuid" && + [ "${uuid#ec2}" != "$uuid" ]; then + return ${DS_FOUND} + fi + + # product uuid and product serial start with case insensitive + local uuid=${DI_DMI_PRODUCT_UUID} serial=${DI_DMI_PRODUCT_SERIAL} + case "$uuid:$serial" in + [Ee][Cc]2*:[Ee][Cc]2) + # both start with ec2, now check for case insenstive equal + nocase_equal "$uuid" "$serial" && return ${DS_FOUND};; + esac + + # search through config files to check for platform + local f="" match="${PATH_CLOUD_CONFD}/*ec2*.cfg" + # look for the key 'platform' 
(datasource/ec2/look_alike/behavior) + if check_config platform "$match"; then + if [ "$platform" != "Unknown" ]; then + _RET="$name" + return "${DS_FOUND}" + fi + fi + + return ${DS_NOT_FOUND} +} + +dscheck_GCE() { + if dmi_product_name_is "Google Compute Engine"; then + return ${DS_FOUND} + fi + return ${DS_NOT_FOUND} +} + +dscheck_OpenStack() { + # the openstack metadata http service + + # if there is a config drive, then do not check metadata + # FIXME: if config drive not in the search list, then we should not + # do this check. + check_configdrive_v2 + if [ $? -eq ${DS_FOUND} ]; then + return ${DS_NOT_FOUND} + fi + if dmi_product_name_is "OpenStack Nova"; then + return ${DS_FOUND} + fi + case "${DI_VIRT}" in + lxc|lxc-libvirt) + # FIXME: This could be container on openstack (nova-lxd) + # or nova-libvirt-lxc + return ${DS_NOT_FOUND} + ;; + esac + + return ${DS_NOT_FOUND} +} + +dscheck_AliYun() { + # aliyun is not enabled by default (LP: #1638931) + # so if we are here, it is because the datasource_list was + # set to include it. Thus, 'maybe'. 
+ return $DS_MAYBE +} + +dscheck_AltCloud() { + # ctype: either the dmi product name, or contents of + # /etc/sysconfig/cloud-info + # if ctype == "vsphere" + # device = device with label 'CDROM' + # elif ctype == "rhev" + # device = /dev/floppy + # then, filesystem on that device must have + # user-data.txt or deltacloud-user-data.txt + local ctype="" dev="" + local match_rhev="[Rr][Hh][Ee][Vv]" + local match_vsphere="[Vv][Ss][Pp][Hh][Ee][Rr][Ee]" + local cinfo="${PATH_ROOT}/etc/sysconfig/cloud-info" + if [ -f "$cinfo" ]; then + read ctype < "$cinfo" + else + ctype="${DI_DMI_PRODUCT_NAME}" + fi + case "$ctype" in + ${match_rhev}) + probe_floppy || return ${DS_NOT_FOUND} + dev="/dev/floppy" + ;; + ${match_vsphere}) + block_dev_with_label CDROM || return ${DS_NOT_FOUND} + dev="$_RET" + ;; + *) return ${DS_NOT_FOUND};; + esac + + # FIXME: need to check $dev for user-data.txt or deltacloud-user-data.txt + : "$dev" + return $DS_MAYBE +} + +dscheck_SmartOS() { + # joyent cloud has two virt types: kvm and container + # on kvm, product name on joyent public cloud shows 'SmartDC HVM' + # on the container platform, uname's version has: BrandZ virtual linux + local smartdc_kver="BrandZ virtual linux" + dmi_product_name_matches "SmartDC*" && return $DS_FOUND + if [ "${DI_UNAME_KERNEL_VERSION}" = "${smartdc_kver}" ] && + [ "${DI_VIRT}" = "container-other" ]; then + return ${DS_FOUND} + fi + return ${DS_NOT_FOUND} +} + +dscheck_None() { + return ${DS_NOT_FOUND} +} + +collect_info() { + read_virt + read_kernel_cmdline + read_uname_info + read_config + read_datasource_list + read_dmi_sys_vendor + read_dmi_product_name + read_dmi_product_serial + read_dmi_product_uuid + read_fs_labels +} + +print_info() { + collect_info + _print_info +} + +_print_info() { + local n="" v="" vars="" + vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" + vars="$vars DMI_PRODUCT_UUID" + vars="$vars FS_LABELS KERNEL_CMDLINE VIRT" + vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE 
UNAME_KERNEL_VERSION" + vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" + vars="$vars DSNAME DSLIST" + vars="$vars MODE REPORT ON_FOUND ON_MAYBE ON_NOTFOUND" + for v in ${vars}; do + eval n='${DI_'"$v"'}' + echo "$v=$n" + done + echo "pid=$$ ppid=$PPID" + is_container && echo "is_container=true" || echo "is_container=false" +} + +write_result() { + local runcfg="${PATH_RUN_CI_CFG}" ret="" line="" + if [ "$DI_REPORT" = "true" ]; then + runcfg="$runcfg.report" + fi + for line in "$@"; do + echo "$line" + done > "$runcfg" + ret=$? + [ $ret -eq 0 ] || { + error "failed to write to ${runcfg}" + return $ret + } + return 0 +} + +found() { + local list="" ds="" + # always we write the None datasource last. + for ds in "$@" None; do + list="${list:+${list}, }$ds" + done + write_result "datasource_list: [ $list ]" + return +} + +trim() { + set -- $* + _RET="$*" +} + +unquote() { + # remove quotes from quoted value + local quote='"' tick="'" + local val="$1" + case "$val" in + ${quote}*${quote}|${tick}*${tick}) + val=${val#?}; val=${val%?};; + esac + _RET="$val" +} + +_read_config() { + # reads config from stdin, modifies _rc scoped environment vars. + # rc_policy and _rc_dsname + local line="" hash="#" ckey="" key="" val="" + while read line; do + line=${line%%${hash}*} + key="${line%%:*}" + + # no : in the line. + [ "$key" = "$line" ] && continue + trim "$key" + key=${_RET} + + val="${line#*:}" + trim "$val" + unquote "${_RET}" + val=${_RET} + case "$key" in + datasource) _rc_dsname="$val";; + policy) _rc_policy="$val";; + esac + done +} + +parse_warn() { + echo "WARN: invalid value '$2' for key '$1'. Using $1=$3." 1>&2 +} + +parse_def_policy() { + local _rc_mode="" _rc_report="" _rc_found="" _rc_maybe="" _rc_notfound="" + local ret="" + parse_policy "$@" + ret=$? 
+ _def_mode=$_rc_mode + _def_report=$_rc_report + _def_found=$_rc_found + _def_maybe=$_rc_maybe + _def_notfound=$_rc_notfound + return $ret +} + +parse_policy() { + # parse_policy(policy, default) + # parse a policy string. sets + # _rc_mode (enable|disable,search) + # _rc_report true|false + # _rc_found first|all + # _rc_maybe all|none + # _rc_notfound enable|disable + local def="" + case "$DI_UNAME_MACHINE" in + # these have dmi data + i?86|x86_64|aarch64) def=${DI_DEFAULT_POLICY};; + *) def=${DI_DEFAULT_POLICY_NO_DMI};; + esac + local policy="$1" + local _def_mode="" _def_report="" _def_found="" _def_maybe="" + local _def_notfound="" + if [ $# -eq 1 ] || [ "$2" != "-" ]; then + def=${2:-${def}} + parse_def_policy "$def" - + fi + + local mode="" report="" found="" maybe="" notfound="" + local oifs="$IFS" tok="" val="" + IFS=","; set -- $policy; IFS="$oifs" + for tok in "$@"; do + val=${tok#*=} + case "$tok" in + report) report=true;; + $DI_ENABLED|$DI_DISABLED|search) mode=$tok;; + found=all|found=first) found=$val;; + maybe=all|maybe=none) maybe=$val;; + notfound=$DI_ENABLED|notfound=$DI_DISABLED) notfound=$val;; + found=*) + parse_warn found "$val" "${_def_found}" + found=${_def_found};; + maybe=*) + parse_warn maybe "$val" "${_def_maybe}" + maybe=${_def_maybe};; + notfound=*) + parse_warn notfound "$val" "${_def_notfound}" + notfound=${_def_notfound};; + esac + done + report=${report:-${_def_report:-false}} + _rc_report=${report} + _rc_mode=${mode:-${_def_mode}} + _rc_found=${found:-${_def_found}} + _rc_maybe=${maybe:-${_def_maybe}} + _rc_notfound=${notfound:-${_def_notfound}} +} + +read_config() { + local config=${PATH_DI_CONFIG} + local _rc_dsname="" _rc_policy="" ret="" + if [ -f "$config" ]; then + _read_config < "$config" + ret=$? + elif [ -e "$config" ]; then + error "$config exists but is not a file!" 
+ ret=1 + fi + local tok="" key="" val="" + for tok in ${DI_KERNEL_CMDLINE}; do + key=${tok%%=*} + val=${tok#*=} + case "$key" in + ci.ds) _rc_dsname="$val";; + ci.datasource) _rc_dsname="$val";; + ci.di.policy) _rc_policy="$val";; + esac + done + + local _rc_mode _rc_report _rc_found _rc_maybe _rc_notfound + parse_policy "${_rc_policy}" + debug 1 "policy loaded: mode=${_rc_mode} report=${_rc_report}" \ + "found=${_rc_found} maybe=${_rc_maybe} notfound=${_rc_notfound}" + DI_MODE=${_rc_mode} + DI_REPORT=${_rc_report} + DI_ON_FOUND=${_rc_found} + DI_ON_MAYBE=${_rc_maybe} + DI_ON_NOTFOUND=${_rc_notfound} + + DI_DSNAME="${_rc_dsname}" + return $ret +} + + +manual_clean_and_existing() { + [ -f "${PATH_VAR_LIB_CLOUD}/instance/manual-clean" ] +} + +main() { + local dscheck="" ret_dis=1 ret_en=0 + collect_info + + if [ ! -e "$PATH_RUN_CI_CFG" ]; then + # the first time the generator is run. + _print_info >> "$DI_LOG" + fi + + case "$DI_MODE" in + $DI_DISABLED) + debug 1 "mode=$DI_DISABLED. returning $ret_dis" + return $ret_dis + ;; + $DI_ENABLED) + debug 1 "mode=$DI_ENABLED. returning $ret_en" + return $ret_en;; + search) :;; + esac + + if [ -n "${DI_DSNAME}" ]; then + debug 1 "datasource '$DI_DSNAME' specified." + found "$DI_DSNAME" + return + fi + + if manual_clean_and_existing; then + debug 1 "manual_cache_clean enabled. Not writing datasource_list." + write_result "# manual_cache_clean." + return + fi + + # if there is only a single entry in $DI_DSLIST + set -- $DI_DSLIST + if [ $# -eq 1 ] || [ $# -eq 2 -a "$2" = "None" ] ; then + debug 1 "single entry in datasource_list ($DI_DSLIST) use that." + found "$@" + return + fi + + local found="" ret="" ds="" maybe="" + for ds in ${DI_DSLIST}; do + dscheck_fn="dscheck_${ds}" + debug 2 "Checking for datasource '$ds' via '$dscheck_fn'" + if ! type "$dscheck_fn" >/dev/null 2>&1; then + warn "No check method '$dscheck_fn' for datasource '$ds'" + continue + fi + $dscheck_fn + ret="$?" 
+ case "$ret" in + $DS_FOUND) + debug 1 "check for '$ds' returned found"; + found="${found} $ds";; + $DS_MAYBE) + debug 1 "check for $ds returned maybe"; + maybe="${maybe} $ds";; + *) debug 2 "check for $ds returned not-found[$ret]";; + esac + done + + debug 2 "found=$found maybe=$maybe" + set -- $found + if [ $# -ne 0 ]; then + if [ $# -eq 1 ]; then + debug 1 "Found single datasource: $1" + else + # found=all + debug 1 "Found $# datasources found=${DI_ON_FOUND}: $*" + if [ "${DI_ON_FOUND}" = "first" ]; then + set -- "$1" + fi + fi + found "$@" + return + fi + + set -- $maybe + if [ $# -ne 0 -a "${DI_ON_MAYBE}" != "none" ]; then + debug 1 "$# datasources returned maybe: $*" + found "$@" + return + fi + + case "$DI_ON_NOTFOUND" in + $DI_DISABLED) + debug 1 "No result. notfound=$DI_DISABLED. returning $ret_dis." + return $ret_dis + ;; + $DI_ENABLED) + debug 1 "notfound=$DI_ENABLED. returning $ret_en" + return $ret_en;; + esac + + error "Unexpected result" + return 3 +} + +noop() { + : +} + +case "${DI_MAIN}" in + main|print_info|noop) "${DI_MAIN}" "$@";; + *) error "unexpected value for DI_MAIN"; exit 1;; +esac + +# vi: syntax=sh ts=4 expandtab -- cgit v1.2.3 From e98709225510ee99ee0269c558c82b3e693e38e5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 4 Feb 2017 02:25:19 +0000 Subject: manual_cache_clean: When manually cleaning touch a file in instance dir. When manual_cache_clean is enabled, write a file to /var/lib/cloud/instance/manual-clean. That file can then be read by ds-identify or another tool to indicate that manual cleaning is in place. 
--- cloudinit/cmd/main.py | 9 ++++++++- cloudinit/helpers.py | 1 + cloudinit/stages.py | 6 ++++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 65b15edc..7c652574 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -312,8 +312,15 @@ def main_init(name, args): " would allow us to stop early.") else: existing = "check" - if util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False): + mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False) + if mcfg: + LOG.debug("manual cache clean set from config") existing = "trust" + else: + mfile = path_helper.get_ipath_cur("manual_clean_marker") + if os.path.exists(mfile): + LOG.debug("manual cache clean found from marker: %s", mfile) + existing = "trust" init.purge_cache() # Delete the non-net file as well diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 4528fb01..38f5f899 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -339,6 +339,7 @@ class Paths(object): "vendordata_raw": "vendor-data.txt", "vendordata": "vendor-data.txt.i", "instance_id": ".instance-id", + "manual_clean_marker": "manual-clean", } # Set when a datasource becomes active self.datasource = ds diff --git a/cloudinit/stages.py b/cloudinit/stages.py index b0552dde..21763810 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -188,6 +188,12 @@ class Init(object): def _write_to_cache(self): if self.datasource is NULL_DATA_SOURCE: return False + if util.get_cfg_option_bool(self.cfg, 'manual_cache_clean', False): + # The empty file in instance/ dir indicates manual cleaning, + # and can be read by ds-identify. 
+ util.write_file( + self.paths.get_ipath_cur("manual_clean_marker"), + omode="w", content="") return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl")) def _get_datasources(self): -- cgit v1.2.3 From 5f14a0b1ca3079e4ab43d615840866a4b7d8df6a Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Mon, 30 Jan 2017 15:31:36 -0700 Subject: code-style: make master pass pycodestyle (2.3.1) cleanly, currently: $ pycodestyle cloudinit/ tests/ tools/ tools/make-mime.py:25:5: E722 do not use bare except' tools/mock-meta.py:252:17: E722 do not use bare except' For tools/make-mime.py:25:5: E722 do not use bare except' the use case is when someone runs ./make-mime.py --attach commis instead of ./make-mime.py --attach commissaire.txt:x-commissaire-host The split can cause a ValueError potentially if there is no: For tools/mock-meta.py:262:17: E722 do not use bare except' the use case is a dictionary look up occurs potentially when an unknown key is given: key_name = key_ids[key_id] Do note that version 2.3.0 falsely reported a dozen or so E302 and E306 errors. 
--- tools/make-mime.py | 2 +- tools/mock-meta.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/make-mime.py b/tools/make-mime.py index 12727126..f6a72044 100755 --- a/tools/make-mime.py +++ b/tools/make-mime.py @@ -22,7 +22,7 @@ def file_content_type(text): try: filename, content_type = text.split(":", 1) return (open(filename, 'r'), filename, content_type.strip()) - except: + except ValueError: raise argparse.ArgumentError("Invalid value for %r" % (text)) diff --git a/tools/mock-meta.py b/tools/mock-meta.py index a0d99441..95fc4659 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -259,7 +259,10 @@ class MetaDataHandler(object): try: key_id = int(mybe_key) key_name = key_ids[key_id] - except: + except ValueError: + raise WebException(hclient.BAD_REQUEST, + "%s: not an integer" % mybe_key) + except KeyError: raise WebException(hclient.BAD_REQUEST, "Unknown key id %r" % mybe_key) # Extract the possible sub-params -- cgit v1.2.3 From 3bcb72c593f13915a81272778809dd8e71e2c7b7 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Mon, 30 Jan 2017 11:24:45 -0700 Subject: Remove style checking during build and add latest style checks to tox - make check will no longer run the style checks, that way package builds wont fail on a style difference in versions of the style tools in that distro. - created style-check make file target to continue to run pep8 and pyflakes - added tox envs 'tip-pycodestyle' and 'tip-pyflakes' to run latest style checking. These are not enabled by default run of tox. 
LP: #1652329 --- Makefile | 5 ++++- tox.ini | 8 ++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 18ec5680..5940ed7e 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,9 @@ CODE_VERSION=$(shell $(PYVER) -c "from cloudinit import version; print(version.v all: check -check: check_version pep8 $(pyflakes) test $(yaml) +check: check_version test $(yaml) + +style-check: pep8 $(pyflakes) pep8: @$(CWD)/tools/run-pep8 @@ -84,3 +86,4 @@ deb: .PHONY: test pyflakes pyflakes3 clean pep8 rpm deb yaml check_version .PHONY: pip-test-requirements pip-requirements clean_pyc unittest unittest3 +.PHONY: style-check diff --git a/tox.ini b/tox.ini index e79ea6aa..ca5d8b87 100644 --- a/tox.ini +++ b/tox.ini @@ -79,3 +79,11 @@ deps = jsonpatch==1.2 six==1.9.0 -r{toxinidir}/test-requirements.txt + +[testenv:tip-pycodestyle] +commands = {envpython} -m pycodestyle {posargs:cloudinit/ tests/ tools/} +deps = pycodestyle + +[testenv:tip-pyflakes] +commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/} +deps = pyflakes -- cgit v1.2.3 From 7f85a3a5b4586ac7f21309aac4edc39e6ffea9ef Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 9 Feb 2017 12:25:54 -0500 Subject: ds-identify: change aarch64 to use the default for non-dmi systems. aarch64 does support dmi, but OpenStack does not populate guests with this information, and there are currently bugs in qemu preventing it from working correctly see bug #1663304 for more information. So, for the time being, pretend as if there is no dmi data on aarch64, which will make it enable cloud-init even when no datasources are found. 
--- tools/ds-identify | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/ds-identify b/tools/ds-identify index 203eac0d..f07866a2 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -824,7 +824,9 @@ parse_policy() { local def="" case "$DI_UNAME_MACHINE" in # these have dmi data - i?86|x86_64|aarch64) def=${DI_DEFAULT_POLICY};; + i?86|x86_64) def=${DI_DEFAULT_POLICY};; + # aarch64 has dmi, but not currently used (LP: #1663304) + aarch64) def=${DI_DEFAULT_POLICY_NO_DMI};; *) def=${DI_DEFAULT_POLICY_NO_DMI};; esac local policy="$1" -- cgit v1.2.3 From 0df21b6ea89697e8700ad51158327533aa573c91 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 9 Feb 2017 10:56:01 -0500 Subject: support nova-lxd by reading platform from environment of pid 1. Nova lxd will now put the environment variable 'platform' into pid 1's environment to the value 'OpenStack Nova', which is the same as you would find in kvm guests. LP: #1661797 --- tools/ds-identify | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/tools/ds-identify b/tools/ds-identify index f07866a2..88094af7 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -62,6 +62,7 @@ PATH_VAR_LIB_CLOUD="${PATH_VAR_LIB_CLOUD:-${PATH_ROOT}/var/lib/cloud}" PATH_DI_CONFIG="${PATH_DI_CONFIG:-${PATH_ROOT}/etc/cloud/ds-identify.cfg}" PATH_PROC_CMDLINE="${PATH_PROC_CMDLINE:-${PATH_ROOT}/proc/cmdline}" PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}" +PATH_PROC_1_ENVIRON="${PATH_PROC_1_ENVIRON:-${PATH_ROOT}/proc/1/environ}" PATH_CLOUD_CONFD="${PATH_CLOUD_CONFD:-${PATH_ROOT}/etc/cloud}" PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}" PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg} @@ -81,6 +82,7 @@ DI_DMI_PRODUCT_UUID="" DI_FS_LABELS="" DI_KERNEL_CMDLINE="" DI_VIRT="" +DI_PID_1_PLATFORM="" DI_UNAME_KERNEL_NAME="" DI_UNAME_KERNEL_RELEASE="" @@ -350,6 +352,21 @@ read_datasource_list() { return 0 } +read_pid1_platform() { + 
local oifs="$IFS" out="" tok="" key="" val="" platform="${UNAVAILABLE}" + cached "${DI_PID_1_PLATFORM}" && return + [ -r "${PATH_PROC_1_ENVIRON}" ] || return + out=$(tr '\0' '\n' <"${PATH_PROC_1_ENVIRON}") + IFS="$CR"; set -- $out; IFS="$oifs" + for tok in "$@"; do + key=${tok%%=*} + [ "$key" != "$tok" ] || continue + val=${tok#*=} + [ "$key" = "platform" ] && platform="$val" && break + done + DI_PID_1_PLATFORM="$platform" +} + dmi_product_name_matches() { is_container && return 1 case "${DI_DMI_PRODUCT_NAME}" in @@ -625,13 +642,9 @@ dscheck_OpenStack() { if dmi_product_name_is "OpenStack Nova"; then return ${DS_FOUND} fi - case "${DI_VIRT}" in - lxc|lxc-libvirt) - # FIXME: This could be container on openstack (nova-lxd) - # or nova-libvirt-lxc - return ${DS_NOT_FOUND} - ;; - esac + if [ "${DI_PID_1_PLATFORM}" = "OpenStack Nova" ]; then + return ${DS_FOUND} + fi return ${DS_NOT_FOUND} } @@ -697,6 +710,7 @@ dscheck_None() { collect_info() { read_virt + read_pid1_platform read_kernel_cmdline read_uname_info read_config @@ -716,7 +730,7 @@ print_info() { _print_info() { local n="" v="" vars="" vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" - vars="$vars DMI_PRODUCT_UUID" + vars="$vars DMI_PRODUCT_UUID PID_1_PLATFORM" vars="$vars FS_LABELS KERNEL_CMDLINE VIRT" vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION" vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" -- cgit v1.2.3 From e6098c2cd0a1786ba5b34b603247b4ef644e2312 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 10 Feb 2017 13:44:32 -0600 Subject: ds-identify: read ds=nocloud properly The nocloud datasource specifically would look for ds=nocloud or ds=nocloud-net (often augmented with 'seedfrom') on the kernel command line. Fix to return DS_FOUND in that case. 
LP: #1663723 --- tools/ds-identify | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/ds-identify b/tools/ds-identify index 88094af7..e454ed6d 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -514,6 +514,9 @@ dscheck_MAAS() { dscheck_NoCloud() { local fslabel="cidata" d="" + case " ${DI_KERNEL_CMDLINE} " in + *\ ds=nocloud*) return ${DS_FOUND};; + esac for d in nocloud nocloud-net; do check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} done -- cgit v1.2.3 From 65529b6fca5915438612c161c01fe7b57c2a59b1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 10 Feb 2017 14:14:21 -0600 Subject: ds-identify: fix checking for filesystem label has_fs_with_label regressed when refactoring to not have leading and trailing , in DI_FS_LABELS. LP: #1663735 --- tools/ds-identify | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/ds-identify b/tools/ds-identify index e454ed6d..3ba36f8f 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -386,8 +386,8 @@ dmi_sys_vendor_is() { } has_fs_with_label() { - local label=",$1," - case "${DI_FS_LABELS}" in + local label="$1" + case ",${DI_FS_LABELS}," in *,$label,*) return 0;; esac return 1 -- cgit v1.2.3 From 1cd8cfaf1b4d0e3a97c693469d6d987d55014280 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 14 Feb 2017 11:06:36 -0500 Subject: apply the runtime configuration written by ds-identify. When the ds-identify code landed, it started writing /run/cloud.cfg but at the moment, nothing was reading that. The result is that ds-identify only worked to disable cloud-init entirely. 
--- cloudinit/stages.py | 9 +++++++- tests/unittests/test_data.py | 53 +++++++++++++++++++++++++++++++------------- 2 files changed, 45 insertions(+), 17 deletions(-) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 21763810..5bed9032 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -11,7 +11,8 @@ import sys import six from six.moves import cPickle as pickle -from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG) +from cloudinit.settings import ( + FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG) from cloudinit import handlers @@ -834,6 +835,10 @@ class Modules(object): return self._run_modules(mostly_mods) +def read_runtime_config(): + return util.read_conf(RUN_CLOUD_CONFIG) + + def fetch_base_config(): return util.mergemanydict( [ @@ -841,6 +846,8 @@ def fetch_base_config(): util.get_builtin_cfg(), # Anything in your conf.d or 'default' cloud.cfg location. util.read_conf_with_confd(CLOUD_CONFIG), + # runtime config + read_runtime_config(), # Kernel/cmdline parameters override system config util.read_conf_from_cmdline(), ], reverse=True) diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 4092d9ca..4ad86bb6 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -564,12 +564,12 @@ class TestConvertString(helpers.TestCase): class TestFetchBaseConfig(helpers.TestCase): - - def test_only_builtin_gets_builtin2(self): + def test_only_builtin_gets_builtin(self): ret = helpers.wrap_and_call( - 'cloudinit.stages.util', - {'read_conf_with_confd': None, - 'read_conf_from_cmdline': None}, + 'cloudinit.stages', + {'util.read_conf_with_confd': None, + 'util.read_conf_from_cmdline': None, + 'read_runtime_config': {'return_value': {}}}, stages.fetch_base_config) self.assertEqual(util.get_builtin_cfg(), ret) @@ -578,9 +578,11 @@ class TestFetchBaseConfig(helpers.TestCase): test_key = sorted(builtin)[0] test_value = 'test' ret = helpers.wrap_and_call( - 'cloudinit.stages.util', 
- {'read_conf_with_confd': {'return_value': {test_key: test_value}}, - 'read_conf_from_cmdline': None}, + 'cloudinit.stages', + {'util.read_conf_with_confd': + {'return_value': {test_key: test_value}}, + 'util.read_conf_from_cmdline': None, + 'read_runtime_config': {'return_value': {}}}, stages.fetch_base_config) self.assertEqual(ret.get(test_key), test_value) builtin[test_key] = test_value @@ -592,25 +594,44 @@ class TestFetchBaseConfig(helpers.TestCase): test_value = 'test' cmdline = {test_key: test_value} ret = helpers.wrap_and_call( - 'cloudinit.stages.util', - {'read_conf_from_cmdline': {'return_value': cmdline}, - 'read_conf_with_confd': None}, + 'cloudinit.stages', + {'util.read_conf_from_cmdline': {'return_value': cmdline}, + 'util.read_conf_with_confd': None, + 'read_runtime_config': None}, stages.fetch_base_config) self.assertEqual(ret.get(test_key), test_value) builtin[test_key] = test_value self.assertEqual(ret, builtin) - def test_cmdline_overrides_conf_d_and_defaults(self): + def test_cmdline_overrides_confd_runtime_and_defaults(self): builtin = {'key1': 'value0', 'key3': 'other2'} conf_d = {'key1': 'value1', 'key2': 'other1'} cmdline = {'key3': 'other3', 'key2': 'other2'} + runtime = {'key3': 'runtime3'} ret = helpers.wrap_and_call( - 'cloudinit.stages.util', - {'read_conf_with_confd': {'return_value': conf_d}, - 'get_builtin_cfg': {'return_value': builtin}, - 'read_conf_from_cmdline': {'return_value': cmdline}}, + 'cloudinit.stages', + {'util.read_conf_with_confd': {'return_value': conf_d}, + 'util.get_builtin_cfg': {'return_value': builtin}, + 'read_runtime_config': {'return_value': runtime}, + 'util.read_conf_from_cmdline': {'return_value': cmdline}}, stages.fetch_base_config) self.assertEqual(ret, {'key1': 'value1', 'key2': 'other2', 'key3': 'other3'}) + def test_order_precedence_is_builtin_system_runtime_cmdline(self): + builtin = {'key1': 'builtin0', 'key3': 'builtin3'} + conf_d = {'key1': 'confd1', 'key2': 'confd2', 'keyconfd1': 'kconfd1'} + 
runtime = {'key1': 'runtime1', 'key2': 'runtime2'} + cmdline = {'key1': 'cmdline1'} + ret = helpers.wrap_and_call( + 'cloudinit.stages', + {'util.read_conf_with_confd': {'return_value': conf_d}, + 'util.get_builtin_cfg': {'return_value': builtin}, + 'util.read_conf_from_cmdline': {'return_value': cmdline}, + 'read_runtime_config': {'return_value': runtime}, + }, + stages.fetch_base_config) + self.assertEqual(ret, {'key1': 'cmdline1', 'key2': 'runtime2', + 'key3': 'builtin3', 'keyconfd1': 'kconfd1'}) + # vi: ts=4 expandtab -- cgit v1.2.3 From 91be1d189d9348e81a4c4f1f7d5fc255df1ce6d1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 16 Feb 2017 21:13:38 -0500 Subject: ec2_utils: fix MetadataLeafDecoder that returned bytes on empty the MetadataLeafDecoder would return a bytes value b'' instead of an empty string if the value of a key was empty. In all other cases the value would be a string. This was discovered when trying to json.dumps(get_instance_metadata()) on a recent OpenStack, where the value of 'public-ipv4' was empty. The attempt to dump that with json would raise TypeError: b'' is not JSON serializable --- cloudinit/ec2_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 0c16ae47..13691549 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -28,7 +28,7 @@ class MetadataLeafDecoder(object): def __call__(self, field, blob): if not blob: - return blob + return '' try: blob = util.decode_binary(blob) except UnicodeDecodeError: -- cgit v1.2.3 From f81d6c7bde2af206d449de593b35773068270c84 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Fri, 17 Feb 2017 08:55:05 -0500 Subject: net: correct errors in cloudinit/net/sysconfig.py There were some logic errors in sysconfig.py that appear to be the result of accidentally typing "iface" where it should have been "iface_cfg". This patch corrects those problems so that the module can run successfully. 
LP: #1665441 Resolves: rhbz#1389530 --- cloudinit/net/sysconfig.py | 4 +-- tests/unittests/test_net.py | 87 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 88 insertions(+), 3 deletions(-) diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 9be74070..19e220ae 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -283,10 +283,10 @@ class Renderer(renderer.Renderer): cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0]) elif len(iface_subnets) > 1: for i, iface_subnet in enumerate(iface_subnets, - start=len(iface.children)): + start=len(iface_cfg.children)): iface_sub_cfg = iface_cfg.copy() iface_sub_cfg.name = "%s:%s" % (iface_name, i) - iface.children.append(iface_sub_cfg) + iface_cfg.children.append(iface_sub_cfg) cls._render_subnet(iface_sub_cfg, route_cfg, iface_subnet) @classmethod diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index b77d277a..1b6288d4 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -159,6 +159,91 @@ NETMASK0=0.0.0.0 ; Created by cloud-init on instance boot automatically, do not edit. 
; nameserver 172.19.0.12 +""".lstrip()), + ('etc/udev/rules.d/70-persistent-net.rules', + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] + }, + { + 'in_data': { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [{ + "network_id": "public-ipv4", + "type": "ipv4", "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [{ + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + }], + "ip_address": "172.19.1.34", "id": "network0" + },{ + "network_id": "private-ipv4", + "type": "ipv4", "netmask": "255.255.255.0", + "link": "tap1a81968a-79", + "routes": [], + "ip_address": "10.0.0.10", "id": "network1" + }], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, "type": "bridge", "id": + "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + }, + ], + }, + 'in_macs': { + 'fa:16:3e:ed:9a:59': 'eth0', + }, + 'out_sysconfig': [ + ('etc/sysconfig/network-scripts/ifcfg-eth0', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEVICE=eth0 +HWADDR=fa:16:3e:ed:9a:59 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/sysconfig/network-scripts/ifcfg-eth0:0', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=static +DEFROUTE=yes +DEVICE=eth0:0 +GATEWAY=172.19.3.254 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=172.19.1.34 +NETMASK=255.255.252.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/sysconfig/network-scripts/ifcfg-eth0:1', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=static +DEVICE=eth0:1 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=10.0.0.10 +NETMASK=255.255.255.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/resolv.conf', + """ +; Created by cloud-init on instance boot automatically, do not edit. 
+; +nameserver 172.19.0.12 """.lstrip()), ('etc/udev/rules.d/70-persistent-net.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', @@ -529,8 +614,8 @@ USERCTL=no self.assertEqual(expected_content, content) def test_openstack_rendering_samples(self): - render_dir = self.tmp_dir() for os_sample in OS_SAMPLES: + render_dir = self.tmp_dir() ex_input = os_sample['in_data'] ex_mac_addrs = os_sample['in_macs'] network_cfg = openstack.convert_net_json( -- cgit v1.2.3 From da25385d0613b373c5746761748782ca1e157d10 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 17 Feb 2017 12:05:38 -0500 Subject: flake8: fix flake8 complaints in previous commit. --- cloudinit/net/sysconfig.py | 6 +++--- tests/unittests/test_net.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 19e220ae..6e7739fb 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -282,12 +282,12 @@ class Renderer(renderer.Renderer): if len(iface_subnets) == 1: cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0]) elif len(iface_subnets) > 1: - for i, iface_subnet in enumerate(iface_subnets, - start=len(iface_cfg.children)): + for i, isubnet in enumerate(iface_subnets, + start=len(iface_cfg.children)): iface_sub_cfg = iface_cfg.copy() iface_sub_cfg.name = "%s:%s" % (iface_name, i) iface_cfg.children.append(iface_sub_cfg) - cls._render_subnet(iface_sub_cfg, route_cfg, iface_subnet) + cls._render_subnet(iface_sub_cfg, route_cfg, isubnet) @classmethod def _render_bond_interfaces(cls, network_state, iface_contents): diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 1b6288d4..4b03ff72 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -177,7 +177,7 @@ nameserver 172.19.0.12 "gateway": "172.19.3.254", }], "ip_address": "172.19.1.34", "id": "network0" - },{ + }, { "network_id": "private-ipv4", "type": "ipv4", "netmask": "255.255.255.0", "link": 
"tap1a81968a-79", -- cgit v1.2.3 From f4e8eb0a18b775e341823cfa1a7b305af753d548 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 17 Feb 2017 15:25:24 -0500 Subject: ds-identify: only run once per boot unless --force is given. This makes ds-identify run only once. Previously it would run multiple times each boot as the generator would run more than once. This is potentially dangerous, in that running again might find more attached disks. However that is really only a "lucky" fix if it happens to result differently than the first run. Additionally, we now log the uptime that we started and ended at. --- tools/ds-identify | 45 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/tools/ds-identify b/tools/ds-identify index 3ba36f8f..7bb63862 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -63,9 +63,11 @@ PATH_DI_CONFIG="${PATH_DI_CONFIG:-${PATH_ROOT}/etc/cloud/ds-identify.cfg}" PATH_PROC_CMDLINE="${PATH_PROC_CMDLINE:-${PATH_ROOT}/proc/cmdline}" PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}" PATH_PROC_1_ENVIRON="${PATH_PROC_1_ENVIRON:-${PATH_ROOT}/proc/1/environ}" +PATH_PROC_UPTIME=${PATH_PROC_UPTIME:-${PATH_ROOT}/proc/uptime} PATH_CLOUD_CONFD="${PATH_CLOUD_CONFD:-${PATH_ROOT}/etc/cloud}" PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}" PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg} +PATH_RUN_DI_RESULT=${PATH_RUN_DI_RESULT:-${PATH_RUN_CI}/.ds-identify.result} DI_LOG="${DI_LOG:-${PATH_RUN_CI}/ds-identify.log}" _DI_LOGGED="" @@ -750,6 +752,8 @@ _print_info() { write_result() { local runcfg="${PATH_RUN_CI_CFG}" ret="" line="" if [ "$DI_REPORT" = "true" ]; then + # if report is true, then we write to .report, but touch the other. 
+ : > "$runcfg" runcfg="$runcfg.report" fi for line in "$@"; do @@ -924,12 +928,24 @@ manual_clean_and_existing() { [ -f "${PATH_VAR_LIB_CLOUD}/instance/manual-clean" ] } -main() { +read_uptime() { + local up idle + _RET="${UNAVAILABLE}" + [ -f "$PATH_PROC_UPTIME" ] && + read up idle < "$PATH_PROC_UPTIME" && _RET="$up" + return +} + +_main() { local dscheck="" ret_dis=1 ret_en=0 + + read_uptime + debug 1 "[up ${_RET}s]" "ds-identify $*" collect_info - if [ ! -e "$PATH_RUN_CI_CFG" ]; then - # the first time the generator is run. + if [ "$DI_LOG" = "stderr" ]; then + _print_info 1>&2 + else _print_info >> "$DI_LOG" fi @@ -1022,6 +1038,29 @@ main() { return 3 } +main() { + local ret="" + [ -d "$PATH_RUN_CI" ] || mkdir -p "$PATH_RUN_CI" + if [ "${1:+$1}" != "--force" ] && [ -f "$PATH_RUN_CI_CFG" ] && + [ -f "$PATH_RUN_DI_RESULT" ]; then + if read ret < "$PATH_RUN_DI_RESULT"; then + if [ "$ret" = "0" ] || [ "$ret" = "1" ]; then + debug 2 "used cached result $ret. pass --force to re-run." + return $ret; + fi + debug 1 "previous run returned unexpected '$ret'. Re-running." + else + error "failed to read result from $PATH_RUN_DI_RESULT!" + fi + fi + _main "$@" + ret=$? + echo "$ret" > "$PATH_RUN_DI_RESULT" + read_uptime + debug 1 "[up ${_RET}s]" "returning $ret" + return $ret +} + noop() { : } -- cgit v1.2.3 From 5551e8fc40ba37f0bd133f9478a8db8ce9f79dd7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Feb 2017 14:22:45 -0500 Subject: tools/ds-identify: fix documentation of policy setting in a comment. Just remove some examples that are no longer valid. 
--- tools/ds-identify | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/tools/ds-identify b/tools/ds-identify index 7bb63862..9b14b92a 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -29,17 +29,11 @@ # all: enable all DS_MAYBE # none: ignore any DS_MAYBE # -# notfound: (default=disable) -# disable: disable cloud-init -# enable: enable cloud-init +# notfound: (default=disabled) +# disabled: disable cloud-init +# enabled: enable cloud-init # # -# zesty: -# policy: found=first,maybe=all,none=disable -# xenial: -# policy: found=all,maybe=all,none=enable -# and then at a later date - set -u set -f -- cgit v1.2.3 From cff1335be979fd1be5512d241ab861cfe70d82f0 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Feb 2017 22:42:50 -0500 Subject: tools/ds-identify: use quotes in local declarations. The following can have cause issue: FOO="bar ; wark" showit() { local b=$FOO echo $b } 4: local: ;: bad variable name The answer is just to use more quotes. --- tools/ds-identify | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/ds-identify b/tools/ds-identify index 9b14b92a..f2878745 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -601,7 +601,7 @@ dscheck_Ec2() { fi # product uuid and product serial start with case insensitive - local uuid=${DI_DMI_PRODUCT_UUID} serial=${DI_DMI_PRODUCT_SERIAL} + local uuid="${DI_DMI_PRODUCT_UUID}" serial="${DI_DMI_PRODUCT_SERIAL}" case "$uuid:$serial" in [Ee][Cc]2*:[Ee][Cc]2) # both start with ec2, now check for case insenstive equal @@ -883,7 +883,7 @@ parse_policy() { } read_config() { - local config=${PATH_DI_CONFIG} + local config="${PATH_DI_CONFIG}" local _rc_dsname="" _rc_policy="" ret="" if [ -f "$config" ]; then _read_config < "$config" -- cgit v1.2.3 From e0efe853b805ca3c66155b7307a67af5175b3f46 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Feb 2017 17:13:52 -0500 Subject: tools/ds-identify: read the seed directory on Ec2 This just adds checking of 
the Ec2 seed directory. --- tools/ds-identify | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/ds-identify b/tools/ds-identify index f2878745..c15ba5c0 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -592,8 +592,11 @@ dscheck_Bigstep() { dscheck_Ec2() { # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html # http://paste.ubuntu.com/23630859/ - local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" + + check_seed_dir "ec2" meta-data user-data && return ${DS_FOUND} is_container && return ${DS_NOT_FOUND} + + local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" # if the (basically) xen specific /sys/hypervisor/uuid starts with 'ec2' if [ -r "$hvuuid" ] && read uuid < "$hvuuid" && [ "${uuid#ec2}" != "$uuid" ]; then -- cgit v1.2.3 From 56f66872923e653ba64c9f9baa0ad7a23a9da0c1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Feb 2017 11:37:31 -0500 Subject: tools/ds-identify: add support for found or maybe contributing config. A check function that returns found or maybe can also now return config that will be written to the resultant /run/cloud.cfg. They do so by setting the variable _RET_excfg. --- tools/ds-identify | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/tools/ds-identify b/tools/ds-identify index c15ba5c0..1cd1118f 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -765,12 +765,22 @@ write_result() { } found() { + # found(ds1, [ds2 ...], [-- [extra lines]]) local list="" ds="" # always we write the None datasource last. - for ds in "$@" None; do - list="${list:+${list}, }$ds" + while [ $# -ne 0 ]; do + if [ "$1" = "--" ]; then + shift + break + fi + list="${list:+${list}, }$1" + shift done - write_result "datasource_list: [ $list ]" + if [ $# -eq 1 ] && [ -z "$1" ]; then + # do not pass an empty line through. 
+ shift + fi + write_result "datasource_list: [ $list ]" "$@" return } @@ -977,7 +987,8 @@ _main() { return fi - local found="" ret="" ds="" maybe="" + local found="" ret="" ds="" maybe="" _RET_excfg="" + local exfound_cfg="" exmaybe_cfg="" for ds in ${DI_DSLIST}; do dscheck_fn="dscheck_${ds}" debug 2 "Checking for datasource '$ds' via '$dscheck_fn'" @@ -985,20 +996,23 @@ _main() { warn "No check method '$dscheck_fn' for datasource '$ds'" continue fi + _RET_excfg="" $dscheck_fn ret="$?" case "$ret" in $DS_FOUND) debug 1 "check for '$ds' returned found"; + exfound_cfg="${exfound_cfg:+${exfound_cfg}${CR}}${_RET_excfg}" found="${found} $ds";; $DS_MAYBE) - debug 1 "check for $ds returned maybe"; + debug 1 "check for '$ds' returned maybe"; + exmaybe_cfg="${exmaybe_cfg:+${exmaybe_cfg}${CR}}${_RET_excfg}" maybe="${maybe} $ds";; - *) debug 2 "check for $ds returned not-found[$ret]";; + *) debug 2 "check for '$ds' returned not-found[$ret]";; esac done - debug 2 "found=$found maybe=$maybe" + debug 2 "found=${found# } maybe=${maybe# }" set -- $found if [ $# -ne 0 ]; then if [ $# -eq 1 ]; then @@ -1010,14 +1024,14 @@ _main() { set -- "$1" fi fi - found "$@" + found "$@" -- "${exfound_cfg}" return fi set -- $maybe if [ $# -ne 0 -a "${DI_ON_MAYBE}" != "none" ]; then debug 1 "$# datasources returned maybe: $*" - found "$@" + found "$@" -- "${exmaybe_cfg}" return fi -- cgit v1.2.3 From 131b6f16a314d863e142d5f59c8488b59e28fa97 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 22 Feb 2017 13:35:51 -0500 Subject: ds-identify: add reading of datasource/Ec2/strict_id ds-identify will now read this setting, and thus allow the user to modify ds-identifies behavior via either: 1. builtin setting here cloud-init/ds-identify builtin 2. ds-identify config (/etc/cloud/ds-identify.cfg) 3. system config (/etc/cloud/cloud.cfg.d/*Ec2*.cfg) 4. 
kernel command line (ci.datasource.ec2.strict_id=true) --- tools/ds-identify | 98 +++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 85 insertions(+), 13 deletions(-) diff --git a/tools/ds-identify b/tools/ds-identify index 1cd1118f..bfb55ed1 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -4,12 +4,12 @@ # or on the kernel command line. It takes primarily 2 inputs: # datasource: can specify the datasource that should be used. # kernel command line option: ci.datasource= -# +# # policy: a string that indicates how ds-identify should operate. # kernel command line option: ci.di.policy= # default setting is: # search,found=all,maybe=all,notfound=disable - +# # report: write config to /run/cloud-init/cloud.cfg.report (instead of # /run/cloud-init/cloud.cfg, which effectively makes this dry-run). # enable: do nothing @@ -33,6 +33,10 @@ # disabled: disable cloud-init # enabled: enable cloud-init # +# ci.datasource.ec2.strict_id: (true|false|warn[,0-9]) +# if ec2 datasource does not strictly match, +# return not_found if true +# return maybe if false or warn*. # set -u @@ -589,6 +593,48 @@ dscheck_Bigstep() { return ${DS_NOT_FOUND} } +ec2_read_strict_setting() { + # the 'strict_id' setting for Ec2 controls behavior when + # the platform does not identify itself directly as Ec2. + # order of precedence is: + # 1. builtin setting here cloud-init/ds-identify builtin + # 2. ds-identify config + # 3. system config (/etc/cloud/cloud.cfg.d/*Ec2*.cfg) + # 4. kernel command line (undocumented) + # 5. user-data or vendor-data (not available here) + local default="$1" key="ci.datasource.ec2.strict_id" val="" + + # 4. kernel command line + case " ${DI_KERNEL_CMDLINE} " in + *\ $key=*\ ) + val=${DI_KERNEL_CMDLINE##*$key=} + val=${val%% *}; + _RET=${val:-$default} + return 0 + esac + + # 3. 
look for the key 'strict_id' (datasource/Ec2/strict_id) + local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" + match="$bp.d/*[Ee][Cc]2*.cfg" + if check_config strict_id "$match"; then + debug 2 "${_RET_fname} set strict_id to $_RET" + return 0 + fi + + # 2. ds-identify config (datasource.ec2.strict) + local config="${PATH_DI_CONFIG}" + if [ -f "$config" ]; then + if _read_config "$key" < "$config"; then + _RET=${_RET:-$default} + return 0 + fi + fi + + # 1. Default + _RET=$default + return 0 +} + dscheck_Ec2() { # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html # http://paste.ubuntu.com/23630859/ @@ -611,17 +657,28 @@ dscheck_Ec2() { nocase_equal "$uuid" "$serial" && return ${DS_FOUND};; esac - # search through config files to check for platform - local f="" match="${PATH_CLOUD_CONFD}/*ec2*.cfg" - # look for the key 'platform' (datasource/ec2/look_alike/behavior) - if check_config platform "$match"; then - if [ "$platform" != "Unknown" ]; then - _RET="$name" - return "${DS_FOUND}" - fi + local default="true" + if ec2_read_strict_setting "$default"; then + strict="$_RET" + else + debug 1 "ec2_read_strict returned non-zero: $?. using '$default'." + strict="$default" fi - return ${DS_NOT_FOUND} + local key="datasource/Ec2/strict_id" + case "$strict" in + true|false|warn|warn,[0-9]*) :;; + *) + warn "$key was set to invalid '$strict'. using '$default'" + strict="$default";; + esac + + _RET_excfg="datasource: {Ec2: {strict_id: \"$strict\"}}" + if [ "$strict" = "true" ]; then + return $DS_NOT_FOUND + else + return $DS_MAYBE + fi } dscheck_GCE() { @@ -801,8 +858,10 @@ unquote() { } _read_config() { - # reads config from stdin, modifies _rc scoped environment vars. - # rc_policy and _rc_dsname + # reads config from stdin, + # if no parameters are set, modifies _rc scoped environment vars. + # if keyname is provided, then returns found value of that key. 
+ local keyname="${1:-_unset}" local line="" hash="#" ckey="" key="" val="" while read line; do line=${line%%${hash}*} @@ -813,15 +872,28 @@ _read_config() { trim "$key" key=${_RET} + [ "$keyname" != "_unset" ] && [ "$keyname" != "$key" ] && + continue + val="${line#*:}" trim "$val" unquote "${_RET}" val=${_RET} + + if [ "$keyname" = "$key" ]; then + _RET="$val" + return 0 + fi + case "$key" in datasource) _rc_dsname="$val";; policy) _rc_policy="$val";; esac done + if [ "$keyname" = "_unset" ]; then + return 1 + fi + return 0 } parse_warn() { -- cgit v1.2.3 From 9bb55c6c45bcc5e310cf7e4d42cad53759dcca15 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Feb 2017 17:15:27 -0500 Subject: DatasourceEc2: add warning message when not on AWS. Based on the setting Datasource/Ec2/strict_id, the datasource will now warn once per instance. --- cloudinit/sources/DataSourceAliYun.py | 4 + cloudinit/sources/DataSourceEc2.py | 178 +++++++++++++++++++++++++++++++++- tools/ds-identify | 40 ++++++-- 3 files changed, 211 insertions(+), 11 deletions(-) diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 2d00255c..9debe947 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -22,6 +22,10 @@ class DataSourceAliYun(EC2.DataSourceEc2): def get_public_ssh_keys(self): return parse_public_keys(self.metadata.get('public-keys', {})) + @property + def cloud_platform(self): + return EC2.Platforms.ALIYUN + def parse_public_keys(public_keys): keys = [] diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index c657fd09..26da263a 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -9,6 +9,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import os +import textwrap import time from cloudinit import ec2_utils as ec2 @@ -22,12 +23,23 @@ LOG = logging.getLogger(__name__) # Which version we are requesting of the ec2 metadata apis DEF_MD_VERSION = '2009-04-04' +STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") +STRICT_ID_DEFAULT = "warn" + + +class Platforms(object): + ALIYUN = "AliYun" + AWS = "AWS" + SEEDED = "Seeded" + UNKNOWN = "Unknown" + class DataSourceEc2(sources.DataSource): # Default metadata urls that will be used if none are provided # They will be checked for 'resolveability' and some of the # following may be discarded if they do not resolve metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"] + _cloud_platform = None def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -41,8 +53,18 @@ class DataSourceEc2(sources.DataSource): self.userdata_raw = seed_ret['user-data'] self.metadata = seed_ret['meta-data'] LOG.debug("Using seeded ec2 data from %s", self.seed_dir) + self._cloud_platform = Platforms.SEEDED return True + strict_mode, _sleep = read_strict_mode( + util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH, + STRICT_ID_DEFAULT), ("warn", None)) + + LOG.debug("strict_mode: %s, cloud_platform=%s", + strict_mode, self.cloud_platform) + if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN: + return False + try: if not self.wait_for_metadata_service(): return False @@ -51,8 +73,8 @@ class DataSourceEc2(sources.DataSource): ec2.get_instance_userdata(self.api_ver, self.metadata_address) self.metadata = ec2.get_instance_metadata(self.api_ver, self.metadata_address) - LOG.debug("Crawl of metadata service took %s seconds", - int(time.time() - start_time)) + LOG.debug("Crawl of metadata service took %.3f seconds", + time.time() - start_time) return True except Exception: util.logexc(LOG, "Failed reading from metadata address %s", @@ -190,6 +212,158 @@ class DataSourceEc2(sources.DataSource): return az[:-1] 
return None + @property + def cloud_platform(self): + if self._cloud_platform is None: + self._cloud_platform = identify_platform() + return self._cloud_platform + + def activate(self, cfg, is_new_instance): + if not is_new_instance: + return + if self.cloud_platform == Platforms.UNKNOWN: + warn_if_necessary( + util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT)) + + +def read_strict_mode(cfgval, default): + try: + return parse_strict_mode(cfgval) + except ValueError as e: + LOG.warn(e) + return default + + +def parse_strict_mode(cfgval): + # given a mode like: + # true, false, warn,[sleep] + # return tuple with string mode (true|false|warn) and sleep. + if cfgval is True: + return 'true', None + if cfgval is False: + return 'false', None + + if not cfgval: + return 'warn', 0 + + mode, _, sleep = cfgval.partition(",") + if mode not in ('true', 'false', 'warn'): + raise ValueError( + "Invalid mode '%s' in strict_id setting '%s': " + "Expected one of 'true', 'false', 'warn'." % (mode, cfgval)) + + if sleep: + try: + sleep = int(sleep) + except ValueError: + raise ValueError("Invalid sleep '%s' in strict_id setting '%s': " + "not an integer" % (sleep, cfgval)) + else: + sleep = None + + return mode, sleep + + +def warn_if_necessary(cfgval): + try: + mode, sleep = parse_strict_mode(cfgval) + except ValueError as e: + LOG.warn(e) + return + + if mode == "false": + return + + show_warning(sleep) + + +def show_warning(sleep): + message = textwrap.dedent(""" + **************************************************************** + # This system is using the EC2 Metadata Service, but does not # + # appear to be running on Amazon EC2 or one of cloud-init's # + # known platforms that provide a EC2 Metadata service. 
In the # + # future, cloud-init may stop reading metadata from the EC2 # + # Metadata Service unless the platform can be identified # + # # + # If you are seeing this message, please file a bug against # + # cloud-init at https://bugs.launchpad.net/cloud-init/+filebug # + # Make sure to include the cloud provider your instance is # + # running on. # + # # + # For more information see # + # https://bugs.launchpad.net/cloud-init/+bug/1660385 # + # # + # After you have filed a bug, you can disable this warning by # + # launching your instance with the cloud-config below, or # + # putting that content into # + # /etc/cloud/cloud.cfg.d/99-ec2-datasource.cfg # + # # + # #cloud-config # + # datasource: # + # Ec2: # + # strict_id: false # + # # + """) + closemsg = "" + if sleep: + closemsg = " [sleeping for %d seconds] " % sleep + message += closemsg.center(64, "*") + print(message) + LOG.warn(message) + if sleep: + time.sleep(sleep) + + +def identify_aws(data): + # data is a dictionary returned by _collect_platform_data. + if (data['uuid'].startswith('ec2') and + (data['uuid_source'] == 'hypervisor' or + data['uuid'] == data['serial'])): + return Platforms.AWS + + return None + + +def identify_platform(): + # identify the platform and return an entry in Platforms. 
+ data = _collect_platform_data() + checks = (identify_aws, lambda x: Platforms.UNKNOWN) + for checker in checks: + try: + result = checker(data) + if result: + return result + except Exception as e: + LOG.warn("calling %s with %s raised exception: %s", + checker, data, e) + + +def _collect_platform_data(): + # returns a dictionary with all lower case values: + # uuid: system-uuid from dmi or /sys/hypervisor + # uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' + # serial: dmi 'system-serial-number' (/sys/.../product_serial) + data = {} + try: + uuid = util.load_file("/sys/hypervisor/uuid").strip() + data['uuid_source'] = 'hypervisor' + except Exception: + uuid = util.read_dmi_data('system-uuid') + data['uuid_source'] = 'dmi' + + if uuid is None: + uuid = '' + data['uuid'] = uuid.lower() + + serial = util.read_dmi_data('system-serial-number') + if serial is None: + serial = '' + + data['serial'] = serial.lower() + + return data + # Used to match classes to dependencies datasources = [ diff --git a/tools/ds-identify b/tools/ds-identify index bfb55ed1..dfa856ff 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -635,28 +635,50 @@ ec2_read_strict_setting() { return 0 } -dscheck_Ec2() { - # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html - # http://paste.ubuntu.com/23630859/ - - check_seed_dir "ec2" meta-data user-data && return ${DS_FOUND} - is_container && return ${DS_NOT_FOUND} +ec2_identify_platform() { + local default="$1" + local serial="${DI_DMI_PRODUCT_SERIAL}" + # AWS http://docs.aws.amazon.com/AWSEC2/ + # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" # if the (basically) xen specific /sys/hypervisor/uuid starts with 'ec2' if [ -r "$hvuuid" ] && read uuid < "$hvuuid" && [ "${uuid#ec2}" != "$uuid" ]; then - return ${DS_FOUND} + _RET="AWS" + return 0 fi # product uuid and product serial start with case insensitive - local uuid="${DI_DMI_PRODUCT_UUID}" 
serial="${DI_DMI_PRODUCT_SERIAL}" + local uuid="${DI_DMI_PRODUCT_UUID}" case "$uuid:$serial" in [Ee][Cc]2*:[Ee][Cc]2) # both start with ec2, now check for case insenstive equal - nocase_equal "$uuid" "$serial" && return ${DS_FOUND};; + nocase_equal "$uuid" "$serial" && + { _RET="AWS"; return 0; };; esac + _RET="$default" + return 0; +} + +dscheck_Ec2() { + check_seed_dir "ec2" meta-data user-data && return ${DS_FOUND} + is_container && return ${DS_NOT_FOUND} + + local unknown="Unknown" platform="" + if ec2_identify_platform "$unknown"; then + platform="$_RET" + else + warn "Failed to identify ec2 platform. Using '$unknown'." + platform=$unknown + fi + + debug 1 "ec2 platform is '$platform'." + if [ "$platform" != "$unknown" ]; then + return $DS_FOUND + fi + local default="true" if ec2_read_strict_setting "$default"; then strict="$_RET" -- cgit v1.2.3 From 5dd5b2cb539a84ed59f2b3181020d2bd18989718 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Feb 2017 14:19:20 -0500 Subject: Identify Brightbox as an Ec2 datasource user. Brightbox will identify their platform to the guest by setting the product serial to a string that ends with 'brightbox.com'. LP: #1661693 --- cloudinit/sources/DataSourceEc2.py | 8 +++++++- tools/ds-identify | 5 +++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 26da263a..c7df8060 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -30,6 +30,7 @@ STRICT_ID_DEFAULT = "warn" class Platforms(object): ALIYUN = "AliYun" AWS = "AWS" + BRIGHTBOX = "Brightbox" SEEDED = "Seeded" UNKNOWN = "Unknown" @@ -325,10 +326,15 @@ def identify_aws(data): return None +def identify_brightbox(data): + if data['serial'].endswith('brightbox.com'): + return Platforms.BRIGHTBOX + + def identify_platform(): # identify the platform and return an entry in Platforms. 
data = _collect_platform_data() - checks = (identify_aws, lambda x: Platforms.UNKNOWN) + checks = (identify_aws, identify_brightbox, lambda x: Platforms.UNKNOWN) for checker in checks: try: result = checker(data) diff --git a/tools/ds-identify b/tools/ds-identify index dfa856ff..c39956fc 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -639,6 +639,11 @@ ec2_identify_platform() { local default="$1" local serial="${DI_DMI_PRODUCT_SERIAL}" + # brightbox https://bugs.launchpad.net/cloud-init/+bug/1661693 + case "$serial" in + *brightbox.com) _RET="Brightbox"; return 0;; + esac + # AWS http://docs.aws.amazon.com/AWSEC2/ # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" -- cgit v1.2.3 From 83606aecaae571ce8eb7d6499de028192d82f79b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Feb 2017 15:13:15 -0500 Subject: tools/ds-identify: ovf identify vmware guest customization. cloud-init by default sets 'disable_vmware_customization' to True. So in ds-identify, we will enable the ovf datasource if: - virt is vmware - 'libdeployPkgPlugin.so' exists as installed by vmware-tools or open-vm-tools. - disable_vmware_customization is configured to True --- tools/ds-identify | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/tools/ds-identify b/tools/ds-identify index c39956fc..34bf0643 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -559,10 +559,45 @@ dscheck_OpenNebula() { return ${DS_NOT_FOUND} } +ovf_vmware_guest_customization() { + # vmware guest customization + + # virt provider must be vmware + [ "${DI_VIRT}" = "vmware" ] || return 1 + + # we have to have the plugin to do vmware customization + local found="" pkg="" pre="/usr/lib" + for pkg in vmware-tools open-vm-tools; do + if [ -f "$pre/$pkg/plugins/vmsvc/libdeployPkgPlugin.so" ]; then + found="$pkg"; break; + fi + done + [ -n "$found" ] || return 1 + + # disable_vmware_customization defaults to False. 
+ # any value then other than false means disabled. + local key="disable_vmware_customization" + local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" + match="$bp.d/*[Oo][Vv][Ff]*.cfg" + if check_config "$key" "$match"; then + debug 2 "${_RET_fname} set $key to $_RET" + case "$_RET" in + 0|false|False) return 0;; + *) return;; + esac + fi + + return 1 +} + dscheck_OVF() { local p="" check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}" + if ovf_vmware_guest_customization; then + return ${DS_FOUND} + fi + has_cdrom || return ${DS_NOT_FOUND} # FIXME: currently just return maybe if there is a cdrom -- cgit v1.2.3 From 05afe04edbe4c28f2170194d226821c1e755ee2d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Feb 2017 11:35:35 -0500 Subject: tools/ds-identify: disable vmware_guest_customization by default. ovf_vmware_guest_customization defaults to true in cloud-init, meaning that such customization is disabled. We just missed a return value causing ovf_vmware_guest_customization to effectively default to on. Also, when looking for setting look at /etc/cloud/cloud.cfg. This had been omitted in interest of performance, but we should be looking there. --- tools/ds-identify | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/ds-identify b/tools/ds-identify index 34bf0643..e618963b 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -574,16 +574,17 @@ ovf_vmware_guest_customization() { done [ -n "$found" ] || return 1 - # disable_vmware_customization defaults to False. - # any value then other than false means disabled. + # vmware customization is disabled by default + # (disable_vmware_customization=true). If it is set to false, then + # user has requested customization. 
local key="disable_vmware_customization" local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" - match="$bp.d/*[Oo][Vv][Ff]*.cfg" + match="$bp $bp.d/*[Oo][Vv][Ff]*.cfg" if check_config "$key" "$match"; then debug 2 "${_RET_fname} set $key to $_RET" case "$_RET" in 0|false|False) return 0;; - *) return;; + *) return 1;; esac fi -- cgit v1.2.3 From 4bb60d517da45919310265fa241e1e76b63e97bd Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Feb 2017 11:38:34 -0500 Subject: tools/ds-identify: look at cloud.cfg when looking for ec2 strict_id. In the interest of speed I had skipped the parsing of /etc/cloud/cloud.cfg for the ec2 strict_id setting. In hindsight it seems reasonable for people to put settings there. --- tools/ds-identify | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ds-identify b/tools/ds-identify index e618963b..9711a234 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -651,7 +651,7 @@ ec2_read_strict_setting() { # 3. look for the key 'strict_id' (datasource/Ec2/strict_id) local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" - match="$bp.d/*[Ee][Cc]2*.cfg" + match="$bp $bp.d/*[Ee][Cc]2*.cfg" if check_config strict_id "$match"; then debug 2 "${_RET_fname} set strict_id to $_RET" return 0 -- cgit v1.2.3 From ce63e63d7aaf900bac4339503c5d79ff3bd03d18 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Feb 2017 16:55:02 -0500 Subject: Z99-cloud-locale-test.sh: install and make consistent. Modify upstream packaging to install this file, which was already installed in ubuntu packaging. Also, white space changes from tabs to spaces. Very few things in cloud-init are tabs now. Lastly, remove the executable bit on this as ait is not necessary. Scripts in /etc/profile.d do not have executable bit. 
--- packages/debian/rules.in | 1 + tools/Z99-cloud-locale-test.sh | 148 ++++++++++++++++++++--------------------- 2 files changed, 75 insertions(+), 74 deletions(-) mode change 100755 => 100644 tools/Z99-cloud-locale-test.sh diff --git a/packages/debian/rules.in b/packages/debian/rules.in index 9b004357..3df6053a 100755 --- a/packages/debian/rules.in +++ b/packages/debian/rules.in @@ -11,6 +11,7 @@ override_dh_install: dh_install install -d debian/cloud-init/etc/rsyslog.d cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf + install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh override_dh_auto_test: ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS))) diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh old mode 100755 new mode 100644 index 5912bae2..4978d87e --- a/tools/Z99-cloud-locale-test.sh +++ b/tools/Z99-cloud-locale-test.sh @@ -11,90 +11,90 @@ # of how to fix them. locale_warn() { - local bad_names="" bad_lcs="" key="" val="" var="" vars="" bad_kv="" - local w1 w2 w3 w4 remain + local bad_names="" bad_lcs="" key="" val="" var="" vars="" bad_kv="" + local w1 w2 w3 w4 remain - # if shell is zsh, act like sh only for this function (-L). - # The behavior change will not permenently affect user's shell. - [ "${ZSH_NAME+zsh}" = "zsh" ] && emulate -L sh + # if shell is zsh, act like sh only for this function (-L). + # The behavior change will not permenently affect user's shell. 
+ [ "${ZSH_NAME+zsh}" = "zsh" ] && emulate -L sh - # locale is expected to output either: - # VARIABLE= - # VARIABLE="value" - # locale: Cannot set LC_SOMETHING to default locale - while read -r w1 w2 w3 w4 remain; do - case "$w1" in - locale:) bad_names="${bad_names} ${w4}";; - *) - key=${w1%%=*} - val=${w1#*=} - val=${val#\"} - val=${val%\"} - vars="${vars} $key=$val";; - esac - done - for bad in $bad_names; do - for var in ${vars}; do - [ "${bad}" = "${var%=*}" ] || continue - val=${var#*=} - [ "${bad_lcs#* ${val}}" = "${bad_lcs}" ] && - bad_lcs="${bad_lcs} ${val}" - bad_kv="${bad_kv} $bad=$val" - break - done - done - bad_lcs=${bad_lcs# } - bad_kv=${bad_kv# } - [ -n "$bad_lcs" ] || return 0 + # locale is expected to output either: + # VARIABLE= + # VARIABLE="value" + # locale: Cannot set LC_SOMETHING to default locale + while read -r w1 w2 w3 w4 remain; do + case "$w1" in + locale:) bad_names="${bad_names} ${w4}";; + *) + key=${w1%%=*} + val=${w1#*=} + val=${val#\"} + val=${val%\"} + vars="${vars} $key=$val";; + esac + done + for bad in $bad_names; do + for var in ${vars}; do + [ "${bad}" = "${var%=*}" ] || continue + val=${var#*=} + [ "${bad_lcs#* ${val}}" = "${bad_lcs}" ] && + bad_lcs="${bad_lcs} ${val}" + bad_kv="${bad_kv} $bad=$val" + break + done + done + bad_lcs=${bad_lcs# } + bad_kv=${bad_kv# } + [ -n "$bad_lcs" ] || return 0 - printf "_____________________________________________________________________\n" - printf "WARNING! Your environment specifies an invalid locale.\n" - printf " The unknown environment variables are:\n %s\n" "$bad_kv" - printf " This can affect your user experience significantly, including the\n" - printf " ability to manage packages. You may install the locales by running:\n\n" + printf "_____________________________________________________________________\n" + printf "WARNING! 
Your environment specifies an invalid locale.\n" + printf " The unknown environment variables are:\n %s\n" "$bad_kv" + printf " This can affect your user experience significantly, including the\n" + printf " ability to manage packages. You may install the locales by running:\n\n" - local bad invalid="" to_gen="" sfile="/usr/share/i18n/SUPPORTED" - local pkgs="" - if [ -e "$sfile" ]; then - for bad in ${bad_lcs}; do - grep -q -i "${bad}" "$sfile" && - to_gen="${to_gen} ${bad}" || - invalid="${invalid} ${bad}" - done - else - printf " sudo apt-get install locales\n" - to_gen=$bad_lcs - fi - to_gen=${to_gen# } + local bad invalid="" to_gen="" sfile="/usr/share/i18n/SUPPORTED" + local pkgs="" + if [ -e "$sfile" ]; then + for bad in ${bad_lcs}; do + grep -q -i "${bad}" "$sfile" && + to_gen="${to_gen} ${bad}" || + invalid="${invalid} ${bad}" + done + else + printf " sudo apt-get install locales\n" + to_gen=$bad_lcs + fi + to_gen=${to_gen# } - local pkgs="" - for bad in ${to_gen}; do - pkgs="${pkgs} language-pack-${bad%%_*}" - done - pkgs=${pkgs# } + local pkgs="" + for bad in ${to_gen}; do + pkgs="${pkgs} language-pack-${bad%%_*}" + done + pkgs=${pkgs# } - if [ -n "${pkgs}" ]; then - printf " sudo apt-get install ${pkgs# }\n" - printf " or\n" - printf " sudo locale-gen ${to_gen# }\n" - printf "\n" - fi - for bad in ${invalid}; do - printf "WARNING: '${bad}' is an invalid locale\n" - done + if [ -n "${pkgs}" ]; then + printf " sudo apt-get install ${pkgs# }\n" + printf " or\n" + printf " sudo locale-gen ${to_gen# }\n" + printf "\n" + fi + for bad in ${invalid}; do + printf "WARNING: '${bad}' is an invalid locale\n" + done - printf "To see all available language packs, run:\n" - printf " apt-cache search \"^language-pack-[a-z][a-z]$\"\n" - printf "To disable this message for all users, run:\n" - printf " sudo touch /var/lib/cloud/instance/locale-check.skip\n" - printf "_____________________________________________________________________\n\n" + printf "To see all available 
language packs, run:\n" + printf " apt-cache search \"^language-pack-[a-z][a-z]$\"\n" + printf "To disable this message for all users, run:\n" + printf " sudo touch /var/lib/cloud/instance/locale-check.skip\n" + printf "_____________________________________________________________________\n\n" - # only show the message once - : > ~/.cloud-locale-test.skip 2>/dev/null || : + # only show the message once + : > ~/.cloud-locale-test.skip 2>/dev/null || : } [ -f ~/.cloud-locale-test.skip -o -f /var/lib/cloud/instance/locale-check.skip ] || - locale 2>&1 | locale_warn + locale 2>&1 | locale_warn unset locale_warn -# vi: ts=4 noexpandtab +# vi: ts=4 expandtab -- cgit v1.2.3 From 33518d7d62493c7d00e3792146399c9572abe915 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Feb 2017 16:53:56 -0500 Subject: Add profile.d script for showing warnings on login. Z99-cloudinit-warnings.sh can be dropped into /etc/profile.d. Warnings that are written to /var/lib/cloud/instance/warnings will be displayed to the user on stderr when they log in. 
--- packages/debian/rules.in | 1 + tools/Z99-cloudinit-warnings.sh | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 tools/Z99-cloudinit-warnings.sh diff --git a/packages/debian/rules.in b/packages/debian/rules.in index 3df6053a..053b7649 100755 --- a/packages/debian/rules.in +++ b/packages/debian/rules.in @@ -12,6 +12,7 @@ override_dh_install: install -d debian/cloud-init/etc/rsyslog.d cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh + install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh override_dh_auto_test: ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS))) diff --git a/tools/Z99-cloudinit-warnings.sh b/tools/Z99-cloudinit-warnings.sh new file mode 100644 index 00000000..b237786b --- /dev/null +++ b/tools/Z99-cloudinit-warnings.sh @@ -0,0 +1,30 @@ +#!/bin/sh +# This file is part of cloud-init. See LICENSE file for license information. + +# Purpose: show user warnings on login. + +cloud_init_warnings() { + local skipf="" warning="" idir="/var/lib/cloud/instance" n=0 + local warndir="$idir/warnings" + local ufile="$HOME/.cloud-warnings.skip" sfile="$warndir/.skip" + [ -d "$warndir" ] || return 0 + [ ! -f "$ufile" ] || return 0 + [ ! 
-f "$skipf" ] || return 0 + + for warning in "$warndir"/*; do + [ -f "$warning" ] || continue + cat "$warning" + n=$((n+1)) + done + [ $n -eq 0 ] && return 0 + echo "" + echo "Disable the warnings above by:" + echo " touch $ufile" + echo "or" + echo " touch $sfile" +} + +cloud_init_warnings 1>&2 +unset cloud_init_warnings + +# vi: syntax=sh ts=4 expandtab -- cgit v1.2.3 From ade8c2e0266b020089145075e8236b95c000a3cb Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Feb 2017 17:14:45 -0500 Subject: Move warning functionality to cloudinit/warnings.py This moves the warning code that was added specifically for EC2 into a generic path at cloudinit/warnings.py. It also adds support for writing warning files into the warnings directory to be shown by Z99-cloudinit-warnings.sh. --- cloudinit/helpers.py | 1 + cloudinit/sources/DataSourceEc2.py | 47 ++------------- cloudinit/warnings.py | 115 +++++++++++++++++++++++++++++++++++++ 3 files changed, 121 insertions(+), 42 deletions(-) create mode 100644 cloudinit/warnings.py diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 38f5f899..7435d58d 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -340,6 +340,7 @@ class Paths(object): "vendordata": "vendor-data.txt.i", "instance_id": ".instance-id", "manual_clean_marker": "manual-clean", + "warnings": "warnings", } # Set when a datasource becomes active self.datasource = ds diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index c7df8060..6f01a139 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -9,7 +9,6 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import os -import textwrap import time from cloudinit import ec2_utils as ec2 @@ -17,6 +16,7 @@ from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper as uhelp from cloudinit import util +from cloudinit import warnings LOG = logging.getLogger(__name__) @@ -224,7 +224,8 @@ class DataSourceEc2(sources.DataSource): return if self.cloud_platform == Platforms.UNKNOWN: warn_if_necessary( - util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT)) + util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT), + cfg) def read_strict_mode(cfgval, default): @@ -265,7 +266,7 @@ def parse_strict_mode(cfgval): return mode, sleep -def warn_if_necessary(cfgval): +def warn_if_necessary(cfgval, cfg): try: mode, sleep = parse_strict_mode(cfgval) except ValueError as e: @@ -275,45 +276,7 @@ def warn_if_necessary(cfgval): if mode == "false": return - show_warning(sleep) - - -def show_warning(sleep): - message = textwrap.dedent(""" - **************************************************************** - # This system is using the EC2 Metadata Service, but does not # - # appear to be running on Amazon EC2 or one of cloud-init's # - # known platforms that provide a EC2 Metadata service. In the # - # future, cloud-init may stop reading metadata from the EC2 # - # Metadata Service unless the platform can be identified # - # # - # If you are seeing this message, please file a bug against # - # cloud-init at https://bugs.launchpad.net/cloud-init/+filebug # - # Make sure to include the cloud provider your instance is # - # running on. 
# - # # - # For more information see # - # https://bugs.launchpad.net/cloud-init/+bug/1660385 # - # # - # After you have filed a bug, you can disable this warning by # - # launching your instance with the cloud-config below, or # - # putting that content into # - # /etc/cloud/cloud.cfg.d/99-ec2-datasource.cfg # - # # - # #cloud-config # - # datasource: # - # Ec2: # - # strict_id: false # - # # - """) - closemsg = "" - if sleep: - closemsg = " [sleeping for %d seconds] " % sleep - message += closemsg.center(64, "*") - print(message) - LOG.warn(message) - if sleep: - time.sleep(sleep) + warnings.show_warning('non_ec2_md', cfg, mode=True, sleep=sleep) def identify_aws(data): diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py new file mode 100644 index 00000000..77c092f9 --- /dev/null +++ b/cloudinit/warnings.py @@ -0,0 +1,115 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import helpers +from cloudinit import log as logging +from cloudinit import util + +import os +import time + +LOG = logging.getLogger() + +WARNINGS = { + 'non_ec2_md': """ +This system is using the EC2 Metadata Service, but does not appear to +be running on Amazon EC2 or one of cloud-init's known platforms that +provide a EC2 Metadata service. In the future, cloud-init may stop +reading metadata from the EC2 Metadata Service unless the platform can +be identified. + +If you are seeing this message, please file a bug against +cloud-init at + https://bugs.launchpad.net/cloud-init/+filebug?field.tags=dsid +Make sure to include the cloud provider your instance is +running on. 
+ +For more information see + https://bugs.launchpad.net/bugs/1660385 + +After you have filed a bug, you can disable this warning by +launching your instance with the cloud-config below, or +putting that content into + /etc/cloud/cloud.cfg.d/99-ec2-datasource.cfg + +#cloud-config +datasource: + Ec2: + strict_id: false""", +} + + +def _get_warn_dir(cfg): + paths = helpers.Paths( + path_cfgs=cfg.get('system_info', {}).get('paths', {})) + return paths.get_ipath_cur('warnings') + + +def _load_warn_cfg(cfg, name, mode=True, sleep=None): + # parse cfg['warnings']['name'] returning boolean, sleep + # expected value is form of: + # (on|off|true|false|sleep)[,sleeptime] + # boolean True == on, False == off + default = (mode, sleep) + if not cfg or not isinstance(cfg, dict): + return default + + ncfg = util.get_cfg_by_path(cfg, ('warnings', name)) + if ncfg is None: + return default + + if ncfg in ("on", "true", True): + return True, None + + if ncfg in ("off", "false", False): + return False, None + + mode, _, csleep = ncfg.partition(",") + if mode != "sleep": + return default + + if csleep: + try: + sleep = int(csleep) + except ValueError: + return default + + return True, sleep + + +def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs): + # kwargs are used for .format of the message. + # sleep and mode are default values used if + # cfg['warnings']['name'] is not present. 
+ if cfg is None: + cfg = {} + + mode, sleep = _load_warn_cfg(cfg, name, mode=mode, sleep=sleep) + if not mode: + return + + msg = WARNINGS[name].format(**kwargs) + msgwidth = 70 + linewidth = msgwidth + 4 + + fmt = "# %%-%ds #" % msgwidth + topline = "*" * linewidth + "\n" + fmtlines = [] + for line in msg.strip("\n").splitlines(): + fmtlines.append(fmt % line) + + closeline = topline + if sleep: + sleepmsg = " [sleeping for %d seconds] " % sleep + closeline = sleepmsg.center(linewidth, "*") + "\n" + + util.write_file( + os.path.join(_get_warn_dir(cfg), name), + topline + "\n".join(fmtlines) + "\n" + topline) + + LOG.warn(topline + "\n".join(fmtlines) + "\n" + closeline) + + if sleep: + LOG.debug("sleeping %d seconds for warning '%s'" % (sleep, name)) + time.sleep(sleep) + +# vi: ts=4 expandtab -- cgit v1.2.3 From 51a24555e5e7af709caa8dab1a5e6c7e7f317b17 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 1 Mar 2017 12:12:15 -0500 Subject: tools/ds-identify: make report mode write namespaced results. Now, when ds-identify runs in report mode, it still writes to /run/cloud-init.cfg as search does, but it will namespace the result under the top level 'di_report' entry. --- tools/ds-identify | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/tools/ds-identify b/tools/ds-identify index 9711a234..fd2a46c8 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -10,8 +10,9 @@ # default setting is: # search,found=all,maybe=all,notfound=disable # -# report: write config to /run/cloud-init/cloud.cfg.report (instead of -# /run/cloud-init/cloud.cfg, which effectively makes this dry-run). +# report: write config to /run/cloud-init/cloud.cfg, but +# namespaced under 'di_report'. Thus cloud-init can still see +# the result, but has no effect. 
# enable: do nothing # ds-identify writes no config and just exits success # the caller (cloud-init-generator) then enables cloud-init to run @@ -867,15 +868,16 @@ _print_info() { } write_result() { - local runcfg="${PATH_RUN_CI_CFG}" ret="" line="" - if [ "$DI_REPORT" = "true" ]; then - # if report is true, then we write to .report, but touch the other. - : > "$runcfg" - runcfg="$runcfg.report" - fi - for line in "$@"; do - echo "$line" - done > "$runcfg" + local runcfg="${PATH_RUN_CI_CFG}" ret="" line="" pre="" + { + if [ "$DI_REPORT" = "true" ]; then + echo "di_report:" + pre=" " + fi + for line in "$@"; do + echo "${pre}$line"; + done + } > "$runcfg" ret=$? [ $ret -eq 0 ] || { error "failed to write to ${runcfg}" @@ -956,6 +958,7 @@ _read_config() { if [ "$keyname" = "_unset" ]; then return 1 fi + _RET="" return 0 } -- cgit v1.2.3 From 79db2e2436d91510aceb8c036c4a945362c85a52 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 1 Mar 2017 15:50:40 -0500 Subject: Support warning if the used datasource is not in ds-identify's list. If ds-identify is in report mode, and the datasource that is found is not in the list, then warn the user of this situation. 
--- cloudinit/cmd/main.py | 39 +++++++++++++++++++++++++++++++++++++++ cloudinit/warnings.py | 24 ++++++++++++++++++++++++ 2 files changed, 63 insertions(+) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 7c652574..6ff4e1c0 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -29,6 +29,7 @@ from cloudinit import templater from cloudinit import url_helper from cloudinit import util from cloudinit import version +from cloudinit import warnings from cloudinit import reporting from cloudinit.reporting import events @@ -413,10 +414,48 @@ def main_init(name, args): # give the activated datasource a chance to adjust init.activate_datasource() + di_report_warn(datasource=init.datasource, cfg=init.cfg) + # Stage 10 return (init.datasource, run_module_section(mods, name, name)) +def di_report_warn(datasource, cfg): + if 'di_report' not in cfg: + LOG.debug("no di_report found in config.") + return + + dicfg = cfg.get('di_report', {}) + if not isinstance(dicfg, dict): + LOG.warn("di_report config not a dictionary: %s", dicfg) + return + + dslist = dicfg.get('datasource_list') + if dslist is None: + LOG.warn("no 'datasource_list' found in di_report.") + return + elif not isinstance(dslist, list): + LOG.warn("di_report/datasource_list not a list: %s", dslist) + return + + # ds.__module__ is like cloudinit.sources.DataSourceName + # where Name is the thing that shows up in datasource_list. 
+ modname = datasource.__module__.rpartition(".")[2] + if modname.startswith(sources.DS_PREFIX): + modname = modname[len(sources.DS_PREFIX):] + else: + LOG.warn("Datasource '%s' came from unexpected module '%s'.", + datasource, modname) + + if modname in dslist: + LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s", + datasource, modname, dslist) + return + + warnings.show_warning('dsid_missing_source', cfg, + source=modname, dslist=str(dslist)) + + def main_modules(action_name, args): name = args.mode # Cloud-init 'modules' stages are broken up into the following sub-stages diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py index 77c092f9..3206d4e9 100644 --- a/cloudinit/warnings.py +++ b/cloudinit/warnings.py @@ -35,6 +35,30 @@ putting that content into datasource: Ec2: strict_id: false""", + 'dsid_missing_source': """ +A new feature in cloud-init identified possible datasources for +this system as: + {dslist} +However, the datasource used was: {source} + +In the future, cloud-init will only attempt to use datasources that +are identified or specifically configured. +For more information see + https://bugs.launchpad.net/bugs/1669675 + +If you are seeing this message, please file a bug against +cloud-init at + https://bugs.launchpad.net/cloud-init/+filebug?field.tags=dsid +Make sure to include the cloud provider your instance is +running on. + +After you have filed a bug, you can disable this warning by launching +your instance with the cloud-config below, or putting that content +into /etc/cloud/cloud.cfg.d/99-warnings.cfg + +#cloud-config +warnings: + dsid_missing_source: off""", } -- cgit v1.2.3 From d914ed8e573d464c4d21aa41069beb73fd3ce9be Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 2 Mar 2017 21:45:47 -0500 Subject: ds-identify: record not found in cloud.cfg and always add None. On a 'not found' result, was not being written at all. That had the unintended effect of '--force' not working. 
Now, on a 'not found' result: - if reporting: write the list as found (with just 'None'). - if not reporting: only report that there was nothing found. this means that the warning cloud-init will write about ds-identify failing to find a datasource will be written, but cloud-init will still search its fully configured list. --- tools/ds-identify | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/tools/ds-identify b/tools/ds-identify index fd2a46c8..741cf3ae 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -886,10 +886,23 @@ write_result() { return 0 } +record_notfound() { + # in report mode, report nothing was found. + # if not report mode: only report the negative result. + # reporting an empty list would mean cloud-init would not search + # any datasources. + if [ "$DI_REPORT" = "true" ]; then + found -- + else + local msg="# reporting not found result. notfound=${DI_ON_NOTFOUND}." + local DI_REPORT="true" + found -- "$msg" + fi +} + found() { # found(ds1, [ds2 ...], [-- [extra lines]]) local list="" ds="" - # always we write the None datasource last. while [ $# -ne 0 ]; do if [ "$1" = "--" ]; then shift @@ -902,6 +915,8 @@ found() { # do not pass an empty line through. shift fi + # always write the None datasource last. + list="${list:+${list}, }None" write_result "datasource_list: [ $list ]" "$@" return } @@ -1173,13 +1188,15 @@ _main() { return fi + # record the empty result. + record_notfound case "$DI_ON_NOTFOUND" in $DI_DISABLED) debug 1 "No result. notfound=$DI_DISABLED. returning $ret_dis." return $ret_dis ;; $DI_ENABLED) - debug 1 "notfound=$DI_ENABLED. returning $ret_en" + debug 1 "No result. notfound=$DI_ENABLED. returning $ret_en" return $ret_en;; esac -- cgit v1.2.3 From c81ea53bbdc4ada9d2b52430e106aeb3c38b4e0a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 2 Mar 2017 23:19:45 -0500 Subject: ds-identify: move default setting for Ec2/strict_id to a global. 
Rather than having the dscheck_Ec2 just know the setting, move it up to a more formal declaration. This will make it look more clean when a distro carries a patch to change it to warn. --- tools/ds-identify | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/ds-identify b/tools/ds-identify index 741cf3ae..d7b2a0b2 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -109,6 +109,7 @@ DI_ON_FOUND="" DI_ON_MAYBE="" DI_ON_NOTFOUND="" +DI_EC2_STRICT_ID_DEFAULT="true" error() { set -- "ERROR:" "$@"; @@ -721,7 +722,7 @@ dscheck_Ec2() { return $DS_FOUND fi - local default="true" + local default="${DI_EC2_STRICT_ID_DEFAULT}" if ec2_read_strict_setting "$default"; then strict="$_RET" else -- cgit v1.2.3