From a1b185d0cce5064e9b36b4db7b55564e2ab1d7a8 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 20 Oct 2016 22:53:17 -0400 Subject: Get early logging logged, including failures of cmdline url. Failures to load the kernel command line's url (cloud-config-url=) would previously get swallowed. This should make it much more obvious when that happens. With logging going to expected places at sane levels (WARN will go to stderr by default). --- cloudinit/cmd/main.py | 118 +++++++++++++++++++++++++++++++++++++++++++------- cloudinit/util.py | 44 ------------------- 2 files changed, 103 insertions(+), 59 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index c83496c5..65b15edc 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -26,6 +26,7 @@ from cloudinit import signal_handler from cloudinit import sources from cloudinit import stages from cloudinit import templater +from cloudinit import url_helper from cloudinit import util from cloudinit import version @@ -129,23 +130,104 @@ def apply_reporting_cfg(cfg): reporting.update_configuration(cfg.get('reporting')) +def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')): + data = util.keyval_str_to_dict(cmdline) + for key in names: + if key in data: + return key, data[key] + raise KeyError("No keys (%s) found in string '%s'" % + (cmdline, names)) + + +def attempt_cmdline_url(path, network=True, cmdline=None): + """Write data from url referenced in command line to path. + + path: a file to write content to if downloaded. + network: should network access be assumed. + cmdline: the cmdline to parse for cloud-config-url. + + This is used in MAAS datasource, in "ephemeral" (read-only root) + environment where the instance netboots to iscsi ro root. + and the entity that controls the pxe config has to configure + the maas datasource. + + An attempt is made on network urls even in local datasource + for case of network set up in initramfs. 
+ + Return value is a tuple of a logger function (logging.DEBUG) + and a message indicating what happened. + """ + + if cmdline is None: + cmdline = util.get_cmdline() + + try: + cmdline_name, url = parse_cmdline_url(cmdline) + except KeyError: + return (logging.DEBUG, "No kernel command line url found.") + + path_is_local = url.startswith("file://") or url.startswith("/") + + if path_is_local and os.path.exists(path): + if network: + m = ("file '%s' existed, possibly from local stage download" + " of command line url '%s'. Not re-writing." % (path, url)) + level = logging.INFO + if path_is_local: + level = logging.DEBUG + else: + m = ("file '%s' existed, possibly from previous boot download" + " of command line url '%s'. Not re-writing." % (path, url)) + level = logging.WARN + + return (level, m) + + kwargs = {'url': url, 'timeout': 10, 'retries': 2} + if network or path_is_local: + level = logging.WARN + kwargs['sec_between'] = 1 + else: + level = logging.DEBUG + kwargs['sec_between'] = .1 + + data = None + header = b'#cloud-config' + try: + resp = util.read_file_or_url(**kwargs) + if resp.ok(): + data = resp.contents + if not resp.contents.startswith(header): + if cmdline_name == 'cloud-config-url': + level = logging.WARN + else: + level = logging.INFO + return ( + level, + "contents of '%s' did not start with %s" % (url, header)) + else: + return (level, + "url '%s' returned code %s. Ignoring." 
% (url, resp.code)) + + except url_helper.UrlError as e: + return (level, "retrieving url '%s' failed: %s" % (url, e)) + + util.write_file(path, data, mode=0o600) + return (logging.INFO, + "wrote cloud-config data from %s='%s' to %s" % + (cmdline_name, url, path)) + + def main_init(name, args): deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] if args.local: deps = [sources.DEP_FILESYSTEM] - if not args.local: - # See doc/kernel-cmdline.txt - # - # This is used in maas datasource, in "ephemeral" (read-only root) - # environment where the instance netboots to iscsi ro root. - # and the entity that controls the pxe config has to configure - # the maas datasource. - # - # Could be used elsewhere, only works on network based (not local). - root_name = "%s.d" % (CLOUD_CONFIG) - target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg") - util.read_write_cmdline_url(target_fn) + early_logs = [] + early_logs.append( + attempt_cmdline_url( + path=os.path.join("%s.d" % CLOUD_CONFIG, + "91_kernel_cmdline_url.cfg"), + network=not args.local)) # Cloud-init 'init' stage is broken up into the following sub-stages # 1. Ensure that the init object fetches its config without errors @@ -171,12 +253,14 @@ def main_init(name, args): outfmt = None errfmt = None try: - LOG.debug("Closing stdin") + early_logs.append((logging.DEBUG, "Closing stdin.")) util.close_stdin() (outfmt, errfmt) = util.fixup_output(init.cfg, name) except Exception: - util.logexc(LOG, "Failed to setup output redirection!") - print_exc("Failed to setup output redirection!") + msg = "Failed to setup output redirection!" + util.logexc(LOG, msg) + print_exc(msg) + early_logs.append((logging.WARN, msg)) if args.debug: # Reset so that all the debug handlers are closed out LOG.debug(("Logging being reset, this logger may no" @@ -190,6 +274,10 @@ def main_init(name, args): # been redirected and log now configured. 
welcome(name, msg=w_msg) + # re-play early log messages before logging was setup + for lvl, msg in early_logs: + LOG.log(lvl, msg) + # Stage 3 try: init.initialize() diff --git a/cloudinit/util.py b/cloudinit/util.py index 5725129e..7196a7ca 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1089,31 +1089,6 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"): return fqdn -def get_cmdline_url(names=('cloud-config-url', 'url'), - starts=b"#cloud-config", cmdline=None): - if cmdline is None: - cmdline = get_cmdline() - - data = keyval_str_to_dict(cmdline) - url = None - key = None - for key in names: - if key in data: - url = data[key] - break - - if not url: - return (None, None, None) - - resp = read_file_or_url(url) - # allow callers to pass starts as text when comparing to bytes contents - starts = encode_text(starts) - if resp.ok() and resp.contents.startswith(starts): - return (key, url, resp.contents) - - return (key, url, None) - - def is_resolvable(name): """determine if a url is resolvable, return a boolean This also attempts to be resilent against dns redirection. 
@@ -1475,25 +1450,6 @@ def ensure_dirs(dirlist, mode=0o755): ensure_dir(d, mode) -def read_write_cmdline_url(target_fn): - if not os.path.exists(target_fn): - try: - (key, url, content) = get_cmdline_url() - except Exception: - logexc(LOG, "Failed fetching command line url") - return - try: - if key and content: - write_file(target_fn, content, mode=0o600) - LOG.debug(("Wrote to %s with contents of command line" - " url %s (len=%s)"), target_fn, url, len(content)) - elif key and not content: - LOG.debug(("Command line key %s with url" - " %s had no contents"), key, url) - except Exception: - logexc(LOG, "Failed writing url content to %s", target_fn) - - def yaml_dumps(obj, explicit_start=True, explicit_end=True): return yaml.safe_dump(obj, line_break="\n", -- cgit v1.2.3 From a3daf184fd47dede8d91588281437453bd38fc1c Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Thu, 1 Dec 2016 19:40:36 -0500 Subject: Use dnf instead of yum when available Recent fedora releases use "dnf" instead of "yum" for package management. While there is a compatible "yum" cli available, there's no guarantee that it will be available. With this patch, cloud-init will check for /usr/bin/dnf and use that if it exists instead of yum. rhbz: https://bugzilla.redhat.com/show_bug.cgi?id=1194451 LP: #1647118 --- cloudinit/distros/rhel.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index aa558381..7498c63a 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -190,13 +190,18 @@ class Distro(distros.Distro): if pkgs is None: pkgs = [] - cmd = ['yum'] - # If enabled, then yum will be tolerant of errors on the command line - # with regard to packages. - # For example: if you request to install foo, bar and baz and baz is - # installed; yum won't error out complaining that baz is already - # installed. 
- cmd.append("-t") + if util.which('dnf'): + LOG.debug('Using DNF for package management') + cmd = ['dnf'] + else: + LOG.debug('Using YUM for package management') + # the '-t' argument makes yum tolerant of errors on the command + # line with regard to packages. + # + # For example: if you request to install foo, bar and baz and baz + # is installed; yum won't error out complaining that baz is already + # installed. + cmd = ['yum', '-t'] # Determines whether or not yum prompts for confirmation # of critical actions. We don't want to prompt... cmd.append("-y") -- cgit v1.2.3 From e2274393b882c723ab93189c57e7e68a46e4e10f Mon Sep 17 00:00:00 2001 From: Jeremy Bicha Date: Thu, 12 Jan 2017 20:00:55 -0500 Subject: Fix minor docs typo: perserve > preserve --- cloudinit/config/cc_set_hostname.py | 2 +- doc/examples/cloud-config.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index e42799f9..aa3dfe5f 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -27,7 +27,7 @@ will be used. **Config keys**:: - perserve_hostname: + preserve_hostname: fqdn: hostname: """ diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index c5f84b13..c03f1026 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -200,7 +200,7 @@ ssh_import_id: [smoser] # # Default: none # -debconf_selections: | # Need to perserve newlines +debconf_selections: | # Need to preserve newlines # Force debconf priority to critical. debconf debconf/priority select critical -- cgit v1.2.3 From 8ddb57149281ba2658696f19c1eb96e7769381e4 Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Tue, 6 Sep 2016 14:51:32 -0700 Subject: Fixed Misc issues related to VMware customization. - staticIPV4 property can be either None or a valid Array. Need to check for None before accessing the ip address. - Modified few misc. 
log messages. - Added a new log message while waiting for the customization config file. - Added support to configure the maximum amount of time to wait for the customization config file. - VMware Customization Support is provided only for DataSourceOVF class and not for any other child classes. Implemented a new variable vmware_customization_supported to check whether the 'VMware Customization' support is available for a specific datasource or not. - Changed the function get_vmware_cust_settings to get_max_wait_from_cfg. - Removed the code that does 'ifdown and iup' in NIC configurator. --- cloudinit/sources/DataSourceOVF.py | 37 +++++++++++++++++++--- cloudinit/sources/helpers/vmware/imc/config_nic.py | 24 ++++---------- 2 files changed, 39 insertions(+), 22 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 78928c77..d70784ac 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -48,6 +48,7 @@ class DataSourceOVF(sources.DataSource): self.environment = None self.cfg = {} self.supported_seed_starts = ("/", "file://") + self.vmware_customization_supported = True def __str__(self): root = sources.DataSource.__str__(self) @@ -78,7 +79,10 @@ class DataSourceOVF(sources.DataSource): found.append(seed) elif system_type and 'vmware' in system_type.lower(): LOG.debug("VMware Virtualization Platform found") - if not util.get_cfg_option_bool( + if not self.vmware_customization_supported: + LOG.debug("Skipping the check for " + "VMware Customization support") + elif not util.get_cfg_option_bool( self.sys_cfg, "disable_vmware_customization", True): deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so") @@ -90,17 +94,18 @@ class DataSourceOVF(sources.DataSource): # copies the customization specification file to # /var/run/vmware-imc directory. cloud-init code needs # to search for the file in that directory. 
+ max_wait = get_max_wait_from_cfg(self.ds_cfg) vmwareImcConfigFilePath = util.log_time( logfunc=LOG.debug, msg="waiting for configuration file", func=wait_for_imc_cfg_file, - args=("/var/run/vmware-imc", "cust.cfg")) + args=("/var/run/vmware-imc", "cust.cfg", max_wait)) if vmwareImcConfigFilePath: - LOG.debug("Found VMware DeployPkg Config File at %s" % + LOG.debug("Found VMware Customization Config File at %s", vmwareImcConfigFilePath) else: - LOG.debug("Did not find VMware DeployPkg Config File Path") + LOG.debug("Did not find VMware Customization Config File") else: LOG.debug("Customization for VMware platform is disabled.") @@ -206,6 +211,29 @@ class DataSourceOVFNet(DataSourceOVF): DataSourceOVF.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net') self.supported_seed_starts = ("http://", "https://", "ftp://") + self.vmware_customization_supported = False + + +def get_max_wait_from_cfg(cfg): + default_max_wait = 90 + max_wait_cfg_option = 'vmware_cust_file_max_wait' + max_wait = default_max_wait + + if not cfg: + return max_wait + + try: + max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait)) + except ValueError: + LOG.warn("Failed to get '%s', using %s", + max_wait_cfg_option, default_max_wait) + + if max_wait <= 0: + LOG.warn("Invalid value '%s' for '%s', using '%s' instead", + max_wait, max_wait_cfg_option, default_max_wait) + max_wait = default_max_wait + + return max_wait def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5): @@ -215,6 +243,7 @@ def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5): fileFullPath = search_file(dirpath, filename) if fileFullPath: return fileFullPath + LOG.debug("Waiting for VMware Customization Config File") time.sleep(naplen) waited += naplen return None diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index d5a7c346..67ac21db 100644 --- 
a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -101,7 +101,11 @@ class NicConfigurator(object): return lines # Static Ipv4 - v4 = nic.staticIpv4 + addrs = nic.staticIpv4 + if not addrs: + return lines + + v4 = addrs[0] if v4.ip: lines.append(' address %s' % v4.ip) if v4.netmask: @@ -197,22 +201,6 @@ class NicConfigurator(object): util.subp(["pkill", "dhclient"], rcs=[0, 1]) util.subp(["rm", "-f", "/var/lib/dhcp/*"]) - def if_down_up(self): - names = [] - for nic in self.nics: - name = self.mac2Name.get(nic.mac.lower()) - names.append(name) - - for name in names: - logger.info('Bring down interface %s' % name) - util.subp(["ifdown", "%s" % name]) - - self.clear_dhcp() - - for name in names: - logger.info('Bring up interface %s' % name) - util.subp(["ifup", "%s" % name]) - def configure(self): """ Configure the /etc/network/intefaces @@ -232,6 +220,6 @@ class NicConfigurator(object): for line in lines: fp.write('%s\n' % line) - self.if_down_up() + self.clear_dhcp() # vi: ts=4 expandtab -- cgit v1.2.3 From 4cf53f1544f8f5629330eab3efef1a18255c277a Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Tue, 17 Jan 2017 08:53:22 -0500 Subject: OpenStack: Use timeout and retries from config in get_data. This modifies get_data in DataSourceOpenStack.py to get the timeout and retries values from the data source configuration, rather than from keyword arguments. This permits get_data to use the same timeout as other methods, and allows an operator to increase the timeout in environments where the metadata service takes longer than five seconds to respond. 
LP: #1657130 Resolves: rhbz#1408589 --- cloudinit/sources/DataSourceOpenStack.py | 15 ++++++++++++--- tests/unittests/test_datasource/test_openstack.py | 8 ++++---- 2 files changed, 16 insertions(+), 7 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 2a58f1cd..e1ea21f8 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -45,6 +45,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): # max_wait < 0 indicates do not wait max_wait = -1 timeout = 10 + retries = 5 try: max_wait = int(self.ds_cfg.get("max_wait", max_wait)) @@ -55,7 +56,13 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): timeout = max(0, int(self.ds_cfg.get("timeout", timeout))) except Exception: util.logexc(LOG, "Failed to get timeout, using %s", timeout) - return (max_wait, timeout) + + try: + retries = int(self.ds_cfg.get("retries", retries)) + except Exception: + util.logexc(LOG, "Failed to get max wait. 
using %s", retries) + + return (max_wait, timeout, retries) def wait_for_metadata_service(self): urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL]) @@ -76,7 +83,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): md_urls.append(md_url) url2base[md_url] = url - (max_wait, timeout) = self._get_url_settings() + (max_wait, timeout, retries) = self._get_url_settings() start_time = time.time() avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, timeout=timeout) @@ -89,13 +96,15 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): self.metadata_address = url2base.get(avail_url) return bool(avail_url) - def get_data(self, retries=5, timeout=5): + def get_data(self): try: if not self.wait_for_metadata_service(): return False except IOError: return False + (max_wait, timeout, retries) = self._get_url_settings() + try: results = util.log_time(LOG.debug, 'Crawl of openstack metadata service', diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index e5b6fcc6..28e1833e 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -232,7 +232,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): None, helpers.Paths({})) self.assertIsNone(ds_os.version) - found = ds_os.get_data(timeout=0.1, retries=0) + found = ds_os.get_data() self.assertTrue(found) self.assertEqual(2, ds_os.version) md = dict(ds_os.metadata) @@ -256,7 +256,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): None, helpers.Paths({})) self.assertIsNone(ds_os.version) - found = ds_os.get_data(timeout=0.1, retries=0) + found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) @@ -275,7 +275,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): 'timeout': 0, } self.assertIsNone(ds_os.version) - found = ds_os.get_data(timeout=0.1, retries=0) + found = ds_os.get_data() 
self.assertFalse(found) self.assertIsNone(ds_os.version) @@ -298,7 +298,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): 'timeout': 0, } self.assertIsNone(ds_os.version) - found = ds_os.get_data(timeout=0.1, retries=0) + found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) -- cgit v1.2.3 From b71592ce0e0a9f9f9f225315015ca57b312ad30d Mon Sep 17 00:00:00 2001 From: Andrew Jorgensen Date: Tue, 1 Nov 2016 10:54:31 -0400 Subject: EC2: Do not cache security credentials on disk On EC2, instance metadata can include credentials that remain valid for as much as 6 hours. Reading these and allowing them to be pickled represents a potential vulnerability if a snapshot of the disk is taken and shared as part of an AMI. This skips security-credentials when walking the meta-data tree. LP: #1638312 Reviewed-by: Ian Weller Reviewed-by: Ben Cressey Reported-by: Kyle Barnes --- cloudinit/ec2_utils.py | 3 +++ tests/unittests/test_ec2_util.py | 45 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index c656ef14..0c16ae47 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -82,6 +82,9 @@ class MetadataMaterializer(object): field_name = get_name(field) if not field or not field_name: continue + # Don't materialize credentials + if field_name == 'security-credentials': + continue if has_children(field): if field_name not in children: children.append(field_name) diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py index 4a33d747..71c2009f 100644 --- a/tests/unittests/test_ec2_util.py +++ b/tests/unittests/test_ec2_util.py @@ -140,4 +140,49 @@ class TestEc2Util(helpers.HttprettyTestCase): self.assertEqual(bdm['ami'], 'sdb') self.assertEqual(bdm['ephemeral0'], 'sdc') + @hp.activate + def test_metadata_no_security_credentials(self): + base_url = 'http://169.254.169.254/%s/meta-data/' % 
(self.VERSION) + hp.register_uri(hp.GET, base_url, status=200, + body="\n".join(['instance-id', + 'iam/'])) + hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'), + status=200, body='i-0123451689abcdef0') + hp.register_uri(hp.GET, + uh.combine_url(base_url, 'iam/'), + status=200, + body="\n".join(['info/', 'security-credentials/'])) + hp.register_uri(hp.GET, + uh.combine_url(base_url, 'iam/info/'), + status=200, + body='LastUpdated') + hp.register_uri(hp.GET, + uh.combine_url(base_url, 'iam/info/LastUpdated'), + status=200, body='2016-10-27T17:29:39Z') + hp.register_uri(hp.GET, + uh.combine_url(base_url, 'iam/security-credentials/'), + status=200, + body='ReadOnly/') + hp.register_uri(hp.GET, + uh.combine_url(base_url, + 'iam/security-credentials/ReadOnly/'), + status=200, + body="\n".join(['LastUpdated', 'Expiration'])) + hp.register_uri(hp.GET, + uh.combine_url( + base_url, + 'iam/security-credentials/ReadOnly/LastUpdated'), + status=200, body='2016-10-27T17:28:17Z') + hp.register_uri(hp.GET, + uh.combine_url( + base_url, + 'iam/security-credentials/ReadOnly/Expiration'), + status=200, body='2016-10-28T00:00:34Z') + md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1) + self.assertEqual(md['instance-id'], 'i-0123451689abcdef0') + iam = md['iam'] + self.assertEqual(1, len(iam)) + self.assertEqual(iam['info']['LastUpdated'], '2016-10-27T17:29:39Z') + self.assertNotIn('security-credentials', iam) + # vi: ts=4 expandtab -- cgit v1.2.3 From 853df0a0e85002582694b88db886f206f64b23c7 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Fri, 20 Jan 2017 14:32:08 -0500 Subject: Add 3 ecdsa-sha2-nistp* ssh key types now that they are standardized cloud-init adds ssh_authorized_keys to the default user and to root but for root it disables the keys with a prefix command. However, if the public_key key is of type ecdsa-sha2-nistp521, it is not parsed correctly, and the prefix command is not prepended. 
Resolves: rhbz#1151824 LP: #1658174 --- cloudinit/ssh_util.py | 3 +++ tests/unittests/test_sshutil.py | 24 +++++++++++++++++++++++- 2 files changed, 26 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index be8a49e8..b95b956f 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -22,8 +22,11 @@ DEF_SSHD_CFG = "/etc/ssh/sshd_config" VALID_KEY_TYPES = ( "dsa", "ecdsa", + "ecdsa-sha2-nistp256", "ecdsa-sha2-nistp256-cert-v01@openssh.com", + "ecdsa-sha2-nistp384", "ecdsa-sha2-nistp384-cert-v01@openssh.com", + "ecdsa-sha2-nistp521", "ecdsa-sha2-nistp521-cert-v01@openssh.com", "ed25519", "rsa", diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 55971b5e..991f45a6 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -32,6 +32,22 @@ VALID_CONTENT = { "YWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07" "/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw==" ), + 'ecdsa-sha2-nistp256': ( + "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMy/WuXq5MF" + "r5hVQ9EEKKUTF7vUaOkgxUh6bNsCs9SFMVslIm1zM/WJYwUv52LdEePjtDYiV4A" + "l2XthJ9/bs7Pc=" + ), + 'ecdsa-sha2-nistp521': ( + "AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBABOdNTkh9F" + "McK4hZRLs5LTXBEXwNr0+Yg9uvJYRFcz2ZlnjYX9tM4Z3QQFjqogU4pU+zpKLqZ" + "5VE4Jcnb1T608UywBIdXkSFZT8trGJqBv9nFWGgmTX3KP8kiBbihpuv1cGwglPl" + "Hxs50A42iP0JiT7auGtEAGsu/uMql323GTGb4171Q==" + ), + 'ecdsa-sha2-nistp384': ( + "AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBAnoqFU9Gnl" + "LcsEuCJnobs/c6whzvjCgouaOO61kgXNtIxyF4Wkutg6xaGYgBBt/phb7a2TurI" + "bcIBuzJ/mP22UyUAbNnBfStAEBmYbrTf1EfiMCYUAr1XnL0UdYmZ8HFg==" + ), } TEST_OPTIONS = ( @@ -44,7 +60,13 @@ class TestAuthKeyLineParser(test_helpers.TestCase): def test_simple_parse(self): # test key line with common 3 fields (keytype, base64, comment) parser = ssh_util.AuthKeyLineParser() - for ktype in ['rsa', 'ecdsa', 'dsa']: + 
ecdsa_types = [ + 'ecdsa-sha2-nistp256', + 'ecdsa-sha2-nistp384', + 'ecdsa-sha2-nistp521', + ] + + for ktype in ['rsa', 'ecdsa', 'dsa'] + ecdsa_types: content = VALID_CONTENT[ktype] comment = 'user-%s@host' % ktype line = ' '.join((ktype, content, comment,)) -- cgit v1.2.3 From 2de1c247e285cce0b25ab70abdc56ccd41019c27 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 25 Jan 2017 15:45:40 -0600 Subject: Fix eni rendering of multiple IPs per interface The iface:alias syntax for eni rendering is brittle with ipv6. Replace it with using multiple iface stanzas with the same iface name which is supported. Side-effect is that one can no longer do 'ifup $iface:$alias' but requires instead use of ip address {add|delete} instead. LP: #1657940 --- cloudinit/net/eni.py | 33 ++++++++++++++++++-------------- tests/unittests/test_net.py | 46 +++++++++++++++++++++++++++++++++------------ 2 files changed, 53 insertions(+), 26 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index b06ffac9..5b249f1f 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -90,8 +90,6 @@ def _iface_add_attrs(iface, index): def _iface_start_entry(iface, index, render_hwaddress=False): fullname = iface['name'] - if index != 0: - fullname += ":%s" % index control = iface['control'] if control == "auto": @@ -113,6 +111,16 @@ def _iface_start_entry(iface, index, render_hwaddress=False): return lines +def _subnet_is_ipv6(subnet): + # 'static6' or 'dhcp6' + if subnet['type'].endswith('6'): + # This is a request for DHCPv6. + return True + elif subnet['type'] == 'static' and ":" in subnet['address']: + return True + return False + + def _parse_deb_config_data(ifaces, contents, src_dir, src_path): """Parses the file contents, placing result into ifaces. 
@@ -354,21 +362,23 @@ class Renderer(renderer.Renderer): sections = [] subnets = iface.get('subnets', {}) if subnets: - for index, subnet in zip(range(0, len(subnets)), subnets): + for index, subnet in enumerate(subnets): iface['index'] = index iface['mode'] = subnet['type'] iface['control'] = subnet.get('control', 'auto') subnet_inet = 'inet' - if iface['mode'].endswith('6'): - # This is a request for DHCPv6. - subnet_inet += '6' - elif iface['mode'] == 'static' and ":" in subnet['address']: - # This is a static IPv6 address. + if _subnet_is_ipv6(subnet): subnet_inet += '6' iface['inet'] = subnet_inet - if iface['mode'].startswith('dhcp'): + if subnet['type'].startswith('dhcp'): iface['mode'] = 'dhcp' + # do not emit multiple 'auto $IFACE' lines as older (precise) + # ifupdown complains + if True in ["auto %s" % (iface['name']) in line + for line in sections]: + iface['control'] = 'alias' + lines = list( _iface_start_entry( iface, index, render_hwaddress=render_hwaddress) + @@ -378,11 +388,6 @@ class Renderer(renderer.Renderer): for route in subnet.get('routes', []): lines.extend(self._render_route(route, indent=" ")) - if len(subnets) > 1 and index == 0: - tmpl = " post-up ifup %s:%s\n" - for i in range(1, len(subnets)): - lines.append(tmpl % (iface['name'], i)) - sections.append(lines) else: # ifenslave docs say to auto the slave devices diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 2c2bde96..b77d277a 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -219,11 +219,9 @@ NETWORK_CONFIGS = { auto eth99 iface eth99 inet dhcp - post-up ifup eth99:1 - - auto eth99:1 - iface eth99:1 inet static + # control-alias eth99 + iface eth99 inet static address 192.168.21.3/24 dns-nameservers 8.8.8.8 8.8.4.4 dns-search barley.maas sach.maas @@ -261,6 +259,27 @@ NETWORK_CONFIGS = { - wark.maas """), }, + 'v4_and_v6': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 
inet dhcp + + # control-alias iface0 + iface iface0 inet6 dhcp + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp4'} + - {'type': 'dhcp6'} + """).rstrip(' '), + }, 'all': { 'expected_eni': ("""\ auto lo @@ -298,11 +317,9 @@ iface br0 inet static address 192.168.14.2/24 bridge_ports eth3 eth4 bridge_stp off - post-up ifup br0:1 - -auto br0:1 -iface br0:1 inet6 static +# control-alias br0 +iface br0 inet6 static address 2001:1::1/64 auto bond0.200 @@ -319,11 +336,9 @@ iface eth0.101 inet static mtu 1500 vlan-raw-device eth0 vlan_id 101 - post-up ifup eth0.101:1 - -auto eth0.101:1 -iface eth0.101:1 inet static +# control-alias eth0.101 +iface eth0.101 inet static address 192.168.2.10/24 post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true @@ -758,6 +773,13 @@ class TestEniRoundTrip(CiTestCase): entry['expected_eni'].splitlines(), files['/etc/network/interfaces'].splitlines()) + def testsimple_render_v4_and_v6(self): + entry = NETWORK_CONFIGS['v4_and_v6'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + def test_routes_rendered(self): # as reported in bug 1649652 conf = [ -- cgit v1.2.3 From 9698b0ded3d7e72f54513f248d8da41e08472f68 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 4 Feb 2017 02:24:55 +0000 Subject: Add tools/ds-identify to identify datasources available. ds-identify is run here from the generator. If ds-identify does not see any datasources, it can completely disable cloud-init. The big value in this is that if there is no datasource, no python will ever be loaded, and cloud-init will be disabled.o The default policy being added here is: search,found=all,maybe=all,notfound=disabled That means: - enable (in 'datasource_list') all sources that are found. - if none are found, enable all 'maybe'. 
- if no maybe are found, then disable cloud-init. On platforms without DMI (everything except for aarch64 and x86), the default 'notfound' setting is 'enabled'. This is because many of the detection mechanisms rely on dmi data, which is present only on x86 and aarch64. --- cloudinit/settings.py | 2 + setup.py | 3 +- systemd/cloud-init-generator | 39 +- tools/ds-identify | 1015 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1056 insertions(+), 3 deletions(-) create mode 100755 tools/ds-identify (limited to 'cloudinit') diff --git a/cloudinit/settings.py b/cloudinit/settings.py index b1fdd31f..692ff5e5 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -14,6 +14,8 @@ CFG_ENV_NAME = "CLOUD_CFG" # This is expected to be a yaml formatted file CLOUD_CONFIG = '/etc/cloud/cloud.cfg' +RUN_CLOUD_CONFIG = '/run/cloud-init/cloud.cfg' + # What u get if no config is provided CFG_BUILTIN = { 'datasource_list': [ diff --git a/setup.py b/setup.py index 04036078..e6693c90 100755 --- a/setup.py +++ b/setup.py @@ -168,7 +168,8 @@ else: (ETC + '/cloud/templates', glob('templates/*')), (ETC + '/NetworkManager/dispatcher.d/', ['tools/hook-network-manager']), (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), - (USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init', + (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', + 'tools/uncloud-init', 'tools/write-ssh-key-fingerprints']), (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), (USR + '/share/doc/cloud-init/examples', diff --git a/systemd/cloud-init-generator b/systemd/cloud-init-generator index fedb6309..bd9f2678 100755 --- a/systemd/cloud-init-generator +++ b/systemd/cloud-init-generator @@ -6,6 +6,8 @@ DEBUG_LEVEL=1 LOG_D="/run/cloud-init" ENABLE="enabled" DISABLE="disabled" +FOUND="found" +NOTFOUND="notfound" RUN_ENABLED_FILE="$LOG_D/$ENABLE" CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target" CLOUD_TARGET_NAME="cloud-init.target" @@ -74,10 +76,30 @@ default() { 
_RET="$ENABLE" } +check_for_datasource() { + local ds_rc="" dsidentify="/usr/lib/cloud-init/ds-identify" + if [ ! -x "$dsidentify" ]; then + debug 1 "no ds-identify in $dsidentify. _RET=$FOUND" + return 0 + fi + $dsidentify + ds_rc=$? + debug 1 "ds-identify rc=$ds_rc" + if [ "$ds_rc" = "0" ]; then + _RET="$FOUND" + debug 1 "ds-identify _RET=$_RET" + return 0 + fi + _RET="$NOTFOUND" + debug 1 "ds-identify _RET=$_RET" + return 1 +} + main() { local normal_d="$1" early_d="$2" late_d="$3" local target_name="multi-user.target" gen_d="$early_d" local link_path="$gen_d/${target_name}.wants/${CLOUD_TARGET_NAME}" + local ds="$NOTFOUND" debug 1 "$0 normal=$normal_d early=$early_d late=$late_d" debug 2 "$0 $*" @@ -93,7 +115,20 @@ main() { debug 0 "search $search returned $ret" fi done - + + # enable AND ds=found == enable + # enable AND ds=notfound == disable + # disable || == disabled + if [ "$result" = "$ENABLE" ]; then + debug 1 "checking for datasource" + check_for_datasource + ds=$_RET + if [ "$ds" = "$NOTFOUND" ]; then + debug 1 "cloud-init is enabled but no datasource found, disabling" + result="$DISABLE" + fi + fi + if [ "$result" = "$ENABLE" ]; then if [ -e "$link_path" ]; then debug 1 "already enabled: no change needed" @@ -124,7 +159,7 @@ main() { rm -f "$RUN_ENABLED_FILE" fi else - debug 0 "unexpected result '$result'" + debug 0 "unexpected result '$result' 'ds=$ds'" ret=3 fi return $ret diff --git a/tools/ds-identify b/tools/ds-identify new file mode 100755 index 00000000..203eac0d --- /dev/null +++ b/tools/ds-identify @@ -0,0 +1,1015 @@ +#!/bin/sh +# +# ds-identify is configured via /etc/cloud/ds-identify.cfg +# or on the kernel command line. It takes primarily 2 inputs: +# datasource: can specify the datasource that should be used. +# kernel command line option: ci.datasource= +# +# policy: a string that indicates how ds-identify should operate. 
+# kernel command line option: ci.di.policy= +# default setting is: +# search,found=all,maybe=all,notfound=disabled + +# report: write config to /run/cloud-init/cloud.cfg.report (instead of +# /run/cloud-init/cloud.cfg, which effectively makes this dry-run). +# enabled: do nothing +# ds-identify writes no config and just exits success +# the caller (cloud-init-generator) then enables cloud-init to run +# just without any aid from ds-identify. +# disabled: disable cloud-init +# +# [report,]found=value,maybe=value,notfound=value +# found: (default=first) +# first: use the first found, do no further checking +# all: enable all DS_FOUND +# +# maybe: (default=all) +# if nothing returned 'found', then how to handle maybe. +# no network sources are allowed to return 'maybe'. +# all: enable all DS_MAYBE +# none: ignore any DS_MAYBE +# +# notfound: (default=disabled) +# disabled: disable cloud-init +# enabled: enable cloud-init +# +# +# zesty: +# policy: found=first,maybe=all,notfound=disabled +# xenial: +# policy: found=all,maybe=all,notfound=enabled +# and then at a later date + + +set -u +set -f +UNAVAILABLE="unavailable" +CR=" +" +ERROR="error" +DI_ENABLED="enabled" +DI_DISABLED="disabled" + +DI_DEBUG_LEVEL="${DEBUG_LEVEL:-1}" + +PATH_ROOT=${PATH_ROOT:-""} +PATH_RUN=${PATH_RUN:-"${PATH_ROOT}/run"} +PATH_SYS_CLASS_DMI_ID=${PATH_SYS_CLASS_DMI_ID:-${PATH_ROOT}/sys/class/dmi/id} +PATH_SYS_HYPERVISOR=${PATH_SYS_HYPERVISOR:-${PATH_ROOT}/sys/hypervisor} +PATH_SYS_CLASS_BLOCK=${PATH_SYS_CLASS_BLOCK:-${PATH_ROOT}/sys/class/block} +PATH_DEV_DISK="${PATH_DEV_DISK:-${PATH_ROOT}/dev/disk}" +PATH_VAR_LIB_CLOUD="${PATH_VAR_LIB_CLOUD:-${PATH_ROOT}/var/lib/cloud}" +PATH_DI_CONFIG="${PATH_DI_CONFIG:-${PATH_ROOT}/etc/cloud/ds-identify.cfg}" +PATH_PROC_CMDLINE="${PATH_PROC_CMDLINE:-${PATH_ROOT}/proc/cmdline}" +PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}" +PATH_CLOUD_CONFD="${PATH_CLOUD_CONFD:-${PATH_ROOT}/etc/cloud}" +PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}"
+PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg} + +DI_LOG="${DI_LOG:-${PATH_RUN_CI}/ds-identify.log}" +_DI_LOGGED="" + +# set DI_MAIN='noop' in environment to source this file with no main called. +DI_MAIN=${DI_MAIN:-main} + +DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}" +DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}" +DI_DMI_PRODUCT_NAME="" +DI_DMI_SYS_VENDOR="" +DI_DMI_PRODUCT_SERIAL="" +DI_DMI_PRODUCT_UUID="" +DI_FS_LABELS="" +DI_KERNEL_CMDLINE="" +DI_VIRT="" + +DI_UNAME_KERNEL_NAME="" +DI_UNAME_KERNEL_RELEASE="" +DI_UNAME_KERNEL_VERSION="" +DI_UNAME_MACHINE="" +DI_UNAME_NODENAME="" +DI_UNAME_OPERATING_SYSTEM="" +DI_UNAME_CMD_OUT="" + +DS_FOUND=0 +DS_NOT_FOUND=1 +DS_MAYBE=2 + +DI_DSNAME="" +# this has to match the builtin list in cloud-init, it is what will +# be searched if there is no setting found in config. +DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ +CloudSigma CloudStack DigitalOcean Ec2 OpenNebula OpenStack OVF SmartOS" +DI_DSLIST="" +DI_MODE="" +DI_REPORT="" +DI_ON_FOUND="" +DI_ON_MAYBE="" +DI_ON_NOTFOUND="" + + +error() { + set -- "ERROR:" "$@"; + debug 0 "$@" + stderr "$@" +} +warn() { + set -- "WARN:" "$@" + debug 0 "$@" + stderr "$@" +} + +stderr() { echo "$@" 1>&2; } + +debug() { + local lvl="$1" + shift + [ "$lvl" -gt "${DI_DEBUG_LEVEL}" ] && return + + if [ "$_DI_LOGGED" != "$DI_LOG" ]; then + # first time here, open file descriptor for append + case "$DI_LOG" in + stderr) :;; + ?*/*) + if [ ! -d "${DI_LOG%/*}" ]; then + mkdir -p "${DI_LOG%/*}" || { + stderr "ERROR:" "cannot write to $DI_LOG" + DI_LOG="stderr" + } + fi + esac + if [ "$DI_LOG" = "stderr" ]; then + exec 3>&2 + else + ( exec 3>>"$DI_LOG" ) && exec 3>>"$DI_LOG" || { + stderr "ERROR: failed writing to $DI_LOG. logging to stderr."; + exec 3>&2 + DI_LOG="stderr" + } + fi + _DI_LOGGED="$DI_LOG" + fi + echo "$@" 1>&3 +} + +get_dmi_field() { + local path="${PATH_SYS_CLASS_DMI_ID}/$1" + if [ ! 
-f "$path" ] || [ ! -r "$path" ]; then + _RET="$UNAVAILABLE" + return + fi + read _RET < "${path}" || _RET="$ERROR" +} + +block_dev_with_label() { + local p="${PATH_DEV_DISK}/by-label/$1" + [ -b "$p" ] || return 1 + _RET=$p + return 0 +} + +read_fs_labels() { + cached "${DI_FS_LABELS}" && return 0 + # do not rely on links in /dev/disk which might not be present yet. + # note that older blkid versions do not report DEVNAME in 'export' output. + local out="" ret=0 oifs="$IFS" line="" delim="," + local labels="" + if is_container; then + # blkid will in a container, or at least currently in lxd + # not provide useful information. + DI_FS_LABELS="$UNAVAILABLE:container" + else + out=$(blkid -c /dev/null -o export) || { + ret=$? + error "failed running [$ret]: blkid -c /dev/null -o export" + return $ret + } + IFS="$CR" + set -- $out + IFS="$oifs" + for line in "$@"; do + case "${line}" in + LABEL=*) labels="${labels}${line#LABEL=}${delim}";; + esac + done + DI_FS_LABELS="${labels%${delim}}" + fi +} + +cached() { + [ -n "$1" ] && _RET="$1" && return || return 1 +} + + +has_cdrom() { + [ -e "${PATH_ROOT}/dev/cdrom" ] +} + +read_virt() { + cached "$DI_VIRT" && return 0 + local out="" r="" virt="${UNAVAILABLE}" + if [ -d /run/systemd ]; then + out=$(systemd-detect-virt 2>&1) + r=$? 
+ if [ $r -eq 0 ] || { [ $r -ne 0 ] && [ "$out" = "none" ]; }; then + virt="$out" + fi + fi + DI_VIRT=$virt +} + +is_container() { + case "${DI_VIRT}" in + lxc|lxc-libvirt|systemd-nspawn|docker|rkt) return 0;; + *) return 1;; + esac +} + +read_kernel_cmdline() { + cached "${DI_KERNEL_CMDLINE}" && return + local cmdline="" fpath="${PATH_PROC_CMDLINE}" + if is_container; then + local p1path="${PATH_PROC_1_CMDLINE}" x="" + cmdline="${UNAVAILABLE}:container" + if [ -f "$p1path" ] && x=$(tr '\0' ' ' < "$p1path"); then + cmdline=$x + fi + elif [ -f "$fpath" ]; then + read cmdline <"$fpath" + else + cmdline="${UNAVAILABLE}:no-cmdline" + fi + DI_KERNEL_CMDLINE="$cmdline" +} + +read_dmi_sys_vendor() { + cached "${DI_DMI_SYS_VENDOR}" && return + get_dmi_field sys_vendor + DI_DMI_SYS_VENDOR="$_RET" +} + +read_dmi_product_name() { + cached "${DI_DMI_PRODUCT_NAME}" && return + get_dmi_field product_name + DI_DMI_PRODUCT_NAME="$_RET" +} + +read_dmi_product_uuid() { + cached "${DI_DMI_PRODUCT_UUID}" && return + get_dmi_field product_uuid + DI_DMI_PRODUCT_UUID="$_RET" +} + +read_dmi_product_serial() { + cached "${DI_DMI_PRODUCT_SERIAL}" && return + get_dmi_field product_serial + DI_DMI_PRODUCT_SERIAL="$_RET" +} + +read_uname_info() { + # run uname, and parse output. + # uname is tricky to parse as it outputs always in a given order + # independent of option order. kernel-version is known to have spaces. + # 1 -s kernel-name + # 2 -n nodename + # 3 -r kernel-release + # 4.. -v kernel-version(whitespace) + # N-2 -m machine + # N-1 -o operating-system + cached "${DI_UNAME_CMD_OUT}" && return + local out="${1:-}" ret=0 buf="" + if [ -z "$out" ]; then + out=$(uname -snrvmo) || { + ret=$? 
+ error "failed reading uname with 'uname -snrvmo'" + return $ret + } + fi + set -- $out + DI_UNAME_KERNEL_NAME="$1" + DI_UNAME_NODENAME="$2" + DI_UNAME_KERNEL_RELEASE="$3" + shift 3 + while [ $# -gt 2 ]; do + buf="$buf $1" + shift + done + DI_UNAME_KERNEL_VERSION="${buf# }" + DI_UNAME_MACHINE="$1" + DI_UNAME_OPERATING_SYSTEM="$2" + DI_UNAME_CMD_OUT="$out" + return 0 +} + +parse_yaml_array() { + # parse a yaml single line array value ([1,2,3], not key: [1,2,3]). + # supported with or without leading and closing brackets + # ['1'] or [1] + # '1', '2' + local val="$1" oifs="$IFS" ret="" tok="" + val=${val#[} + val=${val%]} + IFS=","; set -- $val; IFS="$oifs" + for tok in "$@"; do + trim "$tok" + unquote "$_RET" + ret="${ret} $_RET" + done + _RET="${ret# }" +} + +read_datasource_list() { + cached "$DI_DSLIST" && return + local dslist="" + # if DI_DSNAME is set directly, then avoid parsing config. + if [ -n "${DI_DSNAME}" ]; then + dslist="${DI_DSNAME}" + fi + + # LP: #1582323. cc:{'datasource_list': ['name']} + # more generically cc:[end_cc] + local cb="]" ob="[" + case "$DI_KERNEL_CMDLINE" in + *cc:*datasource_list*) + t=${DI_KERNEL_CMDLINE##*datasource_list} + t=${t%%$cb*} + t=${t##*$ob} + parse_yaml_array "$t" + dslist=${_RET} + ;; + esac + if [ -z "$dslist" ] && check_config datasource_list; then + debug 1 "$_RET_fname set datasource_list: $_RET" + parse_yaml_array "$_RET" + dslist=${_RET} + fi + if [ -z "$dslist" ]; then + dslist=${DI_DSLIST_DEFAULT} + debug 1 "no datasource_list found, using default:" $dslist + fi + DI_DSLIST=$dslist + return 0 +} + +dmi_product_name_matches() { + is_container && return 1 + case "${DI_DMI_PRODUCT_NAME}" in + $1) return 0;; + esac + return 1 +} + +dmi_product_name_is() { + is_container && return 1 + [ "${DI_DMI_PRODUCT_NAME}" = "$1" ] +} + +dmi_sys_vendor_is() { + is_container && return 1 + [ "${DI_DMI_SYS_VENDOR}" = "$1" ] +} + +has_fs_with_label() { + local label=",$1," + case "${DI_FS_LABELS}" in + *,$label,*) return 0;; + 
esac + return 1 +} + +nocase_equal() { + # nocase_equal(a, b) + # return 0 if case insensitive comparison a.lower() == b.lower() + # different lengths + [ "${#1}" = "${#2}" ] || return 1 + # case sensitive equal + [ "$1" = "$2" ] && return 0 + + local delim="-delim-" + out=$(echo "$1${delim}$2" | tr A-Z a-z) + [ "${out#*${delim}}" = "${out%${delim}*}" ] +} + +check_seed_dir() { + # check_seed_dir(name, [required]) + # check the seed dir /var/lib/cloud/seed/ for 'required' + # required defaults to 'meta-data' + local name="$1" + local dir="${PATH_VAR_LIB_CLOUD}/seed/$name" + [ -d "$dir" ] || return 1 + shift + if [ $# -eq 0 ]; then + set -- meta-data + fi + local f="" + for f in "$@"; do + [ -f "$dir/$f" ] || return 1 + done + return 0 +} + +probe_floppy() { + cached "${STATE_FLOPPY_PROBED}" && return "${STATE_FLOPPY_PROBED}" + local fpath=/dev/floppy + + [ -b "$fpath" ] || + { STATE_FLOPPY_PROBED=1; return 1; } + + modprobe --use-blacklist floppy >/dev/null 2>&1 || + { STATE_FLOPPY_PROBED=1; return 1; } + + udevadm settle "--exit-if-exists=$fpath" || + { STATE_FLOPPY_PROBED=1; return 1; } + + [ -b "$fpath" ] + STATE_FLOPPY_PROBED=$? + return "${STATE_FLOPPY_PROBED}" +} + + +dscheck_CloudStack() { + is_container && return ${DS_NOT_FOUND} + dmi_product_name_matches "CloudStack*" && return $DS_FOUND + return $DS_NOT_FOUND +} + +dscheck_CloudSigma() { + # http://paste.ubuntu.com/23624795/ + dmi_product_name_is "CloudSigma" && return $DS_FOUND + return $DS_NOT_FOUND +} + +check_config() { + # somewhat hackily read config for 'key' in files matching 'files' + # currently does not respect any hierarchy. + local key="$1" files="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" + if [ $# -eq 1 ]; then + files="$bp ${bp}.d/*.cfg" + else + files="$*" + fi + shift + set +f; set -- $files; set +f; + if [ "$1" = "$files" -a !
-f "$1" ]; then + return 1 + fi + local fname="" line="" ret="" found=0 found_fn="" + for fname in "$@"; do + [ -f "$fname" ] || continue + while read line; do + line=${line%%#*} + case "$line" in + $key:\ *|$key:) + ret=${line#*:}; + ret=${ret# }; + found=$((found+1)) + found_fn="$fname";; + esac + done <"$fname" + done + if [ $found -ne 0 ]; then + _RET="$ret" + _RET_fname="$found_fn" + return 0 + fi + return 1 +} + +dscheck_MAAS() { + is_container && return "${DS_NOT_FOUND}" + # heuristic check for ephemeral boot environment + # for maas that do not set 'ci.dsname=' in the ephemeral environment + # these have iscsi root and cloud-config-url on the cmdline. + local maasiqn="iqn.2004-05.com.ubuntu:maas" + case "${DI_KERNEL_CMDLINE}" in + *cloud-config-url=*${maasiqn}*|*${maasiqn}*cloud-config-url=*) + return ${DS_FOUND} + ;; + esac + + # check config files written by maas for installed system. + local confd="${PATH_CLOUD_CONFD}" + local fnmatch="$confd/*maas*.cfg $confd/*kernel_cmdline*.cfg" + if check_config "MAAS" "$fnmatch"; then + return "${DS_FOUND}" + fi + return ${DS_NOT_FOUND} +} + +dscheck_NoCloud() { + local fslabel="cidata" d="" + for d in nocloud nocloud-net; do + check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} + done + if has_fs_with_label "${fslabel}"; then + return ${DS_FOUND} + fi + return ${DS_NOT_FOUND} +} + +check_configdrive_v2() { + if has_fs_with_label "config-2"; then + return ${DS_FOUND} + fi + return ${DS_NOT_FOUND} +} + +check_configdrive_v1() { + # FIXME: this has to check any file system that is vfat... + # for now, just return not found. + return ${DS_NOT_FOUND} +} + +dscheck_ConfigDrive() { + local ret="" + check_configdrive_v2 + ret=$? 
+ [ $DS_FOUND -eq $ret ] && return $ret + + check_configdrive_v1 +} + +dscheck_DigitalOcean() { + dmi_sys_vendor_is DigitalOcean && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + +dscheck_OpenNebula() { + check_seed_dir opennebula && return ${DS_FOUND} + has_fs_with_label "CONTEXT" && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + +dscheck_OVF() { + local p="" + check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}" + + has_cdrom || return ${DS_NOT_FOUND} + + # FIXME: currently just return maybe if there is a cdrom + # ovf iso9660 transport does not specify an fs label. + # better would be to check if + return ${DS_MAYBE} +} + +dscheck_Azure() { + # http://paste.ubuntu.com/23630873/ + # $ grep /sr0 /run/blkid/blkid.tab + # /dev/sr0 + # + check_seed_dir azure ovf-env.xml && return ${DS_FOUND} + + [ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND} + + has_fs_with_label "rd_rdfe_*" && return ${DS_FOUND} + + return ${DS_NOT_FOUND} +} + +dscheck_Bigstep() { + # bigstep is activated by presence of seed file 'url' + check_seed_dir "bigstep" url && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + +dscheck_Ec2() { + # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html + # http://paste.ubuntu.com/23630859/ + local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" + is_container && return ${DS_NOT_FOUND} + # if the (basically) xen specific /sys/hypervisor/uuid starts with 'ec2' + if [ -r "$hvuuid" ] && read uuid < "$hvuuid" && + [ "${uuid#ec2}" != "$uuid" ]; then + return ${DS_FOUND} + fi + + # product uuid and product serial start with case insensitive + local uuid=${DI_DMI_PRODUCT_UUID} serial=${DI_DMI_PRODUCT_SERIAL} + case "$uuid:$serial" in + [Ee][Cc]2*:[Ee][Cc]2) + # both start with ec2, now check for case insensitive equal + nocase_equal "$uuid" "$serial" && return ${DS_FOUND};; + esac + + # search through config files to check for platform + local f="" match="${PATH_CLOUD_CONFD}/*ec2*.cfg" + # look for the key 'platform'
(datasource/ec2/look_alike/behavior) + if check_config platform "$match"; then + if [ "$platform" != "Unknown" ]; then + _RET="$name" + return "${DS_FOUND}" + fi + fi + + return ${DS_NOT_FOUND} +} + +dscheck_GCE() { + if dmi_product_name_is "Google Compute Engine"; then + return ${DS_FOUND} + fi + return ${DS_NOT_FOUND} +} + +dscheck_OpenStack() { + # the openstack metadata http service + + # if there is a config drive, then do not check metadata + # FIXME: if config drive not in the search list, then we should not + # do this check. + check_configdrive_v2 + if [ $? -eq ${DS_FOUND} ]; then + return ${DS_NOT_FOUND} + fi + if dmi_product_name_is "OpenStack Nova"; then + return ${DS_FOUND} + fi + case "${DI_VIRT}" in + lxc|lxc-libvirt) + # FIXME: This could be container on openstack (nova-lxd) + # or nova-libvirt-lxc + return ${DS_NOT_FOUND} + ;; + esac + + return ${DS_NOT_FOUND} +} + +dscheck_AliYun() { + # aliyun is not enabled by default (LP: #1638931) + # so if we are here, it is because the datasource_list was + # set to include it. Thus, 'maybe'. 
+ return $DS_MAYBE +} + +dscheck_AltCloud() { + # ctype: either the dmi product name, or contents of + # /etc/sysconfig/cloud-info + # if ctype == "vsphere" + # device = device with label 'CDROM' + # elif ctype == "rhev" + # device = /dev/floppy + # then, filesystem on that device must have + # user-data.txt or deltacloud-user-data.txt + local ctype="" dev="" + local match_rhev="[Rr][Hh][Ee][Vv]" + local match_vsphere="[Vv][Ss][Pp][Hh][Ee][Rr][Ee]" + local cinfo="${PATH_ROOT}/etc/sysconfig/cloud-info" + if [ -f "$cinfo" ]; then + read ctype < "$cinfo" + else + ctype="${DI_DMI_PRODUCT_NAME}" + fi + case "$ctype" in + ${match_rhev}) + probe_floppy || return ${DS_NOT_FOUND} + dev="/dev/floppy" + ;; + ${match_vsphere}) + block_dev_with_label CDROM || return ${DS_NOT_FOUND} + dev="$_RET" + ;; + *) return ${DS_NOT_FOUND};; + esac + + # FIXME: need to check $dev for user-data.txt or deltacloud-user-data.txt + : "$dev" + return $DS_MAYBE +} + +dscheck_SmartOS() { + # joyent cloud has two virt types: kvm and container + # on kvm, product name on joyent public cloud shows 'SmartDC HVM' + # on the container platform, uname's version has: BrandZ virtual linux + local smartdc_kver="BrandZ virtual linux" + dmi_product_name_matches "SmartDC*" && return $DS_FOUND + if [ "${DI_UNAME_KERNEL_VERSION}" = "${smartdc_kver}" ] && + [ "${DI_VIRT}" = "container-other" ]; then + return ${DS_FOUND} + fi + return ${DS_NOT_FOUND} +} + +dscheck_None() { + return ${DS_NOT_FOUND} +} + +collect_info() { + read_virt + read_kernel_cmdline + read_uname_info + read_config + read_datasource_list + read_dmi_sys_vendor + read_dmi_product_name + read_dmi_product_serial + read_dmi_product_uuid + read_fs_labels +} + +print_info() { + collect_info + _print_info +} + +_print_info() { + local n="" v="" vars="" + vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" + vars="$vars DMI_PRODUCT_UUID" + vars="$vars FS_LABELS KERNEL_CMDLINE VIRT" + vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE 
UNAME_KERNEL_VERSION" + vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" + vars="$vars DSNAME DSLIST" + vars="$vars MODE REPORT ON_FOUND ON_MAYBE ON_NOTFOUND" + for v in ${vars}; do + eval n='${DI_'"$v"'}' + echo "$v=$n" + done + echo "pid=$$ ppid=$PPID" + is_container && echo "is_container=true" || echo "is_container=false" +} + +write_result() { + local runcfg="${PATH_RUN_CI_CFG}" ret="" line="" + if [ "$DI_REPORT" = "true" ]; then + runcfg="$runcfg.report" + fi + for line in "$@"; do + echo "$line" + done > "$runcfg" + ret=$? + [ $ret -eq 0 ] || { + error "failed to write to ${runcfg}" + return $ret + } + return 0 +} + +found() { + local list="" ds="" + # always we write the None datasource last. + for ds in "$@" None; do + list="${list:+${list}, }$ds" + done + write_result "datasource_list: [ $list ]" + return +} + +trim() { + set -- $* + _RET="$*" +} + +unquote() { + # remove quotes from quoted value + local quote='"' tick="'" + local val="$1" + case "$val" in + ${quote}*${quote}|${tick}*${tick}) + val=${val#?}; val=${val%?};; + esac + _RET="$val" +} + +_read_config() { + # reads config from stdin, modifies _rc scoped environment vars. + # rc_policy and _rc_dsname + local line="" hash="#" ckey="" key="" val="" + while read line; do + line=${line%%${hash}*} + key="${line%%:*}" + + # no : in the line. + [ "$key" = "$line" ] && continue + trim "$key" + key=${_RET} + + val="${line#*:}" + trim "$val" + unquote "${_RET}" + val=${_RET} + case "$key" in + datasource) _rc_dsname="$val";; + policy) _rc_policy="$val";; + esac + done +} + +parse_warn() { + echo "WARN: invalid value '$2' for key '$1'. Using $1=$3." 1>&2 +} + +parse_def_policy() { + local _rc_mode="" _rc_report="" _rc_found="" _rc_maybe="" _rc_notfound="" + local ret="" + parse_policy "$@" + ret=$? 
+ _def_mode=$_rc_mode + _def_report=$_rc_report + _def_found=$_rc_found + _def_maybe=$_rc_maybe + _def_notfound=$_rc_notfound + return $ret +} + +parse_policy() { + # parse_policy(policy, default) + # parse a policy string. sets + # _rc_mode (enable|disable,search) + # _rc_report true|false + # _rc_found first|all + # _rc_maybe all|none + # _rc_notfound enable|disable + local def="" + case "$DI_UNAME_MACHINE" in + # these have dmi data + i?86|x86_64|aarch64) def=${DI_DEFAULT_POLICY};; + *) def=${DI_DEFAULT_POLICY_NO_DMI};; + esac + local policy="$1" + local _def_mode="" _def_report="" _def_found="" _def_maybe="" + local _def_notfound="" + if [ $# -eq 1 ] || [ "$2" != "-" ]; then + def=${2:-${def}} + parse_def_policy "$def" - + fi + + local mode="" report="" found="" maybe="" notfound="" + local oifs="$IFS" tok="" val="" + IFS=","; set -- $policy; IFS="$oifs" + for tok in "$@"; do + val=${tok#*=} + case "$tok" in + report) report=true;; + $DI_ENABLED|$DI_DISABLED|search) mode=$tok;; + found=all|found=first) found=$val;; + maybe=all|maybe=none) maybe=$val;; + notfound=$DI_ENABLED|notfound=$DI_DISABLED) notfound=$val;; + found=*) + parse_warn found "$val" "${_def_found}" + found=${_def_found};; + maybe=*) + parse_warn maybe "$val" "${_def_maybe}" + maybe=${_def_maybe};; + notfound=*) + parse_warn notfound "$val" "${_def_notfound}" + notfound=${_def_notfound};; + esac + done + report=${report:-${_def_report:-false}} + _rc_report=${report} + _rc_mode=${mode:-${_def_mode}} + _rc_found=${found:-${_def_found}} + _rc_maybe=${maybe:-${_def_maybe}} + _rc_notfound=${notfound:-${_def_notfound}} +} + +read_config() { + local config=${PATH_DI_CONFIG} + local _rc_dsname="" _rc_policy="" ret="" + if [ -f "$config" ]; then + _read_config < "$config" + ret=$? + elif [ -e "$config" ]; then + error "$config exists but is not a file!" 
+ ret=1 + fi + local tok="" key="" val="" + for tok in ${DI_KERNEL_CMDLINE}; do + key=${tok%%=*} + val=${tok#*=} + case "$key" in + ci.ds) _rc_dsname="$val";; + ci.datasource) _rc_dsname="$val";; + ci.di.policy) _rc_policy="$val";; + esac + done + + local _rc_mode _rc_report _rc_found _rc_maybe _rc_notfound + parse_policy "${_rc_policy}" + debug 1 "policy loaded: mode=${_rc_mode} report=${_rc_report}" \ + "found=${_rc_found} maybe=${_rc_maybe} notfound=${_rc_notfound}" + DI_MODE=${_rc_mode} + DI_REPORT=${_rc_report} + DI_ON_FOUND=${_rc_found} + DI_ON_MAYBE=${_rc_maybe} + DI_ON_NOTFOUND=${_rc_notfound} + + DI_DSNAME="${_rc_dsname}" + return $ret +} + + +manual_clean_and_existing() { + [ -f "${PATH_VAR_LIB_CLOUD}/instance/manual-clean" ] +} + +main() { + local dscheck="" ret_dis=1 ret_en=0 + collect_info + + if [ ! -e "$PATH_RUN_CI_CFG" ]; then + # the first time the generator is run. + _print_info >> "$DI_LOG" + fi + + case "$DI_MODE" in + $DI_DISABLED) + debug 1 "mode=$DI_DISABLED. returning $ret_dis" + return $ret_dis + ;; + $DI_ENABLED) + debug 1 "mode=$DI_ENABLED. returning $ret_en" + return $ret_en;; + search) :;; + esac + + if [ -n "${DI_DSNAME}" ]; then + debug 1 "datasource '$DI_DSNAME' specified." + found "$DI_DSNAME" + return + fi + + if manual_clean_and_existing; then + debug 1 "manual_cache_clean enabled. Not writing datasource_list." + write_result "# manual_cache_clean." + return + fi + + # if there is only a single entry in $DI_DSLIST + set -- $DI_DSLIST + if [ $# -eq 1 ] || [ $# -eq 2 -a "$2" = "None" ] ; then + debug 1 "single entry in datasource_list ($DI_DSLIST) use that." + found "$@" + return + fi + + local found="" ret="" ds="" maybe="" + for ds in ${DI_DSLIST}; do + dscheck_fn="dscheck_${ds}" + debug 2 "Checking for datasource '$ds' via '$dscheck_fn'" + if ! type "$dscheck_fn" >/dev/null 2>&1; then + warn "No check method '$dscheck_fn' for datasource '$ds'" + continue + fi + $dscheck_fn + ret="$?" 
+ case "$ret" in + $DS_FOUND) + debug 1 "check for '$ds' returned found"; + found="${found} $ds";; + $DS_MAYBE) + debug 1 "check for $ds returned maybe"; + maybe="${maybe} $ds";; + *) debug 2 "check for $ds returned not-found[$ret]";; + esac + done + + debug 2 "found=$found maybe=$maybe" + set -- $found + if [ $# -ne 0 ]; then + if [ $# -eq 1 ]; then + debug 1 "Found single datasource: $1" + else + # found=all + debug 1 "Found $# datasources found=${DI_ON_FOUND}: $*" + if [ "${DI_ON_FOUND}" = "first" ]; then + set -- "$1" + fi + fi + found "$@" + return + fi + + set -- $maybe + if [ $# -ne 0 -a "${DI_ON_MAYBE}" != "none" ]; then + debug 1 "$# datasources returned maybe: $*" + found "$@" + return + fi + + case "$DI_ON_NOTFOUND" in + $DI_DISABLED) + debug 1 "No result. notfound=$DI_DISABLED. returning $ret_dis." + return $ret_dis + ;; + $DI_ENABLED) + debug 1 "notfound=$DI_ENABLED. returning $ret_en" + return $ret_en;; + esac + + error "Unexpected result" + return 3 +} + +noop() { + : +} + +case "${DI_MAIN}" in + main|print_info|noop) "${DI_MAIN}" "$@";; + *) error "unexpected value for DI_MAIN"; exit 1;; +esac + +# vi: syntax=sh ts=4 expandtab -- cgit v1.2.3 From e98709225510ee99ee0269c558c82b3e693e38e5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 4 Feb 2017 02:25:19 +0000 Subject: manual_cache_clean: When manually cleaning touch a file in instance dir. When manual_cache_clean is enabled, write a file to /var/lib/cloud/instance/manual-clean. That file can then be read by ds-identify or another tool to indicate that manual cleaning is in place. 
--- cloudinit/cmd/main.py | 9 ++++++++- cloudinit/helpers.py | 1 + cloudinit/stages.py | 6 ++++++ 3 files changed, 15 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 65b15edc..7c652574 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -312,8 +312,15 @@ def main_init(name, args): " would allow us to stop early.") else: existing = "check" - if util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False): + mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False) + if mcfg: + LOG.debug("manual cache clean set from config") existing = "trust" + else: + mfile = path_helper.get_ipath_cur("manual_clean_marker") + if os.path.exists(mfile): + LOG.debug("manual cache clean found from marker: %s", mfile) + existing = "trust" init.purge_cache() # Delete the non-net file as well diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 4528fb01..38f5f899 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -339,6 +339,7 @@ class Paths(object): "vendordata_raw": "vendor-data.txt", "vendordata": "vendor-data.txt.i", "instance_id": ".instance-id", + "manual_clean_marker": "manual-clean", } # Set when a datasource becomes active self.datasource = ds diff --git a/cloudinit/stages.py b/cloudinit/stages.py index b0552dde..21763810 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -188,6 +188,12 @@ class Init(object): def _write_to_cache(self): if self.datasource is NULL_DATA_SOURCE: return False + if util.get_cfg_option_bool(self.cfg, 'manual_cache_clean', False): + # The empty file in instance/ dir indicates manual cleaning, + # and can be read by ds-identify. 
+ util.write_file( + self.paths.get_ipath_cur("manual_clean_marker"), + omode="w", content="") return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl")) def _get_datasources(self): -- cgit v1.2.3 From 1cd8cfaf1b4d0e3a97c693469d6d987d55014280 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 14 Feb 2017 11:06:36 -0500 Subject: apply the runtime configuration written by ds-identify. When the ds-identify code landed, it started writing /run/cloud.cfg but at the moment, nothing was reading that. The result is that ds-identify only worked to disable cloud-init entirely. --- cloudinit/stages.py | 9 +++++++- tests/unittests/test_data.py | 53 +++++++++++++++++++++++++++++++------------- 2 files changed, 45 insertions(+), 17 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 21763810..5bed9032 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -11,7 +11,8 @@ import sys import six from six.moves import cPickle as pickle -from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG) +from cloudinit.settings import ( + FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG) from cloudinit import handlers @@ -834,6 +835,10 @@ class Modules(object): return self._run_modules(mostly_mods) +def read_runtime_config(): + return util.read_conf(RUN_CLOUD_CONFIG) + + def fetch_base_config(): return util.mergemanydict( [ @@ -841,6 +846,8 @@ def fetch_base_config(): util.get_builtin_cfg(), # Anything in your conf.d or 'default' cloud.cfg location. 
util.read_conf_with_confd(CLOUD_CONFIG), + # runtime config + read_runtime_config(), # Kernel/cmdline parameters override system config util.read_conf_from_cmdline(), ], reverse=True) diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 4092d9ca..4ad86bb6 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -564,12 +564,12 @@ class TestConvertString(helpers.TestCase): class TestFetchBaseConfig(helpers.TestCase): - - def test_only_builtin_gets_builtin2(self): + def test_only_builtin_gets_builtin(self): ret = helpers.wrap_and_call( - 'cloudinit.stages.util', - {'read_conf_with_confd': None, - 'read_conf_from_cmdline': None}, + 'cloudinit.stages', + {'util.read_conf_with_confd': None, + 'util.read_conf_from_cmdline': None, + 'read_runtime_config': {'return_value': {}}}, stages.fetch_base_config) self.assertEqual(util.get_builtin_cfg(), ret) @@ -578,9 +578,11 @@ class TestFetchBaseConfig(helpers.TestCase): test_key = sorted(builtin)[0] test_value = 'test' ret = helpers.wrap_and_call( - 'cloudinit.stages.util', - {'read_conf_with_confd': {'return_value': {test_key: test_value}}, - 'read_conf_from_cmdline': None}, + 'cloudinit.stages', + {'util.read_conf_with_confd': + {'return_value': {test_key: test_value}}, + 'util.read_conf_from_cmdline': None, + 'read_runtime_config': {'return_value': {}}}, stages.fetch_base_config) self.assertEqual(ret.get(test_key), test_value) builtin[test_key] = test_value @@ -592,25 +594,44 @@ class TestFetchBaseConfig(helpers.TestCase): test_value = 'test' cmdline = {test_key: test_value} ret = helpers.wrap_and_call( - 'cloudinit.stages.util', - {'read_conf_from_cmdline': {'return_value': cmdline}, - 'read_conf_with_confd': None}, + 'cloudinit.stages', + {'util.read_conf_from_cmdline': {'return_value': cmdline}, + 'util.read_conf_with_confd': None, + 'read_runtime_config': None}, stages.fetch_base_config) self.assertEqual(ret.get(test_key), test_value) builtin[test_key] = test_value 
self.assertEqual(ret, builtin) - def test_cmdline_overrides_conf_d_and_defaults(self): + def test_cmdline_overrides_confd_runtime_and_defaults(self): builtin = {'key1': 'value0', 'key3': 'other2'} conf_d = {'key1': 'value1', 'key2': 'other1'} cmdline = {'key3': 'other3', 'key2': 'other2'} + runtime = {'key3': 'runtime3'} ret = helpers.wrap_and_call( - 'cloudinit.stages.util', - {'read_conf_with_confd': {'return_value': conf_d}, - 'get_builtin_cfg': {'return_value': builtin}, - 'read_conf_from_cmdline': {'return_value': cmdline}}, + 'cloudinit.stages', + {'util.read_conf_with_confd': {'return_value': conf_d}, + 'util.get_builtin_cfg': {'return_value': builtin}, + 'read_runtime_config': {'return_value': runtime}, + 'util.read_conf_from_cmdline': {'return_value': cmdline}}, stages.fetch_base_config) self.assertEqual(ret, {'key1': 'value1', 'key2': 'other2', 'key3': 'other3'}) + def test_order_precedence_is_builtin_system_runtime_cmdline(self): + builtin = {'key1': 'builtin0', 'key3': 'builtin3'} + conf_d = {'key1': 'confd1', 'key2': 'confd2', 'keyconfd1': 'kconfd1'} + runtime = {'key1': 'runtime1', 'key2': 'runtime2'} + cmdline = {'key1': 'cmdline1'} + ret = helpers.wrap_and_call( + 'cloudinit.stages', + {'util.read_conf_with_confd': {'return_value': conf_d}, + 'util.get_builtin_cfg': {'return_value': builtin}, + 'util.read_conf_from_cmdline': {'return_value': cmdline}, + 'read_runtime_config': {'return_value': runtime}, + }, + stages.fetch_base_config) + self.assertEqual(ret, {'key1': 'cmdline1', 'key2': 'runtime2', + 'key3': 'builtin3', 'keyconfd1': 'kconfd1'}) + # vi: ts=4 expandtab -- cgit v1.2.3 From 91be1d189d9348e81a4c4f1f7d5fc255df1ce6d1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 16 Feb 2017 21:13:38 -0500 Subject: ec2_utils: fix MetadataLeafDecoder that returned bytes on empty the MetadataLeafDecoder would return a bytes value b'' instead of an empty string if the value of a key was empty. In all other cases the value would be a string. 
This was discovered when trying to json.dumps(get_instance_metadata()) on a recent OpenStack, where the value of 'public-ipv4' was empty. The attempt to dump that with json would raise TypeError: b'' is not JSON serializable --- cloudinit/ec2_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 0c16ae47..13691549 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -28,7 +28,7 @@ class MetadataLeafDecoder(object): def __call__(self, field, blob): if not blob: - return blob + return '' try: blob = util.decode_binary(blob) except UnicodeDecodeError: -- cgit v1.2.3 From f81d6c7bde2af206d449de593b35773068270c84 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Fri, 17 Feb 2017 08:55:05 -0500 Subject: net: correct errors in cloudinit/net/sysconfig.py There were some logic errors in sysconfig.py that appear to be the result of accidentally typing "iface" where it should have been "iface_cfg". This patch corrects those problems so that the module can run successfully. 
LP: #1665441 Resolves: rhbz#1389530 --- cloudinit/net/sysconfig.py | 4 +-- tests/unittests/test_net.py | 87 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 88 insertions(+), 3 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 9be74070..19e220ae 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -283,10 +283,10 @@ class Renderer(renderer.Renderer): cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0]) elif len(iface_subnets) > 1: for i, iface_subnet in enumerate(iface_subnets, - start=len(iface.children)): + start=len(iface_cfg.children)): iface_sub_cfg = iface_cfg.copy() iface_sub_cfg.name = "%s:%s" % (iface_name, i) - iface.children.append(iface_sub_cfg) + iface_cfg.children.append(iface_sub_cfg) cls._render_subnet(iface_sub_cfg, route_cfg, iface_subnet) @classmethod diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index b77d277a..1b6288d4 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -159,6 +159,91 @@ NETMASK0=0.0.0.0 ; Created by cloud-init on instance boot automatically, do not edit. 
; nameserver 172.19.0.12 +""".lstrip()), + ('etc/udev/rules.d/70-persistent-net.rules', + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] + }, + { + 'in_data': { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [{ + "network_id": "public-ipv4", + "type": "ipv4", "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [{ + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + }], + "ip_address": "172.19.1.34", "id": "network0" + },{ + "network_id": "private-ipv4", + "type": "ipv4", "netmask": "255.255.255.0", + "link": "tap1a81968a-79", + "routes": [], + "ip_address": "10.0.0.10", "id": "network1" + }], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, "type": "bridge", "id": + "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + }, + ], + }, + 'in_macs': { + 'fa:16:3e:ed:9a:59': 'eth0', + }, + 'out_sysconfig': [ + ('etc/sysconfig/network-scripts/ifcfg-eth0', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEVICE=eth0 +HWADDR=fa:16:3e:ed:9a:59 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/sysconfig/network-scripts/ifcfg-eth0:0', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=static +DEFROUTE=yes +DEVICE=eth0:0 +GATEWAY=172.19.3.254 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=172.19.1.34 +NETMASK=255.255.252.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/sysconfig/network-scripts/ifcfg-eth0:1', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=static +DEVICE=eth0:1 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=10.0.0.10 +NETMASK=255.255.255.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/resolv.conf', + """ +; Created by cloud-init on instance boot automatically, do not edit. 
+; +nameserver 172.19.0.12 """.lstrip()), ('etc/udev/rules.d/70-persistent-net.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', @@ -529,8 +614,8 @@ USERCTL=no self.assertEqual(expected_content, content) def test_openstack_rendering_samples(self): - render_dir = self.tmp_dir() for os_sample in OS_SAMPLES: + render_dir = self.tmp_dir() ex_input = os_sample['in_data'] ex_mac_addrs = os_sample['in_macs'] network_cfg = openstack.convert_net_json( -- cgit v1.2.3 From da25385d0613b373c5746761748782ca1e157d10 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 17 Feb 2017 12:05:38 -0500 Subject: flake8: fix flake8 complaints in previous commit. --- cloudinit/net/sysconfig.py | 6 +++--- tests/unittests/test_net.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 19e220ae..6e7739fb 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -282,12 +282,12 @@ class Renderer(renderer.Renderer): if len(iface_subnets) == 1: cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0]) elif len(iface_subnets) > 1: - for i, iface_subnet in enumerate(iface_subnets, - start=len(iface_cfg.children)): + for i, isubnet in enumerate(iface_subnets, + start=len(iface_cfg.children)): iface_sub_cfg = iface_cfg.copy() iface_sub_cfg.name = "%s:%s" % (iface_name, i) iface_cfg.children.append(iface_sub_cfg) - cls._render_subnet(iface_sub_cfg, route_cfg, iface_subnet) + cls._render_subnet(iface_sub_cfg, route_cfg, isubnet) @classmethod def _render_bond_interfaces(cls, network_state, iface_contents): diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 1b6288d4..4b03ff72 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -177,7 +177,7 @@ nameserver 172.19.0.12 "gateway": "172.19.3.254", }], "ip_address": "172.19.1.34", "id": "network0" - },{ + }, { "network_id": "private-ipv4", "type": "ipv4", "netmask": 
"255.255.255.0", "link": "tap1a81968a-79", -- cgit v1.2.3 From 9bb55c6c45bcc5e310cf7e4d42cad53759dcca15 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Feb 2017 17:15:27 -0500 Subject: DatasourceEc2: add warning message when not on AWS. Based on the setting Datasource/Ec2/strict_id, the datasource will now warn once per instance. --- cloudinit/sources/DataSourceAliYun.py | 4 + cloudinit/sources/DataSourceEc2.py | 178 +++++++++++++++++++++++++++++++++- tools/ds-identify | 40 ++++++-- 3 files changed, 211 insertions(+), 11 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 2d00255c..9debe947 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -22,6 +22,10 @@ class DataSourceAliYun(EC2.DataSourceEc2): def get_public_ssh_keys(self): return parse_public_keys(self.metadata.get('public-keys', {})) + @property + def cloud_platform(self): + return EC2.Platforms.ALIYUN + def parse_public_keys(public_keys): keys = [] diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index c657fd09..26da263a 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -9,6 +9,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import os +import textwrap import time from cloudinit import ec2_utils as ec2 @@ -22,12 +23,23 @@ LOG = logging.getLogger(__name__) # Which version we are requesting of the ec2 metadata apis DEF_MD_VERSION = '2009-04-04' +STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") +STRICT_ID_DEFAULT = "warn" + + +class Platforms(object): + ALIYUN = "AliYun" + AWS = "AWS" + SEEDED = "Seeded" + UNKNOWN = "Unknown" + class DataSourceEc2(sources.DataSource): # Default metadata urls that will be used if none are provided # They will be checked for 'resolveability' and some of the # following may be discarded if they do not resolve metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"] + _cloud_platform = None def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -41,8 +53,18 @@ class DataSourceEc2(sources.DataSource): self.userdata_raw = seed_ret['user-data'] self.metadata = seed_ret['meta-data'] LOG.debug("Using seeded ec2 data from %s", self.seed_dir) + self._cloud_platform = Platforms.SEEDED return True + strict_mode, _sleep = read_strict_mode( + util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH, + STRICT_ID_DEFAULT), ("warn", None)) + + LOG.debug("strict_mode: %s, cloud_platform=%s", + strict_mode, self.cloud_platform) + if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN: + return False + try: if not self.wait_for_metadata_service(): return False @@ -51,8 +73,8 @@ class DataSourceEc2(sources.DataSource): ec2.get_instance_userdata(self.api_ver, self.metadata_address) self.metadata = ec2.get_instance_metadata(self.api_ver, self.metadata_address) - LOG.debug("Crawl of metadata service took %s seconds", - int(time.time() - start_time)) + LOG.debug("Crawl of metadata service took %.3f seconds", + time.time() - start_time) return True except Exception: util.logexc(LOG, "Failed reading from metadata address %s", @@ -190,6 +212,158 @@ class DataSourceEc2(sources.DataSource): return az[:-1] 
return None + @property + def cloud_platform(self): + if self._cloud_platform is None: + self._cloud_platform = identify_platform() + return self._cloud_platform + + def activate(self, cfg, is_new_instance): + if not is_new_instance: + return + if self.cloud_platform == Platforms.UNKNOWN: + warn_if_necessary( + util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT)) + + +def read_strict_mode(cfgval, default): + try: + return parse_strict_mode(cfgval) + except ValueError as e: + LOG.warn(e) + return default + + +def parse_strict_mode(cfgval): + # given a mode like: + # true, false, warn,[sleep] + # return tuple with string mode (true|false|warn) and sleep. + if cfgval is True: + return 'true', None + if cfgval is False: + return 'false', None + + if not cfgval: + return 'warn', 0 + + mode, _, sleep = cfgval.partition(",") + if mode not in ('true', 'false', 'warn'): + raise ValueError( + "Invalid mode '%s' in strict_id setting '%s': " + "Expected one of 'true', 'false', 'warn'." % (mode, cfgval)) + + if sleep: + try: + sleep = int(sleep) + except ValueError: + raise ValueError("Invalid sleep '%s' in strict_id setting '%s': " + "not an integer" % (sleep, cfgval)) + else: + sleep = None + + return mode, sleep + + +def warn_if_necessary(cfgval): + try: + mode, sleep = parse_strict_mode(cfgval) + except ValueError as e: + LOG.warn(e) + return + + if mode == "false": + return + + show_warning(sleep) + + +def show_warning(sleep): + message = textwrap.dedent(""" + **************************************************************** + # This system is using the EC2 Metadata Service, but does not # + # appear to be running on Amazon EC2 or one of cloud-init's # + # known platforms that provide a EC2 Metadata service. 
In the # + # future, cloud-init may stop reading metadata from the EC2 # + # Metadata Service unless the platform can be identified # + # # + # If you are seeing this message, please file a bug against # + # cloud-init at https://bugs.launchpad.net/cloud-init/+filebug # + # Make sure to include the cloud provider your instance is # + # running on. # + # # + # For more information see # + # https://bugs.launchpad.net/cloud-init/+bug/1660385 # + # # + # After you have filed a bug, you can disable this warning by # + # launching your instance with the cloud-config below, or # + # putting that content into # + # /etc/cloud/cloud.cfg.d/99-ec2-datasource.cfg # + # # + # #cloud-config # + # datasource: # + # Ec2: # + # strict_id: false # + # # + """) + closemsg = "" + if sleep: + closemsg = " [sleeping for %d seconds] " % sleep + message += closemsg.center(64, "*") + print(message) + LOG.warn(message) + if sleep: + time.sleep(sleep) + + +def identify_aws(data): + # data is a dictionary returned by _collect_platform_data. + if (data['uuid'].startswith('ec2') and + (data['uuid_source'] == 'hypervisor' or + data['uuid'] == data['serial'])): + return Platforms.AWS + + return None + + +def identify_platform(): + # identify the platform and return an entry in Platforms. 
+ data = _collect_platform_data() + checks = (identify_aws, lambda x: Platforms.UNKNOWN) + for checker in checks: + try: + result = checker(data) + if result: + return result + except Exception as e: + LOG.warn("calling %s with %s raised exception: %s", + checker, data, e) + + +def _collect_platform_data(): + # returns a dictionary with all lower case values: + # uuid: system-uuid from dmi or /sys/hypervisor + # uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' + # serial: dmi 'system-serial-number' (/sys/.../product_serial) + data = {} + try: + uuid = util.load_file("/sys/hypervisor/uuid").strip() + data['uuid_source'] = 'hypervisor' + except Exception: + uuid = util.read_dmi_data('system-uuid') + data['uuid_source'] = 'dmi' + + if uuid is None: + uuid = '' + data['uuid'] = uuid.lower() + + serial = util.read_dmi_data('system-serial-number') + if serial is None: + serial = '' + + data['serial'] = serial.lower() + + return data + # Used to match classes to dependencies datasources = [ diff --git a/tools/ds-identify b/tools/ds-identify index bfb55ed1..dfa856ff 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -635,28 +635,50 @@ ec2_read_strict_setting() { return 0 } -dscheck_Ec2() { - # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html - # http://paste.ubuntu.com/23630859/ - - check_seed_dir "ec2" meta-data user-data && return ${DS_FOUND} - is_container && return ${DS_NOT_FOUND} +ec2_identify_platform() { + local default="$1" + local serial="${DI_DMI_PRODUCT_SERIAL}" + # AWS http://docs.aws.amazon.com/AWSEC2/ + # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" # if the (basically) xen specific /sys/hypervisor/uuid starts with 'ec2' if [ -r "$hvuuid" ] && read uuid < "$hvuuid" && [ "${uuid#ec2}" != "$uuid" ]; then - return ${DS_FOUND} + _RET="AWS" + return 0 fi # product uuid and product serial start with case insensitive - local uuid="${DI_DMI_PRODUCT_UUID}" 
serial="${DI_DMI_PRODUCT_SERIAL}" + local uuid="${DI_DMI_PRODUCT_UUID}" case "$uuid:$serial" in [Ee][Cc]2*:[Ee][Cc]2) # both start with ec2, now check for case insenstive equal - nocase_equal "$uuid" "$serial" && return ${DS_FOUND};; + nocase_equal "$uuid" "$serial" && + { _RET="AWS"; return 0; };; esac + _RET="$default" + return 0; +} + +dscheck_Ec2() { + check_seed_dir "ec2" meta-data user-data && return ${DS_FOUND} + is_container && return ${DS_NOT_FOUND} + + local unknown="Unknown" platform="" + if ec2_identify_platform "$unknown"; then + platform="$_RET" + else + warn "Failed to identify ec2 platform. Using '$unknown'." + platform=$unknown + fi + + debug 1 "ec2 platform is '$platform'." + if [ "$platform" != "$unknown" ]; then + return $DS_FOUND + fi + local default="true" if ec2_read_strict_setting "$default"; then strict="$_RET" -- cgit v1.2.3 From 5dd5b2cb539a84ed59f2b3181020d2bd18989718 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Feb 2017 14:19:20 -0500 Subject: Identify Brightbox as an Ec2 datasource user. Brightbox will identify their platform to the guest by setting the product serial to a string that ends with 'brightbox.com'. LP: #1661693 --- cloudinit/sources/DataSourceEc2.py | 8 +++++++- tools/ds-identify | 5 +++++ 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 26da263a..c7df8060 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -30,6 +30,7 @@ STRICT_ID_DEFAULT = "warn" class Platforms(object): ALIYUN = "AliYun" AWS = "AWS" + BRIGHTBOX = "Brightbox" SEEDED = "Seeded" UNKNOWN = "Unknown" @@ -325,10 +326,15 @@ def identify_aws(data): return None +def identify_brightbox(data): + if data['serial'].endswith('brightbox.com'): + return Platforms.BRIGHTBOX + + def identify_platform(): # identify the platform and return an entry in Platforms. 
data = _collect_platform_data() - checks = (identify_aws, lambda x: Platforms.UNKNOWN) + checks = (identify_aws, identify_brightbox, lambda x: Platforms.UNKNOWN) for checker in checks: try: result = checker(data) diff --git a/tools/ds-identify b/tools/ds-identify index dfa856ff..c39956fc 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -639,6 +639,11 @@ ec2_identify_platform() { local default="$1" local serial="${DI_DMI_PRODUCT_SERIAL}" + # brightbox https://bugs.launchpad.net/cloud-init/+bug/1661693 + case "$serial" in + *brightbox.com) _RET="Brightbox"; return 0;; + esac + # AWS http://docs.aws.amazon.com/AWSEC2/ # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" -- cgit v1.2.3 From ade8c2e0266b020089145075e8236b95c000a3cb Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Feb 2017 17:14:45 -0500 Subject: Move warning functionality to cloudinit/warnings.py This moves the warning code that was added specifically for EC2 into a generic path at cloudinit/warnings.py. It also adds support for writing warning files into the warnings directory to be shown by Z99-cloudinit-warnings.sh. 
--- cloudinit/helpers.py | 1 + cloudinit/sources/DataSourceEc2.py | 47 ++------------- cloudinit/warnings.py | 115 +++++++++++++++++++++++++++++++++++++ 3 files changed, 121 insertions(+), 42 deletions(-) create mode 100644 cloudinit/warnings.py (limited to 'cloudinit') diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 38f5f899..7435d58d 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -340,6 +340,7 @@ class Paths(object): "vendordata": "vendor-data.txt.i", "instance_id": ".instance-id", "manual_clean_marker": "manual-clean", + "warnings": "warnings", } # Set when a datasource becomes active self.datasource = ds diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index c7df8060..6f01a139 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -9,7 +9,6 @@ # This file is part of cloud-init. See LICENSE file for license information. import os -import textwrap import time from cloudinit import ec2_utils as ec2 @@ -17,6 +16,7 @@ from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper as uhelp from cloudinit import util +from cloudinit import warnings LOG = logging.getLogger(__name__) @@ -224,7 +224,8 @@ class DataSourceEc2(sources.DataSource): return if self.cloud_platform == Platforms.UNKNOWN: warn_if_necessary( - util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT)) + util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT), + cfg) def read_strict_mode(cfgval, default): @@ -265,7 +266,7 @@ def parse_strict_mode(cfgval): return mode, sleep -def warn_if_necessary(cfgval): +def warn_if_necessary(cfgval, cfg): try: mode, sleep = parse_strict_mode(cfgval) except ValueError as e: @@ -275,45 +276,7 @@ def warn_if_necessary(cfgval): if mode == "false": return - show_warning(sleep) - - -def show_warning(sleep): - message = textwrap.dedent(""" - **************************************************************** - # This system 
is using the EC2 Metadata Service, but does not # - # appear to be running on Amazon EC2 or one of cloud-init's # - # known platforms that provide a EC2 Metadata service. In the # - # future, cloud-init may stop reading metadata from the EC2 # - # Metadata Service unless the platform can be identified # - # # - # If you are seeing this message, please file a bug against # - # cloud-init at https://bugs.launchpad.net/cloud-init/+filebug # - # Make sure to include the cloud provider your instance is # - # running on. # - # # - # For more information see # - # https://bugs.launchpad.net/cloud-init/+bug/1660385 # - # # - # After you have filed a bug, you can disable this warning by # - # launching your instance with the cloud-config below, or # - # putting that content into # - # /etc/cloud/cloud.cfg.d/99-ec2-datasource.cfg # - # # - # #cloud-config # - # datasource: # - # Ec2: # - # strict_id: false # - # # - """) - closemsg = "" - if sleep: - closemsg = " [sleeping for %d seconds] " % sleep - message += closemsg.center(64, "*") - print(message) - LOG.warn(message) - if sleep: - time.sleep(sleep) + warnings.show_warning('non_ec2_md', cfg, mode=True, sleep=sleep) def identify_aws(data): diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py new file mode 100644 index 00000000..77c092f9 --- /dev/null +++ b/cloudinit/warnings.py @@ -0,0 +1,115 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import helpers +from cloudinit import log as logging +from cloudinit import util + +import os +import time + +LOG = logging.getLogger() + +WARNINGS = { + 'non_ec2_md': """ +This system is using the EC2 Metadata Service, but does not appear to +be running on Amazon EC2 or one of cloud-init's known platforms that +provide a EC2 Metadata service. In the future, cloud-init may stop +reading metadata from the EC2 Metadata Service unless the platform can +be identified. 
+ +If you are seeing this message, please file a bug against +cloud-init at + https://bugs.launchpad.net/cloud-init/+filebug?field.tags=dsid +Make sure to include the cloud provider your instance is +running on. + +For more information see + https://bugs.launchpad.net/bugs/1660385 + +After you have filed a bug, you can disable this warning by +launching your instance with the cloud-config below, or +putting that content into + /etc/cloud/cloud.cfg.d/99-ec2-datasource.cfg + +#cloud-config +datasource: + Ec2: + strict_id: false""", +} + + +def _get_warn_dir(cfg): + paths = helpers.Paths( + path_cfgs=cfg.get('system_info', {}).get('paths', {})) + return paths.get_ipath_cur('warnings') + + +def _load_warn_cfg(cfg, name, mode=True, sleep=None): + # parse cfg['warnings']['name'] returning boolean, sleep + # expected value is form of: + # (on|off|true|false|sleep)[,sleeptime] + # boolean True == on, False == off + default = (mode, sleep) + if not cfg or not isinstance(cfg, dict): + return default + + ncfg = util.get_cfg_by_path(cfg, ('warnings', name)) + if ncfg is None: + return default + + if ncfg in ("on", "true", True): + return True, None + + if ncfg in ("off", "false", False): + return False, None + + mode, _, csleep = ncfg.partition(",") + if mode != "sleep": + return default + + if csleep: + try: + sleep = int(csleep) + except ValueError: + return default + + return True, sleep + + +def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs): + # kwargs are used for .format of the message. + # sleep and mode are default values used if + # cfg['warnings']['name'] is not present. 
+ if cfg is None: + cfg = {} + + mode, sleep = _load_warn_cfg(cfg, name, mode=mode, sleep=sleep) + if not mode: + return + + msg = WARNINGS[name].format(**kwargs) + msgwidth = 70 + linewidth = msgwidth + 4 + + fmt = "# %%-%ds #" % msgwidth + topline = "*" * linewidth + "\n" + fmtlines = [] + for line in msg.strip("\n").splitlines(): + fmtlines.append(fmt % line) + + closeline = topline + if sleep: + sleepmsg = " [sleeping for %d seconds] " % sleep + closeline = sleepmsg.center(linewidth, "*") + "\n" + + util.write_file( + os.path.join(_get_warn_dir(cfg), name), + topline + "\n".join(fmtlines) + "\n" + topline) + + LOG.warn(topline + "\n".join(fmtlines) + "\n" + closeline) + + if sleep: + LOG.debug("sleeping %d seconds for warning '%s'" % (sleep, name)) + time.sleep(sleep) + +# vi: ts=4 expandtab -- cgit v1.2.3 From 79db2e2436d91510aceb8c036c4a945362c85a52 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 1 Mar 2017 15:50:40 -0500 Subject: Support warning if the used datasource is not in ds-identify's list. If ds-identify is in report mode, and the datasource that is found is not in the list, then warn the user of this situation. 
--- cloudinit/cmd/main.py | 39 +++++++++++++++++++++++++++++++++++++++ cloudinit/warnings.py | 24 ++++++++++++++++++++++++ 2 files changed, 63 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 7c652574..6ff4e1c0 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -29,6 +29,7 @@ from cloudinit import templater from cloudinit import url_helper from cloudinit import util from cloudinit import version +from cloudinit import warnings from cloudinit import reporting from cloudinit.reporting import events @@ -413,10 +414,48 @@ def main_init(name, args): # give the activated datasource a chance to adjust init.activate_datasource() + di_report_warn(datasource=init.datasource, cfg=init.cfg) + # Stage 10 return (init.datasource, run_module_section(mods, name, name)) +def di_report_warn(datasource, cfg): + if 'di_report' not in cfg: + LOG.debug("no di_report found in config.") + return + + dicfg = cfg.get('di_report', {}) + if not isinstance(dicfg, dict): + LOG.warn("di_report config not a dictionary: %s", dicfg) + return + + dslist = dicfg.get('datasource_list') + if dslist is None: + LOG.warn("no 'datasource_list' found in di_report.") + return + elif not isinstance(dslist, list): + LOG.warn("di_report/datasource_list not a list: %s", dslist) + return + + # ds.__module__ is like cloudinit.sources.DataSourceName + # where Name is the thing that shows up in datasource_list. 
+ modname = datasource.__module__.rpartition(".")[2] + if modname.startswith(sources.DS_PREFIX): + modname = modname[len(sources.DS_PREFIX):] + else: + LOG.warn("Datasource '%s' came from unexpected module '%s'.", + datasource, modname) + + if modname in dslist: + LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s", + datasource, modname, dslist) + return + + warnings.show_warning('dsid_missing_source', cfg, + source=modname, dslist=str(dslist)) + + def main_modules(action_name, args): name = args.mode # Cloud-init 'modules' stages are broken up into the following sub-stages diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py index 77c092f9..3206d4e9 100644 --- a/cloudinit/warnings.py +++ b/cloudinit/warnings.py @@ -35,6 +35,30 @@ putting that content into datasource: Ec2: strict_id: false""", + 'dsid_missing_source': """ +A new feature in cloud-init identified possible datasources for +this system as: + {dslist} +However, the datasource used was: {source} + +In the future, cloud-init will only attempt to use datasources that +are identified or specifically configured. +For more information see + https://bugs.launchpad.net/bugs/1669675 + +If you are seeing this message, please file a bug against +cloud-init at + https://bugs.launchpad.net/cloud-init/+filebug?field.tags=dsid +Make sure to include the cloud provider your instance is +running on. + +After you have filed a bug, you can disable this warning by launching +your instance with the cloud-config below, or putting that content +into /etc/cloud/cloud.cfg.d/99-warnings.cfg + +#cloud-config +warnings: + dsid_missing_source: off""", } -- cgit v1.2.3