From 46cb6716c27d4496ce3d2bea7684803f522f277d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 22 Feb 2018 16:18:02 -0500 Subject: subp: Fix subp usage with non-ascii characters when no system locale. If python starts up without a locale set, then its default encoding ends up set as ascii. That is not easily changed with the likes of setlocale. In order to avoid UnicodeDecodeErrors cloud-init will encode to bytes a python3 string or python2 basestring so that the values passed to Popen are already bytes. LP: #1751051 --- cloudinit/util.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/util.py b/cloudinit/util.py index 338fb971..5a919cfe 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1865,8 +1865,13 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, if not isinstance(data, bytes): data = data.encode() + # Popen converts entries in the arguments array from non-bytes to bytes. + # When locale is unset it may use ascii for that encoding which can + # cause UnicodeDecodeErrors. (LP: #1751051) + bytes_args = [x if isinstance(x, six.binary_type) else x.encode("utf-8") + for x in args] try: - sp = subprocess.Popen(args, stdout=stdout, + sp = subprocess.Popen(bytes_args, stdout=stdout, stderr=stderr, stdin=stdin, env=env, shell=shell) (out, err) = sp.communicate(data) -- cgit v1.2.3 From d67636f7cc3df3df69b438c27ae0cd8a4416048d Mon Sep 17 00:00:00 2001 From: Romanos Skiadas Date: Thu, 22 Feb 2018 16:40:05 -0500 Subject: Implement puppet 4 support Make puppet installation more configurable by: - Adding a package_name parameter - Exposing the puppet configuration and puppet ssl directories as parameters. These default to the previous values if unset, but can be set to the new values puppetlabs requires for its puppet 4.x packages. This way puppet 4 configuration is now possible. LP: #1446804 --- cloudinit/config/cc_puppet.py | 54 ++++++++++++++++++++++++++++++++----------- cloudinit/util.py | 2 +- 2 files changed, 41 insertions(+), 15 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 28b1d568..57a170fb 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -21,6 +21,13 @@ under ``version``, and defaults to ``none``, which selects the latest version in the repos. If the ``puppet`` config key exists in the config archive, this module will attempt to start puppet even if no installation was performed. +The module also provides keys for configuring the new puppet 4 paths and +installing the puppet package from the puppetlabs repositories: +https://docs.puppet.com/puppet/4.2/reference/whered_it_go.html +The keys are ``package_name``, ``conf_file`` and ``ssl_dir``. If unset, their +values will default to ones that work with puppet 3.x and with distributions +that ship modified puppet 4.x that uses the old paths. + Puppet configuration can be specified under the ``conf`` key. The configuration is specified as a dictionary containing high-level ``
`` keys and lists of ``=`` pairs within each section. Each section @@ -44,6 +51,9 @@ in pem format as a multi-line string (using the ``|`` yaml notation). puppet: install: version: + conf_file: '/etc/puppet/puppet.conf' + ssl_dir: '/var/lib/puppet/ssl' + package_name: 'puppet' conf: agent: server: "puppetmaster.example.org" @@ -63,9 +73,17 @@ from cloudinit import helpers from cloudinit import util PUPPET_CONF_PATH = '/etc/puppet/puppet.conf' -PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/' PUPPET_SSL_DIR = '/var/lib/puppet/ssl' -PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem' +PUPPET_PACKAGE_NAME = 'puppet' + + +class PuppetConstants(object): + + def __init__(self, puppet_conf_file, puppet_ssl_dir, log): + self.conf_path = puppet_conf_file + self.ssl_dir = puppet_ssl_dir + self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs") + self.ssl_cert_path = os.path.join(self.ssl_cert_dir, "ca.pem") def _autostart_puppet(log): @@ -92,22 +110,29 @@ def handle(name, cfg, cloud, log, _args): return puppet_cfg = cfg['puppet'] - # Start by installing the puppet package if necessary... install = util.get_cfg_option_bool(puppet_cfg, 'install', True) version = util.get_cfg_option_str(puppet_cfg, 'version', None) + package_name = util.get_cfg_option_str( + puppet_cfg, 'package_name', PUPPET_PACKAGE_NAME) + conf_file = util.get_cfg_option_str( + puppet_cfg, 'conf_file', PUPPET_CONF_PATH) + ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR) + + p_constants = PuppetConstants(conf_file, ssl_dir, log) if not install and version: log.warn(("Puppet install set false but version supplied," " doing nothing.")) elif install: log.debug(("Attempting to install puppet %s,"), version if version else 'latest') - cloud.distro.install_packages(('puppet', version)) + + cloud.distro.install_packages((package_name, version)) # ... 
and then update the puppet configuration if 'conf' in puppet_cfg: # Add all sections from the conf object to puppet.conf - contents = util.load_file(PUPPET_CONF_PATH) + contents = util.load_file(p_constants.conf_path) # Create object for reading puppet.conf values puppet_config = helpers.DefaultingConfigParser() # Read puppet.conf values from original file in order to be able to @@ -116,19 +141,19 @@ def handle(name, cfg, cloud, log, _args): cleaned_lines = [i.lstrip() for i in contents.splitlines()] cleaned_contents = '\n'.join(cleaned_lines) puppet_config.readfp(StringIO(cleaned_contents), - filename=PUPPET_CONF_PATH) + filename=p_constants.conf_path) for (cfg_name, cfg) in puppet_cfg['conf'].items(): # Cert configuration is a special case # Dump the puppet master ca certificate in the correct place if cfg_name == 'ca_cert': # Puppet ssl sub-directory isn't created yet # Create it with the proper permissions and ownership - util.ensure_dir(PUPPET_SSL_DIR, 0o771) - util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root') - util.ensure_dir(PUPPET_SSL_CERT_DIR) - util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root') - util.write_file(PUPPET_SSL_CERT_PATH, cfg) - util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root') + util.ensure_dir(p_constants.ssl_dir, 0o771) + util.chownbyname(p_constants.ssl_dir, 'puppet', 'root') + util.ensure_dir(p_constants.ssl_cert_dir) + util.chownbyname(p_constants.ssl_cert_dir, 'puppet', 'root') + util.write_file(p_constants.ssl_cert_path, cfg) + util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root') else: # Iterate through the config items, we'll use ConfigParser.set # to overwrite or create new items as needed @@ -144,8 +169,9 @@ def handle(name, cfg, cloud, log, _args): puppet_config.set(cfg_name, o, v) # We got all our config as wanted we'll rename # the previous puppet.conf and create our new one - util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH)) - util.write_file(PUPPET_CONF_PATH, puppet_config.stringify()) + util.rename(p_constants.conf_path, "%s.old" + % (p_constants.conf_path)) + util.write_file(p_constants.conf_path, puppet_config.stringify()) # Set it up so it autostarts _autostart_puppet(log) diff --git a/cloudinit/util.py b/cloudinit/util.py index 5a919cfe..02dc2ce8 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1746,7 +1746,7 @@ def chmod(path, mode): def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False): """ Writes a file with the given content and sets the file mode as specified. - Resotres the SELinux context if possible. + Restores the SELinux context if possible. @param filename: The full path of the file to write. @param content: The content to write to the file. -- cgit v1.2.3 From ffc6917aa0b97811c1e8503cd4cff9f11c15def1 Mon Sep 17 00:00:00 2001 From: Rémy Léone Date: Thu, 1 Mar 2018 18:23:32 +0100 Subject: Change some list creation and population to literal. This will provide a small performance improvement and shorter code. 
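
The pattern applied throughout the diff below is purely mechanical: a list that was built up with repeated append() calls is collapsed into a single list literal. A minimal standalone sketch of the before/after shape (using the pacman arguments from arch.py as the example; this sketch is illustrative, not an excerpt of the patch itself):

# Before: populate the argument list one append() at a time.
cmd = ['pacman']
cmd.append('-Sy')
cmd.append('--quiet')
cmd.append('--noconfirm')

# After: a single literal is shorter and avoids the repeated method calls.
cmd = ['pacman', '-Sy', '--quiet', '--noconfirm']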
--- cloudinit/cmd/main.py | 10 ++++------ cloudinit/config/cc_keys_to_console.py | 4 +--- cloudinit/config/cc_ssh_authkey_fingerprints.py | 9 ++++----- cloudinit/distros/arch.py | 5 +---- cloudinit/distros/opensuse.py | 5 ++--- cloudinit/sources/DataSourceOpenNebula.py | 5 +---- cloudinit/stages.py | 3 +-- cloudinit/util.py | 3 +-- 8 files changed, 15 insertions(+), 29 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index d2f1b778..fcddd75c 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -215,12 +215,10 @@ def main_init(name, args): if args.local: deps = [sources.DEP_FILESYSTEM] - early_logs = [] - early_logs.append( - attempt_cmdline_url( - path=os.path.join("%s.d" % CLOUD_CONFIG, - "91_kernel_cmdline_url.cfg"), - network=not args.local)) + early_logs = [attempt_cmdline_url( + path=os.path.join("%s.d" % CLOUD_CONFIG, + "91_kernel_cmdline_url.cfg"), + network=not args.local)] # Cloud-init 'init' stage is broken up into the following sub-stages # 1. Ensure that the init object fetches its config without errors diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index efedd4ae..aff4010e 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -63,9 +63,7 @@ def handle(name, cfg, cloud, log, _args): ["ssh-dss"]) try: - cmd = [helper_path] - cmd.append(','.join(fp_blacklist)) - cmd.append(','.join(key_blacklist)) + cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)] (stdout, _stderr) = util.subp(cmd) util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True) diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 35d8c57f..98b0e665 100755 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -77,11 +77,10 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', tbl = SimpleTable(tbl_fields) for entry in key_entries: if _is_printable_key(entry): - row = [] - row.append(entry.keytype or '-') - row.append(_gen_fingerprint(entry.base64, hash_meth) or '-') - row.append(entry.options or '-') - row.append(entry.comment or '-') + row = [entry.keytype or '-', + _gen_fingerprint(entry.base64, hash_meth) or '-', + entry.options or '-', + entry.comment or '-'] tbl.add_row(row) authtbl_s = tbl.get_string() authtbl_lines = authtbl_s.splitlines() diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index f87a3432..b814c8ba 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -129,11 +129,8 @@ class Distro(distros.Distro): if pkgs is None: pkgs = [] - cmd = ['pacman'] + cmd = ['pacman', "-Sy", "--quiet", "--noconfirm"] # Redirect output - cmd.append("-Sy") - cmd.append("--quiet") - cmd.append("--noconfirm") if args and isinstance(args, str): cmd.append(args) diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index a219e9fb..162dfa05 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -67,11 +67,10 @@ class Distro(distros.Distro): if pkgs is None: pkgs = [] - cmd = ['zypper'] # No user interaction possible, enable non-interactive mode - cmd.append('--non-interactive') + cmd = ['zypper', '--non-interactive'] - # Comand is the operation, such as install + # Command is the operation, such as install if command == 'upgrade': command = 'update' cmd.append(command) diff --git 
a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index ce47b6bd..9450835e 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -173,10 +173,7 @@ class OpenNebulaNetwork(object): def gen_conf(self): global_dns = self.context.get('DNS', "").split() - conf = [] - conf.append('auto lo') - conf.append('iface lo inet loopback') - conf.append('') + conf = ['auto lo', 'iface lo inet loopback', ''] for mac, dev in self.ifaces.items(): mac = mac.lower() diff --git a/cloudinit/stages.py b/cloudinit/stages.py index d0452688..bc4ebc85 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -132,8 +132,7 @@ class Init(object): return initial_dirs def purge_cache(self, rm_instance_lnk=False): - rm_list = [] - rm_list.append(self.paths.boot_finished) + rm_list = [self.paths.boot_finished] if rm_instance_lnk: rm_list.append(self.paths.instance_link) for f in rm_list: diff --git a/cloudinit/util.py b/cloudinit/util.py index 02dc2ce8..b03b80c3 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -716,8 +716,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): def make_url(scheme, host, port=None, path='', params='', query='', fragment=''): - pieces = [] - pieces.append(scheme or '') + pieces = [scheme or ''] netloc = '' if host: -- cgit v1.2.3 From f9f7ffd7156db28391c006d3ac7685fa6c5afbdd Mon Sep 17 00:00:00 2001 From: Rémy Léone Date: Thu, 1 Mar 2018 18:19:43 +0100 Subject: Simplify some comparisions. Just replace a couple things like: if b > a and b < c: with: if a < b < c: --- cloudinit/url_helper.py | 4 ++-- cloudinit/util.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 0a5be0b3..4e814a5f 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -47,7 +47,7 @@ try: _REQ_VER = LooseVersion(_REQ.version) # pylint: disable=no-member if _REQ_VER >= LooseVersion('0.8.8'): SSL_ENABLED = True - if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'): + if LooseVersion('0.7.0') <= _REQ_VER < LooseVersion('1.0.0'): CONFIG_ENABLED = True except ImportError: pass @@ -121,7 +121,7 @@ class UrlResponse(object): upper = 300 if redirects_ok: upper = 400 - if self.code >= 200 and self.code < upper: + if 200 <= self.code < upper: return True else: return False diff --git a/cloudinit/util.py b/cloudinit/util.py index b03b80c3..1d6cd1c5 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -546,7 +546,7 @@ def is_ipv4(instr): return False try: - toks = [x for x in toks if int(x) < 256 and int(x) >= 0] + toks = [x for x in toks if 0 <= int(x) < 256] except Exception: return False -- cgit v1.2.3 From b7a790246e52520b47a467fe89b81199b9cf03f6 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 9 Mar 2018 04:22:24 +0100 Subject: shellify: raise TypeError on bad input. This makes 2 changes to shellify's behavior: a.) raise a TypeError rather than a RuntimeError. b.) raise a TypeError if input is not a list or tuple. 
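
A rough sketch of the resulting behavior, mirroring the new unit tests in the diff below (illustrative only, not part of the patch):

from cloudinit import util

# A list (or tuple) of commands is accepted; list items get single-quoted.
script = util.shellify(["echo hi mom", ["echo", "hi dad"]])
# script == "#!/bin/sh\necho hi mom\n'echo' 'hi dad'\n"

# Anything else -- a bare string, a dict, or an int inside the list --
# now raises TypeError instead of RuntimeError.
try:
    util.shellify("foobar")
except TypeError as err:
    print(err)  # Input to shellify was type 'str'. Expected list or tuple.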
--- cloudinit/tests/test_util.py | 23 ++++++++++++++++++++++ cloudinit/util.py | 14 +++++++++---- .../unittests/test_handler/test_handler_bootcmd.py | 7 ++++--- 3 files changed, 37 insertions(+), 7 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index ba6bf699..c3e2e404 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -44,3 +44,26 @@ class TestUtil(CiTestCase): m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime') is_rw = util.mount_is_read_write('/') self.assertEqual(is_rw, False) + + +class TestShellify(CiTestCase): + + def test_input_dict_raises_type_error(self): + self.assertRaisesRegex( + TypeError, 'Input.*was.*dict.*xpected', + util.shellify, {'mykey': 'myval'}) + + def test_input_str_raises_type_error(self): + self.assertRaisesRegex( + TypeError, 'Input.*was.*str.*xpected', util.shellify, "foobar") + + def test_value_with_int_raises_type_error(self): + self.assertRaisesRegex( + TypeError, 'shellify.*int', util.shellify, ["foo", 1]) + + def test_supports_strings_and_lists(self): + self.assertEqual( + '\n'.join(["#!/bin/sh", "echo hi mom", "'echo' 'hi dad'", + "'echo' 'hi' 'sis'", ""]), + util.shellify(["echo hi mom", ["echo", "hi dad"], + ('echo', 'hi', 'sis')])) diff --git a/cloudinit/util.py b/cloudinit/util.py index 1d6cd1c5..083a8efe 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1922,6 +1922,11 @@ def abs_join(*paths): # if it is an array, shell protect it (with single ticks) # if it is a string, do nothing def shellify(cmdlist, add_header=True): + if not isinstance(cmdlist, (tuple, list)): + raise TypeError( + "Input to shellify was type '%s'. Expected list or tuple." % + (type_utils.obj_name(cmdlist))) + content = '' if add_header: content += "#!/bin/sh\n" @@ -1930,7 +1935,7 @@ def shellify(cmdlist, add_header=True): for args in cmdlist: # If the item is a list, wrap all items in single tick. # If its not, then just write it directly. - if isinstance(args, list): + if isinstance(args, (list, tuple)): fixed = [] for f in args: fixed.append("'%s'" % (six.text_type(f).replace("'", escaped))) @@ -1940,9 +1945,10 @@ def shellify(cmdlist, add_header=True): content = "%s%s\n" % (content, args) cmds_made += 1 else: - raise RuntimeError(("Unable to shellify type %s" - " which is not a list or string") - % (type_utils.obj_name(args))) + raise TypeError( + "Unable to shellify type '%s'. Expected list, string, tuple. " + "Got: %s" % (type_utils.obj_name(args), args)) + LOG.debug("Shellified %s commands.", cmds_made) return content diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py index dbf43e0d..09d4c681 100644 --- a/tests/unittests/test_handler/test_handler_bootcmd.py +++ b/tests/unittests/test_handler/test_handler_bootcmd.py @@ -69,7 +69,7 @@ class TestBootcmd(CiTestCase): cc_bootcmd.handle('cc_bootcmd', invalid_config, cc, LOG, []) self.assertIn('Failed to shellify bootcmd', self.logs.getvalue()) self.assertEqual( - "'int' object is not iterable", + "Input to shellify was type 'int'. 
Expected list or tuple.", str(context_manager.exception)) @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency") @@ -98,7 +98,7 @@ class TestBootcmd(CiTestCase): invalid_config = { 'bootcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} cc = self._get_cloud('ubuntu') - with self.assertRaises(RuntimeError) as context_manager: + with self.assertRaises(TypeError) as context_manager: cc_bootcmd.handle('cc_bootcmd', invalid_config, cc, LOG, []) expected_warnings = [ 'bootcmd.1: 20 is not valid under any of the given schemas', @@ -110,7 +110,8 @@ class TestBootcmd(CiTestCase): self.assertIn(warning, logs) self.assertIn('Failed to shellify', logs) self.assertEqual( - 'Unable to shellify type int which is not a list or string', + ("Unable to shellify type 'int'. Expected list, string, tuple. " + "Got: 20"), str(context_manager.exception)) def test_handler_creates_and_runs_bootcmd_script_with_instance_id(self): -- cgit v1.2.3 From 133ad2cb327ad17b7b81319fac8f9f14577c04df Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 14 Mar 2018 23:38:07 -0600 Subject: set_hostname: When present in metadata, set it before network bringup. When instance meta-data provides hostname information, run cc_set_hostname in the init-local or init-net stage before network comes up. Prevent an initial DHCP request which leaks the stock cloud-image default hostname before the meta-data provided hostname was processed. A leaked cloud-image hostname adversely affects Dynamic DNS which would reallocate 'ubuntu' hostname in DNS to every instance brought up by cloud-init. These instances would only update DNS to the cloud-init configured hostname upon DHCP lease renewal. This branch extends the get_hostname methods in datasource, cloud and util to limit results to metadata_only to avoid extra cost of querying the distro for hostname information if metadata does not provide that information. LP: #1746455 --- cloudinit/cloud.py | 5 +- cloudinit/cmd/main.py | 25 ++++ cloudinit/cmd/tests/test_main.py | 161 +++++++++++++++++++++ cloudinit/config/cc_set_hostname.py | 41 +++++- cloudinit/sources/__init__.py | 21 ++- cloudinit/sources/tests/test_init.py | 70 ++++++++- cloudinit/tests/test_util.py | 74 ++++++++++ cloudinit/util.py | 17 ++- .../test_handler/test_handler_set_hostname.py | 57 +++++++- 9 files changed, 449 insertions(+), 22 deletions(-) create mode 100644 cloudinit/cmd/tests/test_main.py (limited to 'cloudinit/util.py') diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index ba616781..6d12c437 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -78,8 +78,9 @@ class Cloud(object): def get_locale(self): return self.datasource.get_locale() - def get_hostname(self, fqdn=False): - return self.datasource.get_hostname(fqdn=fqdn) + def get_hostname(self, fqdn=False, metadata_only=False): + return self.datasource.get_hostname( + fqdn=fqdn, metadata_only=metadata_only) def device_name_to_device(self, name): return self.datasource.device_name_to_device(name) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index fcddd75c..3f2dbb93 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -40,6 +40,7 @@ from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, from cloudinit import atomic_helper +from cloudinit.config import cc_set_hostname from cloudinit.dhclient_hook import LogDhclient @@ -352,6 +353,11 @@ def main_init(name, args): LOG.debug("[%s] %s will now be targeting instance id: %s. 
new=%s", mode, name, iid, init.is_new_instance()) + if mode == sources.DSMODE_LOCAL: + # Before network comes up, set any configured hostname to allow + # dhcp clients to advertize this hostname to any DDNS services + # LP: #1746455. + _maybe_set_hostname(init, stage='local', retry_stage='network') init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL)) if mode == sources.DSMODE_LOCAL: @@ -368,6 +374,7 @@ def main_init(name, args): init.setup_datasource() # update fully realizes user-data (pulling in #include if necessary) init.update() + _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config') # Stage 7 try: # Attempt to consume the data per instance. @@ -681,6 +688,24 @@ def status_wrapper(name, args, data_d=None, link_d=None): return len(v1[mode]['errors']) +def _maybe_set_hostname(init, stage, retry_stage): + """Call set-hostname if metadata, vendordata or userdata provides it. + + @param stage: String representing current stage in which we are running. + @param retry_stage: String represented logs upon error setting hostname. + """ + cloud = init.cloudify() + (hostname, _fqdn) = util.get_hostname_fqdn( + init.cfg, cloud, metadata_only=True) + if hostname: # meta-data or user-data hostname content + try: + cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None) + except cc_set_hostname.SetHostnameError as e: + LOG.debug( + 'Failed setting hostname in %s stage. Will' + ' retry in %s stage. Error: %s.', stage, retry_stage, str(e)) + + def main_features(name, args): sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n') diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py new file mode 100644 index 00000000..dbe421c0 --- /dev/null +++ b/cloudinit/cmd/tests/test_main.py @@ -0,0 +1,161 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from collections import namedtuple +import copy +import os +from six import StringIO + +from cloudinit.cmd import main +from cloudinit.util import ( + ensure_dir, load_file, write_file, yaml_dumps) +from cloudinit.tests.helpers import ( + FilesystemMockingTestCase, wrap_and_call) + +mypaths = namedtuple('MyPaths', 'run_dir') +myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand') + + +class TestMain(FilesystemMockingTestCase): + + with_logs = True + + def setUp(self): + super(TestMain, self).setUp() + self.new_root = self.tmp_dir() + self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root) + os.makedirs(self.cloud_dir) + self.replicateTestRoot('simple_ubuntu', self.new_root) + self.cfg = { + 'datasource_list': ['None'], + 'runcmd': ['ls /etc'], # test ALL_DISTROS + 'system_info': {'paths': {'cloud_dir': self.cloud_dir, + 'run_dir': self.new_root}}, + 'write_files': [ + { + 'path': '/etc/blah.ini', + 'content': 'blah', + 'permissions': 0o755, + }, + ], + 'cloud_init_modules': ['write-files', 'runcmd'], + } + cloud_cfg = yaml_dumps(self.cfg) + ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) + self.cloud_cfg_file = os.path.join( + self.new_root, 'etc', 'cloud', 'cloud.cfg') + write_file(self.cloud_cfg_file, cloud_cfg) + self.patchOS(self.new_root) + self.patchUtils(self.new_root) + self.stderr = StringIO() + self.patchStdoutAndStderr(stderr=self.stderr) + + def test_main_init_run_net_stops_on_file_no_net(self): + """When no-net file is present, main_init does not process modules.""" + stop_file = os.path.join(self.cloud_dir, 'data', 'no-net') # stop file + write_file(stop_file, '') + cmdargs = myargs( + debug=False, files=None, force=False, local=False, reporter=None, + subcommand='init') + (item1, item2) = wrap_and_call( + 'cloudinit.cmd.main', + {'util.close_stdin': True, + 'netinfo.debug_info': 'my net debug info', + 'util.fixup_output': ('outfmt', 'errfmt')}, + main.main_init, 'init', cmdargs) + # We should not run write_files module + self.assertFalse( + os.path.exists(os.path.join(self.new_root, 'etc/blah.ini')), + 'Unexpected run of write_files module produced blah.ini') + self.assertEqual([], item2) + # Instancify is called + instance_id_path = 'var/lib/cloud/data/instance-id' + self.assertFalse( + os.path.exists(os.path.join(self.new_root, instance_id_path)), + 'Unexpected call to datasource.instancify produced instance-id') + expected_logs = [ + "Exiting. 
stop file ['{stop_file}'] existed\n".format( + stop_file=stop_file), + 'my net debug info' # netinfo.debug_info + ] + for log in expected_logs: + self.assertIn(log, self.stderr.getvalue()) + + def test_main_init_run_net_runs_modules(self): + """Modules like write_files are run in 'net' mode.""" + cmdargs = myargs( + debug=False, files=None, force=False, local=False, reporter=None, + subcommand='init') + (item1, item2) = wrap_and_call( + 'cloudinit.cmd.main', + {'util.close_stdin': True, + 'netinfo.debug_info': 'my net debug info', + 'util.fixup_output': ('outfmt', 'errfmt')}, + main.main_init, 'init', cmdargs) + self.assertEqual([], item2) + # Instancify is called + instance_id_path = 'var/lib/cloud/data/instance-id' + self.assertEqual( + 'iid-datasource-none\n', + os.path.join(load_file( + os.path.join(self.new_root, instance_id_path)))) + # modules are run (including write_files) + self.assertEqual( + 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) + expected_logs = [ + 'network config is disabled by fallback', # apply_network_config + 'my net debug info', # netinfo.debug_info + 'no previous run detected' + ] + for log in expected_logs: + self.assertIn(log, self.stderr.getvalue()) + + def test_main_init_run_net_calls_set_hostname_when_metadata_present(self): + """When local-hostname metadata is present, call cc_set_hostname.""" + self.cfg['datasource'] = { + 'None': {'metadata': {'local-hostname': 'md-hostname'}}} + cloud_cfg = yaml_dumps(self.cfg) + write_file(self.cloud_cfg_file, cloud_cfg) + cmdargs = myargs( + debug=False, files=None, force=False, local=False, reporter=None, + subcommand='init') + + def set_hostname(name, cfg, cloud, log, args): + self.assertEqual('set-hostname', name) + updated_cfg = copy.deepcopy(self.cfg) + updated_cfg.update( + {'def_log_file': '/var/log/cloud-init.log', + 'log_cfgs': [], + 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'], + 'vendor_data': {'enabled': True, 'prefix': []}}) + updated_cfg.pop('system_info') + + self.assertEqual(updated_cfg, cfg) + self.assertEqual(main.LOG, log) + self.assertIsNone(args) + + (item1, item2) = wrap_and_call( + 'cloudinit.cmd.main', + {'util.close_stdin': True, + 'netinfo.debug_info': 'my net debug info', + 'cc_set_hostname.handle': {'side_effect': set_hostname}, + 'util.fixup_output': ('outfmt', 'errfmt')}, + main.main_init, 'init', cmdargs) + self.assertEqual([], item2) + # Instancify is called + instance_id_path = 'var/lib/cloud/data/instance-id' + self.assertEqual( + 'iid-datasource-none\n', + os.path.join(load_file( + os.path.join(self.new_root, instance_id_path)))) + # modules are run (including write_files) + self.assertEqual( + 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) + expected_logs = [ + 'network config is disabled by fallback', # apply_network_config + 'my net debug info', # netinfo.debug_info + 'no previous run detected' + ] + for log in expected_logs: + self.assertIn(log, self.stderr.getvalue()) + +# vi: ts=4 expandtab diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index aa3dfe5f..3d2b2da3 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -32,22 +32,51 @@ will be used. hostname: """ +import os + + +from cloudinit.atomic_helper import write_json from cloudinit import util +class SetHostnameError(Exception): + """Raised when the distro runs into an exception when setting hostname. 
+ + This may happen if we attempt to set the hostname early in cloud-init's + init-local timeframe as certain services may not be running yet. + """ + pass + + def handle(name, cfg, cloud, log, _args): if util.get_cfg_option_bool(cfg, "preserve_hostname", False): log.debug(("Configuration option 'preserve_hostname' is set," " not setting the hostname in module %s"), name) return - (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) + # Check for previous successful invocation of set-hostname + + # set-hostname artifact file accounts for both hostname and fqdn + # deltas. As such, it's format is different than cc_update_hostname's + # previous-hostname file which only contains the base hostname. + # TODO consolidate previous-hostname and set-hostname artifact files and + # distro._read_hostname implementation so we only validate one artifact. + prev_fn = os.path.join(cloud.get_cpath('data'), "set-hostname") + prev_hostname = {} + if os.path.exists(prev_fn): + prev_hostname = util.load_json(util.load_file(prev_fn)) + hostname_changed = (hostname != prev_hostname.get('hostname') or + fqdn != prev_hostname.get('fqdn')) + if not hostname_changed: + log.debug('No hostname changes. Skipping set-hostname') + return + log.debug("Setting the hostname to %s (%s)", fqdn, hostname) try: - log.debug("Setting the hostname to %s (%s)", fqdn, hostname) cloud.distro.set_hostname(hostname, fqdn) - except Exception: - util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn, - hostname) - raise + except Exception as e: + msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname) + util.logexc(log, msg) + raise SetHostnameError("%s: %s" % (msg, e)) + write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn}) # vi: ts=4 expandtab diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index a05ca2f6..df0b374a 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -276,21 +276,34 @@ class DataSource(object): return "iid-datasource" return str(self.metadata['instance-id']) - def get_hostname(self, fqdn=False, resolve_ip=False): + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): + """Get hostname or fqdn from the datasource. Look it up if desired. + + @param fqdn: Boolean, set True to return hostname with domain. + @param resolve_ip: Boolean, set True to attempt to resolve an ipv4 + address provided in local-hostname meta-data. + @param metadata_only: Boolean, set True to avoid looking up hostname + if meta-data doesn't have local-hostname present. + + @return: hostname or qualified hostname. Optionally return None when + metadata_only is True and local-hostname data is not available. + """ defdomain = "localdomain" defhost = "localhost" domain = defdomain if not self.metadata or 'local-hostname' not in self.metadata: + if metadata_only: + return None # this is somewhat questionable really. # the cloud datasource was asked for a hostname # and didn't have one. 
raising error might be more appropriate # but instead, basically look up the existing hostname toks = [] hostname = util.get_hostname() - fqdn = util.get_fqdn_from_hosts(hostname) - if fqdn and fqdn.find(".") > 0: - toks = str(fqdn).split(".") + hosts_fqdn = util.get_fqdn_from_hosts(hostname) + if hosts_fqdn and hosts_fqdn.find(".") > 0: + toks = str(hosts_fqdn).split(".") elif hostname and hostname.find(".") > 0: toks = str(hostname).split(".") elif hostname: diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index af151154..5065083c 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -7,7 +7,7 @@ import stat from cloudinit.helpers import Paths from cloudinit.sources import ( INSTANCE_JSON_FILE, DataSource) -from cloudinit.tests.helpers import CiTestCase, skipIf +from cloudinit.tests.helpers import CiTestCase, skipIf, mock from cloudinit.user_data import UserDataProcessor from cloudinit import util @@ -108,6 +108,74 @@ class TestDataSource(CiTestCase): self.assertEqual('userdata_raw', datasource.userdata_raw) self.assertEqual('vendordata_raw', datasource.vendordata_raw) + def test_get_hostname_strips_local_hostname_without_domain(self): + """Datasource.get_hostname strips metadata local-hostname of domain.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.assertTrue(datasource.get_data()) + self.assertEqual( + 'test-subclass-hostname', datasource.metadata['local-hostname']) + self.assertEqual('test-subclass-hostname', datasource.get_hostname()) + datasource.metadata['local-hostname'] = 'hostname.my.domain.com' + self.assertEqual('hostname', datasource.get_hostname()) + + def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self): + """Datasource.get_hostname with fqdn set gets qualified hostname.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.assertTrue(datasource.get_data()) + datasource.metadata['local-hostname'] = 'hostname.my.domain.com' + self.assertEqual( + 'hostname.my.domain.com', datasource.get_hostname(fqdn=True)) + + def test_get_hostname_without_metadata_uses_system_hostname(self): + """Datasource.gethostname runs util.get_hostname when no metadata.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.assertEqual({}, datasource.metadata) + mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' + with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: + with mock.patch(mock_fqdn) as m_fqdn: + m_gethost.return_value = 'systemhostname.domain.com' + m_fqdn.return_value = None # No maching fqdn in /etc/hosts + self.assertEqual('systemhostname', datasource.get_hostname()) + self.assertEqual( + 'systemhostname.domain.com', + datasource.get_hostname(fqdn=True)) + + def test_get_hostname_without_metadata_returns_none(self): + """Datasource.gethostname returns None when metadata_only and no MD.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.assertEqual({}, datasource.metadata) + mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' + with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: + with mock.patch(mock_fqdn) as m_fqdn: + self.assertIsNone(datasource.get_hostname(metadata_only=True)) + self.assertIsNone( + datasource.get_hostname(fqdn=True, 
metadata_only=True)) + self.assertEqual([], m_gethost.call_args_list) + self.assertEqual([], m_fqdn.call_args_list) + + def test_get_hostname_without_metadata_prefers_etc_hosts(self): + """Datasource.gethostname prefers /etc/hosts to util.get_hostname.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.assertEqual({}, datasource.metadata) + mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' + with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: + with mock.patch(mock_fqdn) as m_fqdn: + m_gethost.return_value = 'systemhostname.domain.com' + m_fqdn.return_value = 'fqdnhostname.domain.com' + self.assertEqual('fqdnhostname', datasource.get_hostname()) + self.assertEqual('fqdnhostname.domain.com', + datasource.get_hostname(fqdn=True)) + def test_get_data_write_json_instance_data(self): """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root.""" tmp = self.tmp_dir() diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index c3e2e404..d30643dc 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -16,6 +16,25 @@ MOUNT_INFO = [ ] +class FakeCloud(object): + + def __init__(self, hostname, fqdn): + self.hostname = hostname + self.fqdn = fqdn + self.calls = [] + + def get_hostname(self, fqdn=None, metadata_only=None): + myargs = {} + if fqdn is not None: + myargs['fqdn'] = fqdn + if metadata_only is not None: + myargs['metadata_only'] = metadata_only + self.calls.append(myargs) + if fqdn: + return self.fqdn + return self.hostname + + class TestUtil(CiTestCase): def test_parse_mount_info_no_opts_no_arg(self): @@ -67,3 +86,58 @@ class TestShellify(CiTestCase): "'echo' 'hi' 'sis'", ""]), util.shellify(["echo hi mom", ["echo", "hi dad"], ('echo', 'hi', 'sis')])) + + +class TestGetHostnameFqdn(CiTestCase): + + def test_get_hostname_fqdn_from_only_cfg_fqdn(self): + """When cfg only has the fqdn key, derive hostname and fqdn from it.""" + hostname, fqdn = util.get_hostname_fqdn( + cfg={'fqdn': 'myhost.domain.com'}, cloud=None) + self.assertEqual('myhost', hostname) + self.assertEqual('myhost.domain.com', fqdn) + + def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self): + """When cfg has both fqdn and hostname keys, return them.""" + hostname, fqdn = util.get_hostname_fqdn( + cfg={'fqdn': 'myhost.domain.com', 'hostname': 'other'}, cloud=None) + self.assertEqual('other', hostname) + self.assertEqual('myhost.domain.com', fqdn) + + def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self): + """When cfg has only hostname key which represents a fqdn, use that.""" + hostname, fqdn = util.get_hostname_fqdn( + cfg={'hostname': 'myhost.domain.com'}, cloud=None) + self.assertEqual('myhost', hostname) + self.assertEqual('myhost.domain.com', fqdn) + + def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self): + """When cfg has a hostname without a '.' 
query cloud.get_hostname.""" + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') + hostname, fqdn = util.get_hostname_fqdn( + cfg={'hostname': 'myhost'}, cloud=mycloud) + self.assertEqual('myhost', hostname) + self.assertEqual('cloudhost.mycloud.com', fqdn) + self.assertEqual( + [{'fqdn': True, 'metadata_only': False}], mycloud.calls) + + def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self): + """When cfg has neither hostname nor fqdn cloud.get_hostname.""" + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') + hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud) + self.assertEqual('cloudhost', hostname) + self.assertEqual('cloudhost.mycloud.com', fqdn) + self.assertEqual( + [{'fqdn': True, 'metadata_only': False}, + {'metadata_only': False}], mycloud.calls) + + def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): + """Calls to cloud.get_hostname pass the metadata_only parameter.""" + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') + hostname, fqdn = util.get_hostname_fqdn( + cfg={}, cloud=mycloud, metadata_only=True) + self.assertEqual( + [{'fqdn': True, 'metadata_only': True}, + {'metadata_only': True}], mycloud.calls) + +# vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 083a8efe..4504f053 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1025,9 +1025,16 @@ def dos2unix(contents): return contents.replace('\r\n', '\n') -def get_hostname_fqdn(cfg, cloud): - # return the hostname and fqdn from 'cfg'. If not found in cfg, - # then fall back to data from cloud +def get_hostname_fqdn(cfg, cloud, metadata_only=False): + """Get hostname and fqdn from config if present and fallback to cloud. + + @param cfg: Dictionary of merged user-data configuration (from init.cfg). + @param cloud: Cloud instance from init.cloudify(). + @param metadata_only: Boolean, set True to only query cloud meta-data, + returning None if not present in meta-data. + @return: a Tuple of strings , . Values can be none when + metadata_only is True and no cfg or metadata provides hostname info. + """ if "fqdn" in cfg: # user specified a fqdn. Default hostname then is based off that fqdn = cfg['fqdn'] @@ -1041,11 +1048,11 @@ def get_hostname_fqdn(cfg, cloud): else: # no fqdn set, get fqdn from cloud. 
# get hostname from cfg if available otherwise cloud - fqdn = cloud.get_hostname(fqdn=True) + fqdn = cloud.get_hostname(fqdn=True, metadata_only=metadata_only) if "hostname" in cfg: hostname = cfg['hostname'] else: - hostname = cloud.get_hostname() + hostname = cloud.get_hostname(metadata_only=metadata_only) return (hostname, fqdn) diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py index abdc17e7..d09ec23a 100644 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ b/tests/unittests/test_handler/test_handler_set_hostname.py @@ -11,6 +11,7 @@ from cloudinit.tests import helpers as t_help from configobj import ConfigObj import logging +import os import shutil from six import BytesIO import tempfile @@ -19,14 +20,18 @@ LOG = logging.getLogger(__name__) class TestHostname(t_help.FilesystemMockingTestCase): + + with_logs = True + def setUp(self): super(TestHostname, self).setUp() self.tmp = tempfile.mkdtemp() + util.ensure_dir(os.path.join(self.tmp, 'data')) self.addCleanup(shutil.rmtree, self.tmp) def _fetch_distro(self, kind): cls = distros.fetch(kind) - paths = helpers.Paths({}) + paths = helpers.Paths({'cloud_dir': self.tmp}) return cls(kind, {}, paths) def test_write_hostname_rhel(self): @@ -34,7 +39,7 @@ class TestHostname(t_help.FilesystemMockingTestCase): 'hostname': 'blah.blah.blah.yahoo.com', } distro = self._fetch_distro('rhel') - paths = helpers.Paths({}) + paths = helpers.Paths({'cloud_dir': self.tmp}) ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) @@ -51,7 +56,7 @@ class TestHostname(t_help.FilesystemMockingTestCase): 'hostname': 'blah.blah.blah.yahoo.com', } distro = self._fetch_distro('debian') - paths = helpers.Paths({}) + paths = helpers.Paths({'cloud_dir': self.tmp}) ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) @@ -65,7 +70,7 @@ class TestHostname(t_help.FilesystemMockingTestCase): 'hostname': 'blah.blah.blah.suse.com', } distro = self._fetch_distro('sles') - paths = helpers.Paths({}) + paths = helpers.Paths({'cloud_dir': self.tmp}) ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) @@ -74,4 +79,48 @@ class TestHostname(t_help.FilesystemMockingTestCase): contents = util.load_file(distro.hostname_conf_fn) self.assertEqual('blah', contents.strip()) + def test_multiple_calls_skips_unchanged_hostname(self): + """Only new hostname or fqdn values will generate a hostname call.""" + distro = self._fetch_distro('debian') + paths = helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle( + 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, []) + contents = util.load_file("/etc/hostname") + self.assertEqual('hostname1', contents.strip()) + cc_set_hostname.handle( + 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, []) + self.assertIn( + 'DEBUG: No hostname changes. 
Skipping set-hostname\n', + self.logs.getvalue()) + cc_set_hostname.handle( + 'cc_set_hostname', {'hostname': 'hostname2.me.com'}, cc, LOG, []) + contents = util.load_file("/etc/hostname") + self.assertEqual('hostname2', contents.strip()) + self.assertIn( + 'Non-persistently setting the system hostname to hostname2', + self.logs.getvalue()) + + def test_error_on_distro_set_hostname_errors(self): + """Raise SetHostnameError on exceptions from distro.set_hostname.""" + distro = self._fetch_distro('debian') + + def set_hostname_error(hostname, fqdn): + raise Exception("OOPS on: %s" % fqdn) + + distro.set_hostname = set_hostname_error + paths = helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + with self.assertRaises(cc_set_hostname.SetHostnameError) as ctx_mgr: + cc_set_hostname.handle( + 'somename', {'hostname': 'hostname1.me.com'}, cc, LOG, []) + self.assertEqual( + 'Failed to set the hostname to hostname1.me.com (hostname1):' + ' OOPS on: hostname1.me.com', + str(ctx_mgr.exception)) + # vi: ts=4 expandtab -- cgit v1.2.3 From 97012fbb5207ddf1d2dcbb1c5eae710c47ab8ec0 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 15 Mar 2018 11:40:13 -0600 Subject: util: Fix subp regression. Allow specifying subp command as a string. The command provided to subp can either be a string or a list. This patch fixes a regression which raised CalledProcessError whenever providing a string to subp. LP: #1755965 --- cloudinit/util.py | 10 ++++++++-- tests/unittests/test_util.py | 18 ++++++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/util.py b/cloudinit/util.py index 4504f053..823d80bf 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1874,8 +1874,14 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, # Popen converts entries in the arguments array from non-bytes to bytes. # When locale is unset it may use ascii for that encoding which can # cause UnicodeDecodeErrors. (LP: #1751051) - bytes_args = [x if isinstance(x, six.binary_type) else x.encode("utf-8") - for x in args] + if isinstance(args, six.binary_type): + bytes_args = args + elif isinstance(args, six.string_types): + bytes_args = args.encode("utf-8") + else: + bytes_args = [ + x if isinstance(x, six.binary_type) else x.encode("utf-8") + for x in args] try: sp = subprocess.Popen(bytes_args, stdout=stdout, stderr=stderr, stdin=stdin, diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 89ae40f5..499e7c9f 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -632,6 +632,24 @@ class TestSubp(helpers.CiTestCase): # but by using bash, we remove dependency on another program. 
return([BASH, '-c', 'printf "$@"', 'printf'] + list(args)) + def test_subp_handles_bytestrings(self): + """subp can run a bytestring command if shell is True.""" + tmp_file = self.tmp_path('test.out') + cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file) + (out, _err) = util.subp(cmd.encode('utf-8'), shell=True) + self.assertEqual(u'', out) + self.assertEqual(u'', _err) + self.assertEqual('HI MOM\n', util.load_file(tmp_file)) + + def test_subp_handles_strings(self): + """subp can run a string command if shell is True.""" + tmp_file = self.tmp_path('test.out') + cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file) + (out, _err) = util.subp(cmd, shell=True) + self.assertEqual(u'', out) + self.assertEqual(u'', _err) + self.assertEqual('HI MOM\n', util.load_file(tmp_file)) + def test_subp_handles_utf8(self): # The given bytes contain utf-8 accented characters as seen in e.g. # the "deja dup" package in Ubuntu. -- cgit v1.2.3 From a1f678f8ebc080d4737f32275f42947b84ae025a Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 16 Mar 2018 13:43:31 -0600 Subject: cc_snap: Add new module to install and configure snapd and snap packages. Support installing and configuring snaps on ubuntu systems. Now, cloud-config files can provide a list or dictionary of snap:assertions which will be allow configuration of snapd on a system via 'snap ack' calls. The snap:commands configuration option supports arbitrary system commands intended to interact with snappy's cli. This allows users to run arbitrary snappy commands to create users, download, install and configure snap packages and snapd. This branch also deprecates old snappy and snap_config modules leaving warnings in documentation and runtime for consumers of these modules. Deprecated snap* modules will be dropped in cloud-init v.18.2 release. --- cloudinit/config/cc_puppet.py | 8 +- cloudinit/config/cc_snap.py | 273 +++++++++++ cloudinit/config/cc_snap_config.py | 7 + cloudinit/config/cc_snappy.py | 8 + cloudinit/config/tests/test_snap.py | 533 +++++++++++++++++++++ cloudinit/util.py | 13 +- config/cloud.cfg.tmpl | 5 +- doc/rtd/conf.py | 1 + doc/rtd/topics/modules.rst | 1 + tests/cloud_tests/releases.yaml | 3 + tests/cloud_tests/testcases.yaml | 3 + tests/cloud_tests/testcases/__init__.py | 3 + tests/cloud_tests/testcases/base.py | 173 ++++++- .../testcases/main/command_output_simple.py | 17 +- tests/cloud_tests/testcases/modules/snap.py | 16 + tests/cloud_tests/testcases/modules/snap.yaml | 18 + tests/cloud_tests/testcases/modules/snappy.py | 2 + tests/cloud_tests/verify.py | 11 +- tests/unittests/test_handler/test_schema.py | 1 + tests/unittests/test_util.py | 33 ++ 20 files changed, 1098 insertions(+), 31 deletions(-) create mode 100644 cloudinit/config/cc_snap.py create mode 100644 cloudinit/config/tests/test_snap.py create mode 100644 tests/cloud_tests/testcases/modules/snap.py create mode 100644 tests/cloud_tests/testcases/modules/snap.yaml (limited to 'cloudinit/util.py') diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 57a170fb..297e0721 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -140,8 +140,9 @@ def handle(name, cfg, cloud, log, _args): # (TODO(harlowja) is this really needed??) 
cleaned_lines = [i.lstrip() for i in contents.splitlines()] cleaned_contents = '\n'.join(cleaned_lines) - puppet_config.readfp(StringIO(cleaned_contents), - filename=p_constants.conf_path) + puppet_config.readfp( # pylint: disable=W1505 + StringIO(cleaned_contents), + filename=p_constants.conf_path) for (cfg_name, cfg) in puppet_cfg['conf'].items(): # Cert configuration is a special case # Dump the puppet master ca certificate in the correct place @@ -149,8 +150,7 @@ def handle(name, cfg, cloud, log, _args): # Puppet ssl sub-directory isn't created yet # Create it with the proper permissions and ownership util.ensure_dir(p_constants.ssl_dir, 0o771) - util.chownbyname(p_constants.ssl_dir, 'puppet', 'root') - util.ensure_dir(p_constants.ssl_cert_dir) + util.chownbyname(p_constants.ssl_cert_dir, 'puppet', 'root') util.write_file(p_constants.ssl_cert_path, cfg) util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root') diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py new file mode 100644 index 00000000..db965291 --- /dev/null +++ b/cloudinit/config/cc_snap.py @@ -0,0 +1,273 @@ +# Copyright (C) 2018 Canonical Ltd. +# +# This file is part of cloud-init. See LICENSE file for license information. + +"""Snap: Install, configure and manage snapd and snap packages.""" + +import sys +from textwrap import dedent + +from cloudinit import log as logging +from cloudinit.config.schema import ( + get_schema_doc, validate_cloudconfig_schema) +from cloudinit.settings import PER_INSTANCE +from cloudinit import util + + +distros = ['ubuntu'] +frequency = PER_INSTANCE + +LOG = logging.getLogger(__name__) + +schema = { + 'id': 'cc_snap', + 'name': 'Snap', + 'title': 'Install, configure and manage snapd and snap packages', + 'description': dedent("""\ + This module provides a simple configuration namespace in cloud-init to + both setup snapd and install snaps. + + .. note:: + Both ``assertions`` and ``commands`` values can be either a + dictionary or a list. If these configs are provided as a + dictionary, the keys are only used to order the execution of the + assertions or commands and the dictionary is merged with any + vendor-data snap configuration provided. If a list is provided by + the user instead of a dict, any vendor-data snap configuration is + ignored. + + The ``assertions`` configuration option is a dictionary or list of + properly-signed snap assertions which will run before any snap + ``commands``. They will be added to snapd's assertion database by + invoking ``snap ack ``. + + Snap ``commands`` is a dictionary or list of individual snap + commands to run on the target system. These commands can be used to + create snap users, install snaps and provide snap configuration. + + .. note:: + If 'side-loading' private/unpublished snaps on an instance, it is + best to create a snap seed directory and seed.yaml manifest in + **/var/lib/snapd/seed/** which snapd automatically installs on + startup. + + **Development only**: The ``squashfuse_in_container`` boolean can be + set true to install squashfuse package when in a container to enable + snap installs. Default is false. 
+ """), + 'distros': distros, + 'examples': [dedent("""\ + snap: + assertions: + 00: | + signed_assertion_blob_here + 02: | + signed_assertion_blob_here + commands: + 00: snap create-user --sudoer --known @mydomain.com + 01: snap install canonical-livepatch + 02: canonical-livepatch enable + """), dedent("""\ + # LXC-based containers require squashfuse before snaps can be installed + snap: + commands: + 00: apt-get install squashfuse -y + 11: snap install emoj + + """), dedent("""\ + # Convenience: the snap command can be omitted when specifying commands + # as a list and 'snap' will automatically be prepended. + # The following commands are equivalent: + snap: + commands: + 00: ['install', 'vlc'] + 01: ['snap', 'install', 'vlc'] + 02: snap install vlc + 03: 'snap install vlc' + """)], + 'frequency': PER_INSTANCE, + 'type': 'object', + 'properties': { + 'snap': { + 'type': 'object', + 'properties': { + 'assertions': { + 'type': ['object', 'array'], # Array of strings or dict + 'items': {'type': 'string'}, + 'additionalItems': False, # Reject items non-string + 'minItems': 1, + 'minProperties': 1, + 'uniqueItems': True + }, + 'commands': { + 'type': ['object', 'array'], # Array of strings or dict + 'items': { + 'oneOf': [ + {'type': 'array', 'items': {'type': 'string'}}, + {'type': 'string'}] + }, + 'additionalItems': False, # Reject non-string & non-list + 'minItems': 1, + 'minProperties': 1, + 'uniqueItems': True + }, + 'squashfuse_in_container': { + 'type': 'boolean' + } + }, + 'additionalProperties': False, # Reject keys not in schema + 'required': [], + 'minProperties': 1 + } + } +} + +# TODO schema for 'assertions' and 'commands' are too permissive at the moment. +# Once python-jsonschema supports schema draft 6 add support for arbitrary +# object keys with 'patternProperties' constraint to validate string values. + +__doc__ = get_schema_doc(schema) # Supplement python help() + +SNAP_CMD = "snap" +ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions" + + +def add_assertions(assertions): + """Import list of assertions. + + Import assertions by concatenating each assertion into a + string separated by a '\n'. Write this string to a instance file and + then invoke `snap ack /path/to/file` and check for errors. + If snap exits 0, then all assertions are imported. + """ + if not assertions: + return + LOG.debug('Importing user-provided snap assertions') + if isinstance(assertions, dict): + assertions = assertions.values() + elif not isinstance(assertions, list): + raise TypeError( + 'assertion parameter was not a list or dict: {assertions}'.format( + assertions=assertions)) + + snap_cmd = [SNAP_CMD, 'ack'] + combined = "\n".join(assertions) + + for asrt in assertions: + LOG.debug('Snap acking: %s', asrt.split('\n')[0:2]) + + util.write_file(ASSERTIONS_FILE, combined.encode('utf-8')) + util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True) + + +def prepend_snap_commands(commands): + """Ensure user-provided commands start with SNAP_CMD, warn otherwise. + + Each command is either a list or string. Perform the following: + - When the command is a list, pop the first element if it is None + - When the command is a list, insert SNAP_CMD as the first element if + not present. + - When the command is a string containing a non-snap command, warn. + + Support cut-n-paste snap command sets from public snappy documentation. + Allow flexibility to provide non-snap environment/config setup if needed. + + @commands: List of commands. Each command element is a list or string. 
+ + @return: List of 'fixed up' snap commands. + @raise: TypeError on invalid config item type. + """ + warnings = [] + errors = [] + fixed_commands = [] + for command in commands: + if isinstance(command, list): + if command[0] is None: # Avoid warnings by specifying None + command = command[1:] + elif command[0] != SNAP_CMD: # Automatically prepend SNAP_CMD + command.insert(0, SNAP_CMD) + elif isinstance(command, str): + if not command.startswith('%s ' % SNAP_CMD): + warnings.append(command) + else: + errors.append(str(command)) + continue + fixed_commands.append(command) + + if warnings: + LOG.warning( + 'Non-snap commands in snap config:\n%s', '\n'.join(warnings)) + if errors: + raise TypeError( + 'Invalid snap config.' + ' These commands are not a string or list:\n' + '\n'.join(errors)) + return fixed_commands + + +def run_commands(commands): + """Run the provided commands provided in snap:commands configuration. + + Commands are run individually. Any errors are collected and reported + after attempting all commands. + + @param commands: A list or dict containing commands to run. Keys of a + dict will be used to order the commands provided as dict values. + """ + if not commands: + return + LOG.debug('Running user-provided snap commands') + if isinstance(commands, dict): + # Sort commands based on dictionary key + commands = [v for _, v in sorted(commands.items())] + elif not isinstance(commands, list): + raise TypeError( + 'commands parameter was not a list or dict: {commands}'.format( + commands=commands)) + + fixed_snap_commands = prepend_snap_commands(commands) + + cmd_failures = [] + for command in fixed_snap_commands: + shell = isinstance(command, str) + try: + util.subp(command, shell=shell, status_cb=sys.stderr.write) + except util.ProcessExecutionError as e: + cmd_failures.append(str(e)) + if cmd_failures: + msg = 'Failures running snap commands:\n{cmd_failures}'.format( + cmd_failures=cmd_failures) + util.logexc(LOG, msg) + raise RuntimeError(msg) + + +# RELEASE_BLOCKER: Once LP: #1628289 is released on xenial, drop this function. +def maybe_install_squashfuse(cloud): + """Install squashfuse if we are in a container.""" + if not util.is_container(): + return + try: + cloud.distro.update_package_sources() + except Exception as e: + util.logexc(LOG, "Package update failed") + raise + try: + cloud.distro.install_packages(['squashfuse']) + except Exception as e: + util.logexc(LOG, "Failed to install squashfuse") + raise + + +def handle(name, cfg, cloud, log, args): + cfgin = cfg.get('snap', {}) + if not cfgin: + LOG.debug(("Skipping module named %s," + " no 'snap' key in configuration"), name) + return + + validate_cloudconfig_schema(cfg, schema) + if util.is_true(cfgin.get('squashfuse_in_container', False)): + maybe_install_squashfuse(cloud) + add_assertions(cfgin.get('assertions', [])) + run_commands(cfgin.get('commands', [])) + +# vi: ts=4 expandtab diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py index e82c0811..afe297ee 100644 --- a/cloudinit/config/cc_snap_config.py +++ b/cloudinit/config/cc_snap_config.py @@ -4,11 +4,15 @@ # # This file is part of cloud-init. See LICENSE file for license information. +# RELEASE_BLOCKER: Remove this deprecated module in 18.3 """ Snap Config ----------- **Summary:** snap_config modules allows configuration of snapd. +**Deprecated**: Use :ref:`snap` module instead. This module will not exist +in cloud-init 18.3. 
+ This module uses the same ``snappy`` namespace for configuration but acts only only a subset of the configuration. @@ -154,6 +158,9 @@ def handle(name, cfg, cloud, log, args): LOG.debug('No snappy config provided, skipping') return + log.warning( + 'DEPRECATION: snap_config module will be dropped in 18.3 release.' + ' Use snap module instead') if not(util.system_is_snappy()): LOG.debug("%s: system not snappy", name) return diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py index eecb8178..bab80bbe 100644 --- a/cloudinit/config/cc_snappy.py +++ b/cloudinit/config/cc_snappy.py @@ -1,10 +1,14 @@ # This file is part of cloud-init. See LICENSE file for license information. +# RELEASE_BLOCKER: Remove this deprecated module in 18.3 """ Snappy ------ **Summary:** snappy modules allows configuration of snappy. +**Deprecated**: Use :ref:`snap` module instead. This module will not exist +in cloud-init 18.3. + The below example config config would install ``etcd``, and then install ``pkg2.smoser`` with a ```` argument where ``config-file`` has ``config-blob`` inside it. If ``pkgname`` is installed already, then @@ -271,6 +275,10 @@ def handle(name, cfg, cloud, log, args): LOG.debug("%s: 'auto' mode, and system not snappy", name) return + log.warning( + 'DEPRECATION: snappy module will be dropped in 18.3 release.' + ' Use snap module instead') + set_snappy_command() pkg_ops = get_package_ops(packages=mycfg['packages'], diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py new file mode 100644 index 00000000..c2dd6afe --- /dev/null +++ b/cloudinit/config/tests/test_snap.py @@ -0,0 +1,533 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import re +from six import StringIO + +from cloudinit.config.cc_snap import ( + ASSERTIONS_FILE, add_assertions, handle, prepend_snap_commands, + maybe_install_squashfuse, run_commands, schema) +from cloudinit.config.schema import validate_cloudconfig_schema +from cloudinit import util +from cloudinit.tests.helpers import CiTestCase, mock, wrap_and_call + + +SYSTEM_USER_ASSERTION = """\ +type: system-user +authority-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp +brand-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp +email: foo@bar.com +password: $6$E5YiAuMIPAwX58jG$miomhVNui/vf7f/3ctB/f0RWSKFxG0YXzrJ9rtJ1ikvzt +series: +- 16 +since: 2016-09-10T16:34:00+03:00 +until: 2017-11-10T16:34:00+03:00 +username: baz +sign-key-sha3-384: RuVvnp4n52GilycjfbbTCI3_L8Y6QlIE75wxMc0KzGV3AUQqVd9GuXoj + +AcLBXAQAAQoABgUCV/UU1wAKCRBKnlMoJQLkZVeLD/9/+hIeVywtzsDA3oxl+P+u9D13y9s6svP +Jd6Wnf4FTw6sq1GjBE4ZA7lrwSaRCUJ9Vcsvf2q9OGPY7mOb2TBxaDe0PbUMjrSrqllSSQwhpNI +zG+NxkkKuxsUmLzFa+k9m6cyojNbw5LFhQZBQCGlr3JYqC0tIREq/UsZxj+90TUC87lDJwkU8GF +s4CR+rejZj4itIcDcVxCSnJH6hv6j2JrJskJmvObqTnoOlcab+JXdamXqbldSP3UIhWoyVjqzkj ++to7mXgx+cCUA9+ngNCcfUG+1huGGTWXPCYkZ78HvErcRlIdeo4d3xwtz1cl/w3vYnq9og1XwsP +Yfetr3boig2qs1Y+j/LpsfYBYncgWjeDfAB9ZZaqQz/oc8n87tIPZDJHrusTlBfop8CqcM4xsKS +d+wnEY8e/F24mdSOYmS1vQCIDiRU3MKb6x138Ud6oHXFlRBbBJqMMctPqWDunWzb5QJ7YR0I39q +BrnEqv5NE0G7w6HOJ1LSPG5Hae3P4T2ea+ATgkb03RPr3KnXnzXg4TtBbW1nytdlgoNc/BafE1H +f3NThcq9gwX4xWZ2PAWnqVPYdDMyCtzW3Ck+o6sIzx+dh4gDLPHIi/6TPe/pUuMop9CBpWwez7V +v1z+1+URx6Xlq3Jq18y5pZ6fY3IDJ6km2nQPMzcm4Q==""" + +ACCOUNT_ASSERTION = """\ +type: account-key +authority-id: canonical +revision: 2 +public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0 +account-id: canonical +name: store +since: 2016-04-01T00:00:00.0Z +body-length: 717 +sign-key-sha3-384: 
-CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswH + +AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9j +qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482 +vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJ +UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuK +Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQG +o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl +VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9 +2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7an +Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIc +vUvV7RjVzv17ut0AEQEAAQ== + +AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsM +WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/b +nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiL +3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kL +eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrY +inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ1 +rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+ +rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWE +aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQ +6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nO +haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpF +yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O9 +HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi +skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PK +CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjde +ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OF +qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqR +IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3t +oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k""" + + +class FakeCloud(object): + def __init__(self, distro): + self.distro = distro + + +class TestAddAssertions(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestAddAssertions, self).setUp() + self.tmp = self.tmp_dir() + + @mock.patch('cloudinit.config.cc_snap.util.subp') + def test_add_assertions_on_empty_list(self, m_subp): + """When provided with an empty list, add_assertions does nothing.""" + add_assertions([]) + self.assertEqual('', self.logs.getvalue()) + m_subp.assert_not_called() + + def test_add_assertions_on_non_list_or_dict(self): + """When provided an invalid type, add_assertions raises an error.""" + with self.assertRaises(TypeError) as context_manager: + add_assertions(assertions="I'm Not Valid") + self.assertEqual( + "assertion parameter was not a list or dict: I'm Not Valid", + str(context_manager.exception)) + + @mock.patch('cloudinit.config.cc_snap.util.subp') + def test_add_assertions_adds_assertions_as_list(self, m_subp): + """When provided with a list, add_assertions adds all assertions.""" + self.assertEqual( + ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions') + assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) + assertions = [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION] + wrap_and_call( + 'cloudinit.config.cc_snap', + {'ASSERTIONS_FILE': {'new': assert_file}}, + 
add_assertions, assertions) + self.assertIn( + 'Importing user-provided snap assertions', self.logs.getvalue()) + self.assertIn( + 'sertions', self.logs.getvalue()) + self.assertEqual( + [mock.call(['snap', 'ack', assert_file], capture=True)], + m_subp.call_args_list) + compare_file = self.tmp_path('comparison', dir=self.tmp) + util.write_file(compare_file, '\n'.join(assertions).encode('utf-8')) + self.assertEqual( + util.load_file(compare_file), util.load_file(assert_file)) + + @mock.patch('cloudinit.config.cc_snap.util.subp') + def test_add_assertions_adds_assertions_as_dict(self, m_subp): + """When provided with a dict, add_assertions adds all assertions.""" + self.assertEqual( + ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions') + assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) + assertions = {'00': SYSTEM_USER_ASSERTION, '01': ACCOUNT_ASSERTION} + wrap_and_call( + 'cloudinit.config.cc_snap', + {'ASSERTIONS_FILE': {'new': assert_file}}, + add_assertions, assertions) + self.assertIn( + 'Importing user-provided snap assertions', self.logs.getvalue()) + self.assertIn( + "DEBUG: Snap acking: ['type: system-user', 'authority-id: Lqv", + self.logs.getvalue()) + self.assertIn( + "DEBUG: Snap acking: ['type: account-key', 'authority-id: canonic", + self.logs.getvalue()) + self.assertEqual( + [mock.call(['snap', 'ack', assert_file], capture=True)], + m_subp.call_args_list) + compare_file = self.tmp_path('comparison', dir=self.tmp) + combined = '\n'.join(assertions.values()) + util.write_file(compare_file, combined.encode('utf-8')) + self.assertEqual( + util.load_file(compare_file), util.load_file(assert_file)) + + +class TestPrepentSnapCommands(CiTestCase): + + with_logs = True + + def test_prepend_snap_commands_errors_on_neither_string_nor_list(self): + """Raise an error for each command which is not a string or list.""" + orig_commands = ['ls', 1, {'not': 'gonna work'}, ['snap', 'list']] + with self.assertRaises(TypeError) as context_manager: + prepend_snap_commands(orig_commands) + self.assertEqual( + "Invalid snap config. 
These commands are not a string or list:\n" + "1\n{'not': 'gonna work'}", + str(context_manager.exception)) + + def test_prepend_snap_commands_warns_on_non_snap_string_commands(self): + """Warn on each non-snap for commands of type string.""" + orig_commands = ['ls', 'snap list', 'touch /blah', 'snap install x'] + fixed_commands = prepend_snap_commands(orig_commands) + self.assertEqual( + 'WARNING: Non-snap commands in snap config:\n' + 'ls\ntouch /blah\n', + self.logs.getvalue()) + self.assertEqual(orig_commands, fixed_commands) + + def test_prepend_snap_commands_prepends_on_non_snap_list_commands(self): + """Prepend 'snap' for each non-snap command of type list.""" + orig_commands = [['ls'], ['snap', 'list'], ['snapa', '/blah'], + ['snap', 'install', 'x']] + expected = [['snap', 'ls'], ['snap', 'list'], + ['snap', 'snapa', '/blah'], + ['snap', 'install', 'x']] + fixed_commands = prepend_snap_commands(orig_commands) + self.assertEqual('', self.logs.getvalue()) + self.assertEqual(expected, fixed_commands) + + def test_prepend_snap_commands_removes_first_item_when_none(self): + """Remove the first element of a non-snap command when it is None.""" + orig_commands = [[None, 'ls'], ['snap', 'list'], + [None, 'touch', '/blah'], + ['snap', 'install', 'x']] + expected = [['ls'], ['snap', 'list'], + ['touch', '/blah'], + ['snap', 'install', 'x']] + fixed_commands = prepend_snap_commands(orig_commands) + self.assertEqual('', self.logs.getvalue()) + self.assertEqual(expected, fixed_commands) + + +class TestRunCommands(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestRunCommands, self).setUp() + self.tmp = self.tmp_dir() + + @mock.patch('cloudinit.config.cc_snap.util.subp') + def test_run_commands_on_empty_list(self, m_subp): + """When provided with an empty list, run_commands does nothing.""" + run_commands([]) + self.assertEqual('', self.logs.getvalue()) + m_subp.assert_not_called() + + def test_run_commands_on_non_list_or_dict(self): + """When provided an invalid type, run_commands raises an error.""" + with self.assertRaises(TypeError) as context_manager: + run_commands(commands="I'm Not Valid") + self.assertEqual( + "commands parameter was not a list or dict: I'm Not Valid", + str(context_manager.exception)) + + def test_run_command_logs_commands_and_exit_codes_to_stderr(self): + """All exit codes are logged to stderr.""" + outfile = self.tmp_path('output.log', dir=self.tmp) + + cmd1 = 'echo "HI" >> %s' % outfile + cmd2 = 'bogus command' + cmd3 = 'echo "MOM" >> %s' % outfile + commands = [cmd1, cmd2, cmd3] + + mock_path = 'cloudinit.config.cc_snap.sys.stderr' + with mock.patch(mock_path, new_callable=StringIO) as m_stderr: + with self.assertRaises(RuntimeError) as context_manager: + run_commands(commands=commands) + + self.assertIsNotNone( + re.search(r'bogus: (command )?not found', + str(context_manager.exception)), + msg='Expected bogus command not found') + expected_stderr_log = '\n'.join([ + 'Begin run command: {cmd}'.format(cmd=cmd1), + 'End run command: exit(0)', + 'Begin run command: {cmd}'.format(cmd=cmd2), + 'ERROR: End run command: exit(127)', + 'Begin run command: {cmd}'.format(cmd=cmd3), + 'End run command: exit(0)\n']) + self.assertEqual(expected_stderr_log, m_stderr.getvalue()) + + def test_run_command_as_lists(self): + """When commands are specified as a list, run them in order.""" + outfile = self.tmp_path('output.log', dir=self.tmp) + + cmd1 = 'echo "HI" >> %s' % outfile + cmd2 = 'echo "MOM" >> %s' % outfile + commands = [cmd1, cmd2] + mock_path = 
'cloudinit.config.cc_snap.sys.stderr' + with mock.patch(mock_path, new_callable=StringIO): + run_commands(commands=commands) + + self.assertIn( + 'DEBUG: Running user-provided snap commands', + self.logs.getvalue()) + self.assertEqual('HI\nMOM\n', util.load_file(outfile)) + self.assertIn( + 'WARNING: Non-snap commands in snap config:', self.logs.getvalue()) + + def test_run_command_dict_sorted_as_command_script(self): + """When commands are a dict, sort them and run.""" + outfile = self.tmp_path('output.log', dir=self.tmp) + cmd1 = 'echo "HI" >> %s' % outfile + cmd2 = 'echo "MOM" >> %s' % outfile + commands = {'02': cmd1, '01': cmd2} + mock_path = 'cloudinit.config.cc_snap.sys.stderr' + with mock.patch(mock_path, new_callable=StringIO): + run_commands(commands=commands) + + expected_messages = [ + 'DEBUG: Running user-provided snap commands'] + for message in expected_messages: + self.assertIn(message, self.logs.getvalue()) + self.assertEqual('MOM\nHI\n', util.load_file(outfile)) + + +class TestSchema(CiTestCase): + + with_logs = True + + def test_schema_warns_on_snap_not_as_dict(self): + """If the snap configuration is not a dict, emit a warning.""" + validate_cloudconfig_schema({'snap': 'wrong type'}, schema) + self.assertEqual( + "WARNING: Invalid config:\nsnap: 'wrong type' is not of type" + " 'object'\n", + self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.run_commands') + def test_schema_disallows_unknown_keys(self, _): + """Unknown keys in the snap configuration emit warnings.""" + validate_cloudconfig_schema( + {'snap': {'commands': ['ls'], 'invalid-key': ''}}, schema) + self.assertIn( + 'WARNING: Invalid config:\nsnap: Additional properties are not' + " allowed ('invalid-key' was unexpected)", + self.logs.getvalue()) + + def test_warn_schema_requires_either_commands_or_assertions(self): + """Warn when snap configuration lacks both commands and assertions.""" + validate_cloudconfig_schema( + {'snap': {}}, schema) + self.assertIn( + 'WARNING: Invalid config:\nsnap: {} does not have enough' + ' properties', + self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.run_commands') + def test_warn_schema_commands_is_not_list_or_dict(self, _): + """Warn when snap:commands config is not a list or dict.""" + validate_cloudconfig_schema( + {'snap': {'commands': 'broken'}}, schema) + self.assertEqual( + "WARNING: Invalid config:\nsnap.commands: 'broken' is not of type" + " 'object', 'array'\n", + self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.run_commands') + def test_warn_schema_when_commands_is_empty(self, _): + """Emit warnings when snap:commands is an empty list or dict.""" + validate_cloudconfig_schema( + {'snap': {'commands': []}}, schema) + validate_cloudconfig_schema( + {'snap': {'commands': {}}}, schema) + self.assertEqual( + "WARNING: Invalid config:\nsnap.commands: [] is too short\n" + "WARNING: Invalid config:\nsnap.commands: {} does not have enough" + " properties\n", + self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.run_commands') + def test_schema_when_commands_are_list_or_dict(self, _): + """No warnings when snap:commands are either a list or dict.""" + validate_cloudconfig_schema( + {'snap': {'commands': ['valid']}}, schema) + validate_cloudconfig_schema( + {'snap': {'commands': {'01': 'also valid'}}}, schema) + self.assertEqual('', self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.add_assertions') + def test_warn_schema_assertions_is_not_list_or_dict(self, _): + """Warn when snap:assertions config is not a 
list or dict.""" + validate_cloudconfig_schema( + {'snap': {'assertions': 'broken'}}, schema) + self.assertEqual( + "WARNING: Invalid config:\nsnap.assertions: 'broken' is not of" + " type 'object', 'array'\n", + self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.add_assertions') + def test_warn_schema_when_assertions_is_empty(self, _): + """Emit warnings when snap:assertions is an empty list or dict.""" + validate_cloudconfig_schema( + {'snap': {'assertions': []}}, schema) + validate_cloudconfig_schema( + {'snap': {'assertions': {}}}, schema) + self.assertEqual( + "WARNING: Invalid config:\nsnap.assertions: [] is too short\n" + "WARNING: Invalid config:\nsnap.assertions: {} does not have" + " enough properties\n", + self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.add_assertions') + def test_schema_when_assertions_are_list_or_dict(self, _): + """No warnings when snap:assertions are a list or dict.""" + validate_cloudconfig_schema( + {'snap': {'assertions': ['valid']}}, schema) + validate_cloudconfig_schema( + {'snap': {'assertions': {'01': 'also valid'}}}, schema) + self.assertEqual('', self.logs.getvalue()) + + +class TestHandle(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestHandle, self).setUp() + self.tmp = self.tmp_dir() + + @mock.patch('cloudinit.config.cc_snap.run_commands') + @mock.patch('cloudinit.config.cc_snap.add_assertions') + @mock.patch('cloudinit.config.cc_snap.validate_cloudconfig_schema') + def test_handle_no_config(self, m_schema, m_add, m_run): + """When no snap-related configuration is provided, nothing happens.""" + cfg = {} + handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertIn( + "DEBUG: Skipping module named snap, no 'snap' key in config", + self.logs.getvalue()) + m_schema.assert_not_called() + m_add.assert_not_called() + m_run.assert_not_called() + + @mock.patch('cloudinit.config.cc_snap.run_commands') + @mock.patch('cloudinit.config.cc_snap.add_assertions') + @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse') + def test_handle_skips_squashfuse_when_unconfigured(self, m_squash, m_add, + m_run): + """When squashfuse_in_container is unset, don't attempt to install.""" + handle( + 'snap', cfg={'snap': {}}, cloud=None, log=self.logger, args=None) + handle( + 'snap', cfg={'snap': {'squashfuse_in_container': None}}, + cloud=None, log=self.logger, args=None) + handle( + 'snap', cfg={'snap': {'squashfuse_in_container': False}}, + cloud=None, log=self.logger, args=None) + self.assertEqual([], m_squash.call_args_list) # No calls + # snap configuration missing assertions and commands will default to [] + self.assertIn(mock.call([]), m_add.call_args_list) + self.assertIn(mock.call([]), m_run.call_args_list) + + @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse') + def test_handle_tries_to_install_squashfuse(self, m_squash): + """If squashfuse_in_container is True, try installing squashfuse.""" + cfg = {'snap': {'squashfuse_in_container': True}} + mycloud = FakeCloud(None) + handle('snap', cfg=cfg, cloud=mycloud, log=self.logger, args=None) + self.assertEqual( + [mock.call(mycloud)], m_squash.call_args_list) + + def test_handle_runs_commands_provided(self): + """If commands are specified as a list, run them.""" + outfile = self.tmp_path('output.log', dir=self.tmp) + + cfg = { + 'snap': {'commands': ['echo "HI" >> %s' % outfile, + 'echo "MOM" >> %s' % outfile]}} + handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual('HI\nMOM\n', 
util.load_file(outfile)) + + @mock.patch('cloudinit.config.cc_snap.util.subp') + def test_handle_adds_assertions(self, m_subp): + """Any configured snap assertions are provided to add_assertions.""" + assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) + compare_file = self.tmp_path('comparison', dir=self.tmp) + cfg = { + 'snap': {'assertions': [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]}} + wrap_and_call( + 'cloudinit.config.cc_snap', + {'ASSERTIONS_FILE': {'new': assert_file}}, + handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None) + content = '\n'.join(cfg['snap']['assertions']) + util.write_file(compare_file, content.encode('utf-8')) + self.assertEqual( + util.load_file(compare_file), util.load_file(assert_file)) + + @mock.patch('cloudinit.config.cc_snap.util.subp') + def test_handle_validates_schema(self, m_subp): + """Any provided configuration is runs validate_cloudconfig_schema.""" + assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) + cfg = {'snap': {'invalid': ''}} # Generates schema warning + wrap_and_call( + 'cloudinit.config.cc_snap', + {'ASSERTIONS_FILE': {'new': assert_file}}, + handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + "WARNING: Invalid config:\nsnap: Additional properties are not" + " allowed ('invalid' was unexpected)\n", + self.logs.getvalue()) + + +class TestMaybeInstallSquashFuse(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestMaybeInstallSquashFuse, self).setUp() + self.tmp = self.tmp_dir() + + @mock.patch('cloudinit.config.cc_snap.util.is_container') + def test_maybe_install_squashfuse_skips_non_containers(self, m_container): + """maybe_install_squashfuse does nothing when not on a container.""" + m_container.return_value = False + maybe_install_squashfuse(cloud=FakeCloud(None)) + self.assertEqual([mock.call()], m_container.call_args_list) + self.assertEqual('', self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.util.is_container') + def test_maybe_install_squashfuse_raises_install_errors(self, m_container): + """maybe_install_squashfuse logs and raises package install errors.""" + m_container.return_value = True + distro = mock.MagicMock() + distro.update_package_sources.side_effect = RuntimeError( + 'Some apt error') + with self.assertRaises(RuntimeError) as context_manager: + maybe_install_squashfuse(cloud=FakeCloud(distro)) + self.assertEqual('Some apt error', str(context_manager.exception)) + self.assertIn('Package update failed\nTraceback', self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.util.is_container') + def test_maybe_install_squashfuse_raises_update_errors(self, m_container): + """maybe_install_squashfuse logs and raises package update errors.""" + m_container.return_value = True + distro = mock.MagicMock() + distro.update_package_sources.side_effect = RuntimeError( + 'Some apt error') + with self.assertRaises(RuntimeError) as context_manager: + maybe_install_squashfuse(cloud=FakeCloud(distro)) + self.assertEqual('Some apt error', str(context_manager.exception)) + self.assertIn('Package update failed\nTraceback', self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.util.is_container') + def test_maybe_install_squashfuse_happy_path(self, m_container): + """maybe_install_squashfuse logs and raises package install errors.""" + m_container.return_value = True + distro = mock.MagicMock() # No errors raised + maybe_install_squashfuse(cloud=FakeCloud(distro)) + self.assertEqual( + [mock.call()], 
distro.update_package_sources.call_args_list) + self.assertEqual( + [mock.call(['squashfuse'])], + distro.install_packages.call_args_list) + +# vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 823d80bf..cae8b196 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1827,7 +1827,8 @@ def subp_blob_in_tempfile(blob, *args, **kwargs): def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, - logstring=False, decode="replace", target=None, update_env=None): + logstring=False, decode="replace", target=None, update_env=None, + status_cb=None): # not supported in cloud-init (yet), for now kept in the call signature # to ease maintaining code shared between cloud-init and curtin @@ -1848,6 +1849,9 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, if target_path(target) != "/": args = ['chroot', target] + list(args) + if status_cb: + command = ' '.join(args) if isinstance(args, list) else args + status_cb('Begin run command: {command}\n'.format(command=command)) if not logstring: LOG.debug(("Running command %s with allowed return codes %s" " (shell=%s, capture=%s)"), args, rcs, shell, capture) @@ -1888,6 +1892,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, env=env, shell=shell) (out, err) = sp.communicate(data) except OSError as e: + if status_cb: + status_cb('ERROR: End run command: invalid command provided\n') raise ProcessExecutionError( cmd=args, reason=e, errno=e.errno, stdout="-" if decode else b"-", @@ -1912,9 +1918,14 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, rc = sp.returncode if rc not in rcs: + if status_cb: + status_cb( + 'ERROR: End run command: exit({code})\n'.format(code=rc)) raise ProcessExecutionError(stdout=out, stderr=err, exit_code=rc, cmd=args) + if status_cb: + status_cb('End run command: exit({code})\n'.format(code=rc)) return (out, err) diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index cf2e2409..56a34fab 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -72,7 +72,8 @@ cloud_config_modules: # Emit the cloud config ready event # this can be used by upstart jobs for 'start on cloud-config'. - emit_upstart - - snap_config + - snap + - snap_config # DEPRECATED- Drop in version 18.2 {% endif %} - ssh-import-id - locale @@ -102,7 +103,7 @@ cloud_config_modules: # The modules that run in the 'final' stage cloud_final_modules: {% if variant in ["ubuntu", "unknown", "debian"] %} - - snappy + - snappy # DEPRECATED- Drop in version 18.2 {% endif %} - package-update-upgrade-install {% if variant in ["ubuntu", "unknown", "debian"] %} diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index 0ea3b6bf..50eb05cf 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -29,6 +29,7 @@ project = 'Cloud-Init' extensions = [ 'sphinx.ext.intersphinx', 'sphinx.ext.autodoc', + 'sphinx.ext.autosectionlabel', 'sphinx.ext.viewcode', ] diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst index 7b146751..a0f68129 100644 --- a/doc/rtd/topics/modules.rst +++ b/doc/rtd/topics/modules.rst @@ -45,6 +45,7 @@ Modules .. automodule:: cloudinit.config.cc_seed_random .. automodule:: cloudinit.config.cc_set_hostname .. automodule:: cloudinit.config.cc_set_passwords +.. automodule:: cloudinit.config.cc_snap .. automodule:: cloudinit.config.cc_snappy .. automodule:: cloudinit.config.cc_snap_config .. 
automodule:: cloudinit.config.cc_spacewalk diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml index d8bc170f..c7dcbe83 100644 --- a/tests/cloud_tests/releases.yaml +++ b/tests/cloud_tests/releases.yaml @@ -30,6 +30,9 @@ default_release_config: mirror_url: https://cloud-images.ubuntu.com/daily mirror_dir: '/srv/citest/images' keyring: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg + # The OS version formatted as Major.Minor is used to compare releases + version: null # Each release needs to define this, for example 16.04 + ec2: # Choose from: [ebs, instance-store] root-store: ebs diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml index 8e0fb62f..a3e29900 100644 --- a/tests/cloud_tests/testcases.yaml +++ b/tests/cloud_tests/testcases.yaml @@ -15,6 +15,9 @@ base_test_data: instance-id: | #!/bin/sh cat /run/cloud-init/.instance-id + instance-data.json: | + #!/bin/sh + cat /run/cloud-init/instance-data.json result.json: | #!/bin/sh cat /run/cloud-init/result.json diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py index a29a0928..bd548f5a 100644 --- a/tests/cloud_tests/testcases/__init__.py +++ b/tests/cloud_tests/testcases/__init__.py @@ -7,6 +7,8 @@ import inspect import unittest from unittest.util import strclass +from cloudinit.util import read_conf + from tests.cloud_tests import config from tests.cloud_tests.testcases.base import CloudTestCase as base_test @@ -48,6 +50,7 @@ def get_suite(test_name, data, conf): def setUpClass(cls): cls.data = data cls.conf = conf + cls.release_conf = read_conf(config.RELEASES_CONF)['releases'] suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(tmp)) diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 20e95955..324c7c91 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -4,10 +4,14 @@ import crypt import json +import re import unittest + from cloudinit import util as c_util +SkipTest = unittest.SkipTest + class CloudTestCase(unittest.TestCase): """Base test class for verifiers.""" @@ -16,6 +20,43 @@ class CloudTestCase(unittest.TestCase): data = {} conf = None _cloud_config = None + release_conf = {} # The platform's os release configuration + + expected_warnings = () # Subclasses set to ignore expected WARN logs + + @property + def os_cfg(self): + return self.release_conf[self.os_name]['default'] + + def is_distro(self, distro_name): + return self.os_cfg['os'] == distro_name + + def os_version_cmp(self, cmp_version): + """Compare the version of the test to comparison_version. + + @param: cmp_version: Either a float or a string representing + a release os from releases.yaml (e.g. centos66) + + @return: -1 when version < cmp_version, 0 when version=cmp_version and + 1 when version > cmp_version. 
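+
+ Illustrative example (versions assumed, e.g. xenial=16.04 and
+ bionic=18.04 in releases.yaml): a test running on xenial would get
+ os_version_cmp('bionic') == -1, while on bionic itself it returns 0.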
+ """ + version = self.release_conf[self.os_name]['default']['version'] + if isinstance(cmp_version, str): + cmp_version = self.release_conf[cmp_version]['default']['version'] + if version < cmp_version: + return -1 + elif version == cmp_version: + return 0 + else: + return 1 + + @property + def os_name(self): + return self.data.get('os_name', 'UNKNOWN') + + @property + def platform(self): + return self.data.get('platform', 'UNKNOWN') @property def cloud_config(self): @@ -72,12 +113,134 @@ class CloudTestCase(unittest.TestCase): self.assertEqual(len(result['errors']), 0) def test_no_warnings_in_log(self): - """Warnings should not be found in the log.""" + """Unexpected warnings should not be found in the log.""" + warnings = [ + l for l in self.get_data_file('cloud-init.log').splitlines() + if 'WARN' in l] + joined_warnings = '\n'.join(warnings) + for expected_warning in self.expected_warnings: + self.assertIn( + expected_warning, joined_warnings, + msg="Did not find %s in cloud-init.log" % expected_warning) + # Prune expected from discovered warnings + warnings = [w for w in warnings if expected_warning not in w] + self.assertEqual( + [], warnings, msg="'WARN' found inside cloud-init.log") + + def test_instance_data_json_ec2(self): + """Validate instance-data.json content by ec2 platform. + + This content is sourced by snapd when determining snapstore endpoints. + We validate expected values per cloud type to ensure we don't break + snapd. + """ + if self.platform != 'ec2': + raise SkipTest( + 'Skipping ec2 instance-data.json on %s' % self.platform) + out = self.get_data_file('instance-data.json') + if not out: + if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0: + raise AssertionError( + 'No instance-data.json found on %s' % self.os_name) + raise SkipTest( + 'Skipping instance-data.json test.' + ' OS: %s not bionic or newer' % self.os_name) + instance_data = json.loads(out) + self.assertEqual( + ['ds/user-data'], instance_data['base64-encoded-keys']) + ds = instance_data.get('ds', {}) + macs = ds.get('network', {}).get('interfaces', {}).get('macs', {}) + if not macs: + raise AssertionError('No network data from EC2 meta-data') + # Check meta-data items we depend on + expected_net_keys = [ + 'public-ipv4s', 'ipv4-associations', 'local-hostname', + 'public-hostname'] + for mac, mac_data in macs.items(): + for key in expected_net_keys: + self.assertIn(key, mac_data) + self.assertIsNotNone( + ds.get('placement', {}).get('availability-zone'), + 'Could not determine EC2 Availability zone placement') + ds = instance_data.get('ds', {}) + v1_data = instance_data.get('v1', {}) + self.assertIsNotNone( + v1_data['availability-zone'], 'expected ec2 availability-zone') + self.assertEqual('aws', v1_data['cloud-name']) + self.assertIn('i-', v1_data['instance-id']) + self.assertIn('ip-', v1_data['local-hostname']) + self.assertIsNotNone(v1_data['region'], 'expected ec2 region') + + def test_instance_data_json_lxd(self): + """Validate instance-data.json content by lxd platform. + + This content is sourced by snapd when determining snapstore endpoints. + We validate expected values per cloud type to ensure we don't break + snapd. 
+ """ + if self.platform != 'lxd': + raise SkipTest( + 'Skipping lxd instance-data.json on %s' % self.platform) + out = self.get_data_file('instance-data.json') + if not out: + if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0: + raise AssertionError( + 'No instance-data.json found on %s' % self.os_name) + raise SkipTest( + 'Skipping instance-data.json test.' + ' OS: %s not bionic or newer' % self.os_name) + instance_data = json.loads(out) + v1_data = instance_data.get('v1', {}) + self.assertEqual( + ['ds/user-data', 'ds/vendor-data'], + sorted(instance_data['base64-encoded-keys'])) + self.assertEqual('nocloud', v1_data['cloud-name']) + self.assertIsNone( + v1_data['availability-zone'], + 'found unexpected lxd availability-zone %s' % + v1_data['availability-zone']) + self.assertIn('cloud-test', v1_data['instance-id']) + self.assertIn('cloud-test', v1_data['local-hostname']) + self.assertIsNone( + v1_data['region'], + 'found unexpected lxd region %s' % v1_data['region']) + + def test_instance_data_json_kvm(self): + """Validate instance-data.json content by nocloud-kvm platform. + + This content is sourced by snapd when determining snapstore endpoints. + We validate expected values per cloud type to ensure we don't break + snapd. + """ + if self.platform != 'nocloud-kvm': + raise SkipTest( + 'Skipping nocloud-kvm instance-data.json on %s' % + self.platform) + out = self.get_data_file('instance-data.json') + if not out: + if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0: + raise AssertionError( + 'No instance-data.json found on %s' % self.os_name) + raise SkipTest( + 'Skipping instance-data.json test.' + ' OS: %s not bionic or newer' % self.os_name) + instance_data = json.loads(out) + v1_data = instance_data.get('v1', {}) self.assertEqual( - [], - [l for l in self.get_data_file('cloud-init.log').splitlines() - if 'WARN' in l], - msg="'WARN' found inside cloud-init.log") + ['ds/user-data'], instance_data['base64-encoded-keys']) + self.assertEqual('nocloud', v1_data['cloud-name']) + self.assertIsNone( + v1_data['availability-zone'], + 'found unexpected kvm availability-zone %s' % + v1_data['availability-zone']) + self.assertIsNotNone( + re.match('[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}', + v1_data['instance-id']), + 'kvm instance-id is not a UUID: %s' % v1_data['instance-id']) + self.assertIn('ubuntu', v1_data['local-hostname']) + self.assertIsNone( + v1_data['region'], + 'found unexpected lxd region %s' % v1_data['region']) class PasswordListTest(CloudTestCase): diff --git a/tests/cloud_tests/testcases/main/command_output_simple.py b/tests/cloud_tests/testcases/main/command_output_simple.py index 857881cb..80a2c8d7 100644 --- a/tests/cloud_tests/testcases/main/command_output_simple.py +++ b/tests/cloud_tests/testcases/main/command_output_simple.py @@ -7,6 +7,8 @@ from tests.cloud_tests.testcases import base class TestCommandOutputSimple(base.CloudTestCase): """Test functionality of simple output redirection.""" + expected_warnings = ('Stdout, stderr changing to',) + def test_output_file(self): """Ensure that the output file is not empty and has all stages.""" data = self.get_data_file('cloud-init-test-output') @@ -15,20 +17,5 @@ class TestCommandOutputSimple(base.CloudTestCase): data.splitlines()[-1].strip()) # TODO: need to test that all stages redirected here - def test_no_warnings_in_log(self): - """Warnings should not be found in the log. 
- - This class redirected stderr and stdout, so it expects to find - a warning in cloud-init.log to that effect.""" - redirect_msg = 'Stdout, stderr changing to' - warnings = [ - l for l in self.get_data_file('cloud-init.log').splitlines() - if 'WARN' in l] - self.assertEqual( - [], [w for w in warnings if redirect_msg not in w], - msg="'WARN' found inside cloud-init.log") - self.assertEqual( - 1, len(warnings), - msg="Did not find %s in cloud-init.log" % redirect_msg) # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/snap.py b/tests/cloud_tests/testcases/modules/snap.py new file mode 100644 index 00000000..ff68abbe --- /dev/null +++ b/tests/cloud_tests/testcases/modules/snap.py @@ -0,0 +1,16 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""cloud-init Integration Test Verify Script""" +from tests.cloud_tests.testcases import base + + +class TestSnap(base.CloudTestCase): + """Test snap module""" + + def test_snappy_version(self): + """Expect hello-world and core snaps are installed.""" + out = self.get_data_file('snaplist') + self.assertIn('core', out) + self.assertIn('hello-world', out) + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/snap.yaml b/tests/cloud_tests/testcases/modules/snap.yaml new file mode 100644 index 00000000..44043f31 --- /dev/null +++ b/tests/cloud_tests/testcases/modules/snap.yaml @@ -0,0 +1,18 @@ +# +# Install snappy +# +required_features: + - snap +cloud_config: | + #cloud-config + package_update: true + snap: + squashfuse_in_container: true + commands: + - snap install hello-world +collect_scripts: + snaplist: | + #!/bin/bash + snap list + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/snappy.py b/tests/cloud_tests/testcases/modules/snappy.py index b92271c1..7d17fc5b 100644 --- a/tests/cloud_tests/testcases/modules/snappy.py +++ b/tests/cloud_tests/testcases/modules/snappy.py @@ -7,6 +7,8 @@ from tests.cloud_tests.testcases import base class TestSnappy(base.CloudTestCase): """Test snappy module""" + expected_warnings = ('DEPRECATION',) + def test_snappy_version(self): """Test snappy version output""" out = self.get_data_file('snapd') diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py index 2a9fd520..5a68a484 100644 --- a/tests/cloud_tests/verify.py +++ b/tests/cloud_tests/verify.py @@ -8,13 +8,16 @@ import unittest from tests.cloud_tests import (config, LOG, util, testcases) -def verify_data(base_dir, tests): +def verify_data(data_dir, platform, os_name, tests): """Verify test data is correct. - @param base_dir: base directory for data + @param data_dir: top level directory for all tests + @param platform: The platform name we for this test data (e.g. lxd) + @param os_name: The operating system under test (xenial, artful, etc.). 
@param tests: list of test names @return_value: {: {passed: True/False, failures: []}} """ + base_dir = os.sep.join((data_dir, platform, os_name)) runner = unittest.TextTestRunner(verbosity=util.current_verbosity()) res = {} for test_name in tests: @@ -26,7 +29,7 @@ def verify_data(base_dir, tests): cloud_conf = test_conf['cloud_config'] # load script outputs - data = {} + data = {'platform': platform, 'os_name': os_name} test_dir = os.path.join(base_dir, test_name) for script_name in os.listdir(test_dir): with open(os.path.join(test_dir, script_name), 'rb') as fp: @@ -73,7 +76,7 @@ def verify(args): # run test res[platform][os_name] = verify_data( - os.sep.join((args.data_dir, platform, os_name)), + args.data_dir, platform, os_name, tests[platform][os_name]) # handle results diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index 1ecb6c68..9b50ee79 100644 --- a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -26,6 +26,7 @@ class GetSchemaTest(CiTestCase): 'cc_ntp', 'cc_resizefs', 'cc_runcmd', + 'cc_snap', 'cc_zypper_add_repo' ], [subschema['id'] for subschema in schema['allOf']]) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 499e7c9f..67d9607d 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -785,6 +785,39 @@ class TestSubp(helpers.CiTestCase): decode=False) self.assertEqual(self.utf8_valid, out) + def test_bogus_command_logs_status_messages(self): + """status_cb gets status messages logs on bogus commands provided.""" + logs = [] + + def status_cb(log): + logs.append(log) + + with self.assertRaises(util.ProcessExecutionError): + util.subp([self.bogus_command], status_cb=status_cb) + + expected = [ + 'Begin run command: {cmd}\n'.format(cmd=self.bogus_command), + 'ERROR: End run command: invalid command provided\n'] + self.assertEqual(expected, logs) + + def test_command_logs_exit_codes_to_status_cb(self): + """status_cb gets status messages containing command exit code.""" + logs = [] + + def status_cb(log): + logs.append(log) + + with self.assertRaises(util.ProcessExecutionError): + util.subp(['ls', '/I/dont/exist'], status_cb=status_cb) + util.subp(['ls'], status_cb=status_cb) + + expected = [ + 'Begin run command: ls /I/dont/exist\n', + 'ERROR: End run command: exit(2)\n', + 'Begin run command: ls\n', + 'End run command: exit(0)\n'] + self.assertEqual(expected, logs) + class TestEncode(helpers.TestCase): """Test the encoding functions""" -- cgit v1.2.3 From e0f644b7c8c76bd63d242558685722cc70d9c51d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 23 Mar 2018 17:17:55 -0600 Subject: IBMCloud: Initial IBM Cloud datasource. This adds a specific IBM Cloud datasource. IBM Cloud is identified by: a.) running on xen b.) one of a LABEL=METADATA disk or a LABEL=config-2 disk with UUID=9796-932E The datasource contains its own config-drive reader that reads only the currently supported portion of config-drive needed for ibm cloud. During the provisioning boot, cloud-init is disabled. See the docstring in DataSourceIBMCloud.py for more more information. 
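A minimal sketch of that identification rule, expressed with the blkid()
helper this series adds to cloudinit.util; the function shown here is
illustrative only (its name is hypothetical), and the real detection is
implemented by get_ibm_platform() in DataSourceIBMCloud.py below:

    import os
    from cloudinit import util

    IBM_CONFIG_UUID = "9796-932E"

    def looks_like_ibm_cloud():
        """Sketch: xen guest plus a METADATA or IBM-specific config-2 disk."""
        if not os.path.exists("/proc/xen"):    # a) must be running on xen
            return False
        for data in util.blkid().values():     # tags per device, e.g. LABEL, UUID
            label = data.get("LABEL", "").upper()
            if label == "METADATA":            # b) template-style metadata disk
                return True
            if (label == "CONFIG-2" and
                    data.get("UUID", "").upper() == IBM_CONFIG_UUID):
                return True                    # b) os_code-style config drive
        return False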
--- cloudinit/sources/DataSourceConfigDrive.py | 10 + cloudinit/sources/DataSourceIBMCloud.py | 325 +++++++++++++++++++++++ cloudinit/tests/test_util.py | 72 +++++ cloudinit/util.py | 31 +++ tests/unittests/test_datasource/test_ibmcloud.py | 262 ++++++++++++++++++ tests/unittests/test_ds_identify.py | 104 +++++++- tools/ds-identify | 65 ++++- 7 files changed, 857 insertions(+), 12 deletions(-) create mode 100644 cloudinit/sources/DataSourceIBMCloud.py create mode 100644 tests/unittests/test_datasource/test_ibmcloud.py (limited to 'cloudinit/util.py') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index b8db6267..c7b5fe5f 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -14,6 +14,7 @@ from cloudinit import util from cloudinit.net import eni +from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform from cloudinit.sources.helpers import openstack LOG = logging.getLogger(__name__) @@ -255,6 +256,15 @@ def find_candidate_devs(probe_optical=True): # an unpartitioned block device (ex sda, not sda1) devices = [d for d in candidates if d in by_label or not util.is_partition(d)] + + if devices: + # IBMCloud uses config-2 label, but limited to a single UUID. + ibm_platform, ibm_path = get_ibm_platform() + if ibm_path in devices: + devices.remove(ibm_path) + LOG.debug("IBMCloud device '%s' (%s) removed from candidate list", + ibm_path, ibm_platform) + return devices diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py new file mode 100644 index 00000000..02b3d56f --- /dev/null +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -0,0 +1,325 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""Datasource for IBMCloud. + +IBMCloud is also know as SoftLayer or BlueMix. +IBMCloud hypervisor is xen (2018-03-10). + +There are 2 different api exposed launch methods. + * template: This is the legacy method of launching instances. + When booting from an image template, the system boots first into + a "provisioning" mode. There, host <-> guest mechanisms are utilized + to execute code in the guest and provision it. + + Cloud-init will disable itself when it detects that it is in the + provisioning mode. It detects this by the presence of + a file '/root/provisioningConfiguration.cfg'. + + When provided with user-data, the "first boot" will contain a + ConfigDrive-like disk labeled with 'METADATA'. If there is no user-data + provided, then there is no data-source. + + Cloud-init never does any network configuration in this mode. + + * os_code: Essentially "launch by OS Code" (Operating System Code). + This is a more modern approach. There is no specific "provisioning" boot. + Instead, cloud-init does all the customization. With or without + user-data provided, an OpenStack ConfigDrive like disk is attached. + + Only disks with label 'config-2' and UUID '9796-932E' are considered. + This is to avoid this datasource claiming ConfigDrive. This does + mean that 1 in 8^16 (~4 billion) Xen ConfigDrive systems will be + incorrectly identified as IBMCloud. + +TODO: + * is uuid (/sys/hypervisor/uuid) stable for life of an instance? + it seems it is not the same as data's uuid in the os_code case + but is in the template case. 
+ +""" +import base64 +import json +import os + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit.sources.helpers import openstack +from cloudinit import util + +LOG = logging.getLogger(__name__) + +IBM_CONFIG_UUID = "9796-932E" + + +class Platforms(object): + TEMPLATE_LIVE_METADATA = "Template/Live/Metadata" + TEMPLATE_LIVE_NODATA = "UNABLE TO BE IDENTIFIED." + TEMPLATE_PROVISIONING_METADATA = "Template/Provisioning/Metadata" + TEMPLATE_PROVISIONING_NODATA = "Template/Provisioning/No-Metadata" + OS_CODE = "OS-Code/Live" + + +PROVISIONING = ( + Platforms.TEMPLATE_PROVISIONING_METADATA, + Platforms.TEMPLATE_PROVISIONING_NODATA) + + +class DataSourceIBMCloud(sources.DataSource): + + dsname = 'IBMCloud' + system_uuid = None + + def __init__(self, sys_cfg, distro, paths): + super(DataSourceIBMCloud, self).__init__(sys_cfg, distro, paths) + self.source = None + self._network_config = None + self.network_json = None + self.platform = None + + def __str__(self): + root = super(DataSourceIBMCloud, self).__str__() + mstr = "%s [%s %s]" % (root, self.platform, self.source) + return mstr + + def _get_data(self): + results = read_md() + if results is None: + return False + + self.source = results['source'] + self.platform = results['platform'] + self.metadata = results['metadata'] + self.userdata_raw = results.get('userdata') + self.network_json = results.get('networkdata') + vd = results.get('vendordata') + self.vendordata_pure = vd + self.system_uuid = results['system-uuid'] + try: + self.vendordata_raw = sources.convert_vendordata(vd) + except ValueError as e: + LOG.warning("Invalid content in vendor-data: %s", e) + self.vendordata_raw = None + + return True + + def check_instance_id(self, sys_cfg): + """quickly (local check only) if self.instance_id is still valid + + in Template mode, the system uuid (/sys/hypervisor/uuid) is the + same as found in the METADATA disk. But that is not true in OS_CODE + mode. So we read the system_uuid and keep that for later compare.""" + if self.system_uuid is None: + return False + return self.system_uuid == _read_system_uuid() + + @property + def network_config(self): + if self.platform != Platforms.OS_CODE: + # If deployed from template, an agent in the provisioning + # environment handles networking configuration. Not cloud-init. + return {'config': 'disabled', 'version': 1} + if self._network_config is None: + if self.network_json is not None: + LOG.debug("network config provided via network_json") + self._network_config = openstack.convert_net_json( + self.network_json, known_macs=None) + else: + LOG.debug("no network configuration available.") + return self._network_config + + +def _read_system_uuid(): + uuid_path = "/sys/hypervisor/uuid" + if not os.path.isfile(uuid_path): + return None + return util.load_file(uuid_path).strip().lower() + + +def _is_xen(): + return os.path.exists("/proc/xen") + + +def _is_ibm_provisioning(): + return os.path.exists("/root/provisioningConfiguration.cfg") + + +def get_ibm_platform(): + """Return a tuple (Platform, path) + + If this is Not IBM cloud, then the return value is (None, None). + An instance in provisioning mode is considered running on IBM cloud.""" + label_mdata = "METADATA" + label_cfg2 = "CONFIG-2" + not_found = (None, None) + + if not _is_xen(): + return not_found + + # fslabels contains only the first entry with a given label. 
+ fslabels = {} + try: + devs = util.blkid() + except util.ProcessExecutionError as e: + LOG.warning("Failed to run blkid: %s", e) + return (None, None) + + for dev in sorted(devs.keys()): + data = devs[dev] + label = data.get("LABEL", "").upper() + uuid = data.get("UUID", "").upper() + if label not in (label_mdata, label_cfg2): + continue + if label in fslabels: + LOG.warning("Duplicate fslabel '%s'. existing=%s current=%s", + label, fslabels[label], data) + continue + if label == label_cfg2 and uuid != IBM_CONFIG_UUID: + LOG.debug("Skipping %s with LABEL=%s due to uuid != %s: %s", + dev, label, uuid, data) + continue + fslabels[label] = data + + metadata_path = fslabels.get(label_mdata, {}).get('DEVNAME') + cfg2_path = fslabels.get(label_cfg2, {}).get('DEVNAME') + + if cfg2_path: + return (Platforms.OS_CODE, cfg2_path) + elif metadata_path: + if _is_ibm_provisioning(): + return (Platforms.TEMPLATE_PROVISIONING_METADATA, metadata_path) + else: + return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path) + elif _is_ibm_provisioning(): + return (Platforms.TEMPLATE_PROVISIONING_NODATA, None) + return not_found + + +def read_md(): + """Read data from IBM Cloud. + + @return: None if not running on IBM Cloud. + dictionary with guaranteed fields: metadata, version + and optional fields: userdata, vendordata, networkdata. + Also includes the system uuid from /sys/hypervisor/uuid.""" + platform, path = get_ibm_platform() + if platform is None: + LOG.debug("This is not an IBMCloud platform.") + return None + elif platform in PROVISIONING: + LOG.debug("Cloud-init is disabled during provisioning: %s.", + platform) + return None + + ret = {'platform': platform, 'source': path, + 'system-uuid': _read_system_uuid()} + + try: + if os.path.isdir(path): + results = metadata_from_dir(path) + else: + results = util.mount_cb(path, metadata_from_dir) + except BrokenMetadata as e: + raise RuntimeError( + "Failed reading IBM config disk (platform=%s path=%s): %s" % + (platform, path, e)) + + ret.update(results) + return ret + + +class BrokenMetadata(IOError): + pass + + +def metadata_from_dir(source_dir): + """Walk source_dir extracting standardized metadata. + + Certain metadata keys are renamed to present a standardized set of metadata + keys. + + This function has a lot in common with ConfigDriveReader.read_v2 but + there are a number of inconsistencies, such key renames and as only + presenting a 'latest' version which make it an unlikely candidate to share + code. + + @return: Dict containing translated metadata, userdata, vendordata, + networkdata as present. 
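+
+ Expected layout under source_dir (illustrative):
+ openstack/latest/meta_data.json (required)
+ openstack/latest/user_data
+ openstack/latest/vendor_data.json
+ openstack/latest/network_data.json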
+ """ + + def opath(fname): + return os.path.join("openstack", "latest", fname) + + def load_json_bytes(blob): + return json.loads(blob.decode('utf-8')) + + files = [ + # tuples of (results_name, path, translator) + ('metadata_raw', opath('meta_data.json'), load_json_bytes), + ('userdata', opath('user_data'), None), + ('vendordata', opath('vendor_data.json'), load_json_bytes), + ('networkdata', opath('network_data.json'), load_json_bytes), + ] + + results = {} + for (name, path, transl) in files: + fpath = os.path.join(source_dir, path) + raw = None + try: + raw = util.load_file(fpath, decode=False) + except IOError as e: + LOG.debug("Failed reading path '%s': %s", fpath, e) + + if raw is None or transl is None: + data = raw + else: + try: + data = transl(raw) + except Exception as e: + raise BrokenMetadata("Failed decoding %s: %s" % (path, e)) + + results[name] = data + + if results.get('metadata_raw') is None: + raise BrokenMetadata( + "%s missing required file 'meta_data.json'" % source_dir) + + results['metadata'] = {} + + md_raw = results['metadata_raw'] + md = results['metadata'] + if 'random_seed' in md_raw: + try: + md['random_seed'] = base64.b64decode(md_raw['random_seed']) + except (ValueError, TypeError) as e: + raise BrokenMetadata( + "Badly formatted metadata random_seed entry: %s" % e) + + renames = ( + ('public_keys', 'public-keys'), ('hostname', 'local-hostname'), + ('uuid', 'instance-id')) + for mdname, newname in renames: + if mdname in md_raw: + md[newname] = md_raw[mdname] + + return results + + +# Used to match classes to dependencies +datasources = [ + (DataSourceIBMCloud, (sources.DEP_FILESYSTEM,)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='Query IBM Cloud Metadata') + args = parser.parse_args() + data = read_md() + print(util.json_dumps(data)) + +# vi: ts=4 expandtab diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index d30643dc..3f37dbb6 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -3,6 +3,7 @@ """Tests for cloudinit.util""" import logging +from textwrap import dedent import cloudinit.util as util @@ -140,4 +141,75 @@ class TestGetHostnameFqdn(CiTestCase): [{'fqdn': True, 'metadata_only': True}, {'metadata_only': True}], mycloud.calls) + +class TestBlkid(CiTestCase): + ids = { + "id01": "1111-1111", + "id02": "22222222-2222", + "id03": "33333333-3333", + "id04": "44444444-4444", + "id05": "55555555-5555-5555-5555-555555555555", + "id06": "66666666-6666-6666-6666-666666666666", + "id07": "52894610484658920398", + "id08": "86753098675309867530", + "id09": "99999999-9999-9999-9999-999999999999", + } + + blkid_out = dedent("""\ + /dev/loop0: TYPE="squashfs" + /dev/loop1: TYPE="squashfs" + /dev/loop2: TYPE="squashfs" + /dev/loop3: TYPE="squashfs" + /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}" + /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}" + /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}" + /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """ + """TYPE="zfs_member" PARTUUID="{id09}" + /dev/loop4: TYPE="squashfs" + """) + + maxDiff = None + + def _get_expected(self): + return ({ + "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"}, + "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"}, + "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": 
"squashfs"}, + "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"}, + "/dev/loop4": {"DEVNAME": "/dev/loop4", "TYPE": "squashfs"}, + "/dev/sda1": {"DEVNAME": "/dev/sda1", "TYPE": "vfat", + "UUID": self.ids["id01"], + "PARTUUID": self.ids["id02"]}, + "/dev/sda2": {"DEVNAME": "/dev/sda2", "TYPE": "ext4", + "UUID": self.ids["id03"], + "PARTUUID": self.ids["id04"]}, + "/dev/sda3": {"DEVNAME": "/dev/sda3", "TYPE": "ext4", + "UUID": self.ids["id05"], + "PARTUUID": self.ids["id06"]}, + "/dev/sda4": {"DEVNAME": "/dev/sda4", "TYPE": "zfs_member", + "LABEL": "default", + "UUID": self.ids["id07"], + "UUID_SUB": self.ids["id08"], + "PARTUUID": self.ids["id09"]}, + }) + + @mock.patch("cloudinit.util.subp") + def test_functional_blkid(self, m_subp): + m_subp.return_value = ( + self.blkid_out.format(**self.ids), "") + self.assertEqual(self._get_expected(), util.blkid()) + m_subp.assert_called_with(["blkid", "-o", "full"], capture=True, + decode="replace") + + @mock.patch("cloudinit.util.subp") + def test_blkid_no_cache_uses_no_cache(self, m_subp): + """blkid should turn off cache if disable_cache is true.""" + m_subp.return_value = ( + self.blkid_out.format(**self.ids), "") + self.assertEqual(self._get_expected(), + util.blkid(disable_cache=True)) + m_subp.assert_called_with(["blkid", "-o", "full", "-c", "/dev/null"], + capture=True, decode="replace") + + # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index cae8b196..fb4ee5fe 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1237,6 +1237,37 @@ def find_devs_with(criteria=None, oformat='device', return entries +def blkid(devs=None, disable_cache=False): + """Get all device tags details from blkid. + + @param devs: Optional list of device paths you wish to query. + @param disable_cache: Bool, set True to start with clean cache. + + @return: Dict of key value pairs of info for the device. + """ + if devs is None: + devs = [] + else: + devs = list(devs) + + cmd = ['blkid', '-o', 'full'] + if disable_cache: + cmd.extend(['-c', '/dev/null']) + cmd.extend(devs) + + # we have to decode with 'replace' as shelx.split (called by + # load_shell_content) can't take bytes. So this is potentially + # lossy of non-utf-8 chars in blkid output. + out, _ = subp(cmd, capture=True, decode="replace") + ret = {} + for line in out.splitlines(): + dev, _, data = line.partition(":") + ret[dev] = load_shell_content(data) + ret[dev]["DEVNAME"] = dev + + return ret + + def peek_file(fname, max_bytes): LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes) with open(fname, 'rb') as ifh: diff --git a/tests/unittests/test_datasource/test_ibmcloud.py b/tests/unittests/test_datasource/test_ibmcloud.py new file mode 100644 index 00000000..621cfe49 --- /dev/null +++ b/tests/unittests/test_datasource/test_ibmcloud.py @@ -0,0 +1,262 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.sources import DataSourceIBMCloud as ibm +from cloudinit.tests import helpers as test_helpers + +import base64 +import copy +import json +import mock +from textwrap import dedent + +D_PATH = "cloudinit.sources.DataSourceIBMCloud." 
+ + +class TestIBMCloud(test_helpers.CiTestCase): + """Test the datasource.""" + def setUp(self): + super(TestIBMCloud, self).setUp() + pass + + +@mock.patch(D_PATH + "_is_xen", return_value=True) +@mock.patch(D_PATH + "_is_ibm_provisioning") +@mock.patch(D_PATH + "util.blkid") +class TestGetIBMPlatform(test_helpers.CiTestCase): + """Test the get_ibm_platform helper.""" + + blkid_base = { + "/dev/xvda1": { + "DEVNAME": "/dev/xvda1", "LABEL": "cloudimg-bootfs", + "TYPE": "ext3"}, + "/dev/xvda2": { + "DEVNAME": "/dev/xvda2", "LABEL": "cloudimg-rootfs", + "TYPE": "ext4"}, + } + + blkid_metadata_disk = { + "/dev/xvdh1": { + "DEVNAME": "/dev/xvdh1", "LABEL": "METADATA", "TYPE": "vfat", + "SEC_TYPE": "msdos", "UUID": "681B-8C5D", + "PARTUUID": "3d631e09-01"}, + } + + blkid_oscode_disk = { + "/dev/xvdh": { + "DEVNAME": "/dev/xvdh", "LABEL": "config-2", "TYPE": "vfat", + "SEC_TYPE": "msdos", "UUID": ibm.IBM_CONFIG_UUID} + } + + def setUp(self): + self.blkid_metadata = copy.deepcopy(self.blkid_base) + self.blkid_metadata.update(copy.deepcopy(self.blkid_metadata_disk)) + + self.blkid_oscode = copy.deepcopy(self.blkid_base) + self.blkid_oscode.update(copy.deepcopy(self.blkid_oscode_disk)) + + def test_id_template_live_metadata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_LIVE_METADATA.""" + m_blkid.return_value = self.blkid_metadata + m_is_prov.return_value = False + self.assertEqual( + (ibm.Platforms.TEMPLATE_LIVE_METADATA, "/dev/xvdh1"), + ibm.get_ibm_platform()) + + def test_id_template_prov_metadata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_PROVISIONING_METADATA.""" + m_blkid.return_value = self.blkid_metadata + m_is_prov.return_value = True + self.assertEqual( + (ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh1"), + ibm.get_ibm_platform()) + + def test_id_template_prov_nodata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_PROVISIONING_NODATA.""" + m_blkid.return_value = self.blkid_base + m_is_prov.return_value = True + self.assertEqual( + (ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None), + ibm.get_ibm_platform()) + + def test_id_os_code(self, m_blkid, m_is_prov, _m_xen): + """Identify OS_CODE.""" + m_blkid.return_value = self.blkid_oscode + m_is_prov.return_value = False + self.assertEqual((ibm.Platforms.OS_CODE, "/dev/xvdh"), + ibm.get_ibm_platform()) + + def test_id_os_code_must_match_uuid(self, m_blkid, m_is_prov, _m_xen): + """Test against false positive on openstack with non-ibm UUID.""" + blkid = self.blkid_oscode + blkid["/dev/xvdh"]["UUID"] = "9999-9999" + m_blkid.return_value = blkid + m_is_prov.return_value = False + self.assertEqual((None, None), ibm.get_ibm_platform()) + + +@mock.patch(D_PATH + "_read_system_uuid", return_value=None) +@mock.patch(D_PATH + "get_ibm_platform") +class TestReadMD(test_helpers.CiTestCase): + """Test the read_datasource helper.""" + + template_md = { + "files": [], + "network_config": {"content_path": "/content/interfaces"}, + "hostname": "ci-fond-ram", + "name": "ci-fond-ram", + "domain": "testing.ci.cloud-init.org", + "meta": {"dsmode": "net"}, + "uuid": "8e636730-9f5d-c4a5-327c-d7123c46e82f", + "public_keys": {"1091307": "ssh-rsa AAAAB3NzaC1...Hw== ci-pubkey"}, + } + + oscode_md = { + "hostname": "ci-grand-gannet.testing.ci.cloud-init.org", + "name": "ci-grand-gannet", + "uuid": "2f266908-8e6c-4818-9b5c-42e9cc66a785", + "random_seed": "bm90LXJhbmRvbQo=", + "crypt_key": "ssh-rsa AAAAB3NzaC1yc2..n6z/", + "configuration_token": "eyJhbGciOi..M3ZA", + "public_keys": {"1091307": "ssh-rsa AAAAB3N..Hw== 
ci-pubkey"}, + } + + content_interfaces = dedent("""\ + auto lo + iface lo inet loopback + + auto eth0 + allow-hotplug eth0 + iface eth0 inet static + address 10.82.43.5 + netmask 255.255.255.192 + """) + + userdata = b"#!/bin/sh\necho hi mom\n" + # meta.js file gets json encoded userdata as a list. + meta_js = '["#!/bin/sh\necho hi mom\n"]' + vendor_data = { + "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"} + + network_data = { + "links": [ + {"id": "interface_29402281", "name": "eth0", "mtu": None, + "type": "phy", "ethernet_mac_address": "06:00:f1:bd:da:25"}, + {"id": "interface_29402279", "name": "eth1", "mtu": None, + "type": "phy", "ethernet_mac_address": "06:98:5e:d0:7f:86"} + ], + "networks": [ + {"id": "network_109887563", "link": "interface_29402281", + "type": "ipv4", "ip_address": "10.82.43.2", + "netmask": "255.255.255.192", + "routes": [ + {"network": "10.0.0.0", "netmask": "255.0.0.0", + "gateway": "10.82.43.1"}, + {"network": "161.26.0.0", "netmask": "255.255.0.0", + "gateway": "10.82.43.1"}]}, + {"id": "network_109887551", "link": "interface_29402279", + "type": "ipv4", "ip_address": "108.168.194.252", + "netmask": "255.255.255.248", + "routes": [ + {"network": "0.0.0.0", "netmask": "0.0.0.0", + "gateway": "108.168.194.249"}]} + ], + "services": [ + {"type": "dns", "address": "10.0.80.11"}, + {"type": "dns", "address": "10.0.80.12"} + ], + } + + sysuuid = '7f79ebf5-d791-43c3-a723-854e8389d59f' + + def _get_expected_metadata(self, os_md): + """return expected 'metadata' for data loaded from meta_data.json.""" + os_md = copy.deepcopy(os_md) + renames = ( + ('hostname', 'local-hostname'), + ('uuid', 'instance-id'), + ('public_keys', 'public-keys')) + ret = {} + for osname, mdname in renames: + if osname in os_md: + ret[mdname] = os_md[osname] + if 'random_seed' in os_md: + ret['random_seed'] = base64.b64decode(os_md['random_seed']) + + return ret + + def test_provisioning_md(self, m_platform, m_sysuuid): + """Provisioning env with a metadata disk should return None.""" + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh") + self.assertIsNone(ibm.read_md()) + + def test_provisioning_no_metadata(self, m_platform, m_sysuuid): + """Provisioning env with no metadata disk should return None.""" + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None) + self.assertIsNone(ibm.read_md()) + + def test_provisioning_not_ibm(self, m_platform, m_sysuuid): + """Provisioning env but not identified as IBM should return None.""" + m_platform.return_value = (None, None) + self.assertIsNone(ibm.read_md()) + + def test_template_live(self, m_platform, m_sysuuid): + """Template live environment should be identified.""" + tmpdir = self.tmp_dir() + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_LIVE_METADATA, tmpdir) + m_sysuuid.return_value = self.sysuuid + + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.template_md), + 'openstack/latest/user_data': self.userdata, + 'openstack/content/interfaces': self.content_interfaces, + 'meta.js': self.meta_js}) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, + ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertEqual(self.userdata, ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.template_md), + ret['metadata']) + self.assertEqual(self.sysuuid, ret['system-uuid']) + + def test_os_code_live(self, m_platform, m_sysuuid): + """Verify an os_code metadata path.""" 
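+        # An OS_CODE config-2 disk carries an OpenStack-style tree; the
+        # files populated here (openstack/latest/meta_data.json, user_data,
+        # vendor_data.json and network_data.json) are the ones read_md()
+        # loads from the source directory.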
+ tmpdir = self.tmp_dir() + m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) + netdata = json.dumps(self.network_data) + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), + 'openstack/latest/user_data': self.userdata, + 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), + 'openstack/latest/network_data.json': netdata, + }) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertEqual(self.userdata, ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.oscode_md), + ret['metadata']) + + def test_os_code_live_no_userdata(self, m_platform, m_sysuuid): + """Verify os_code without user-data.""" + tmpdir = self.tmp_dir() + m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), + 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), + }) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertIsNone(ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.oscode_md), + ret['metadata']) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 85999b7a..53643989 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -9,6 +9,8 @@ from cloudinit import util from cloudinit.tests.helpers import ( CiTestCase, dir2dict, populate_dir) +from cloudinit.sources import DataSourceIBMCloud as dsibm + UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu " "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux") UNAME_PPC64EL = ("Linux diamond 4.4.0-83-generic #106-Ubuntu SMP " @@ -37,8 +39,8 @@ BLKID_UEFI_UBUNTU = [ POLICY_FOUND_ONLY = "search,found=all,maybe=none,notfound=disabled" POLICY_FOUND_OR_MAYBE = "search,found=all,maybe=all,notfound=disabled" -DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=enabled" -DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=disabled" +DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=disabled" +DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=enabled" DI_EC2_STRICT_ID_DEFAULT = "true" OVF_MATCH_STRING = 'http://schemas.dmtf.org/ovf/environment/1' @@ -64,6 +66,9 @@ P_SYS_VENDOR = "sys/class/dmi/id/sys_vendor" P_SEED_DIR = "var/lib/cloud/seed" P_DSID_CFG = "etc/cloud/ds-identify.cfg" +IBM_PROVISIONING_CHECK_PATH = "/root/provisioningConfiguration.cfg" +IBM_CONFIG_UUID = "9796-932E" + MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0} MOCK_VIRT_IS_XEN = {'name': 'detect_virt', 'RET': 'xen', 'ret': 0} @@ -239,6 +244,57 @@ class TestDsIdentify(CiTestCase): self._test_ds_found('ConfigDriveUpper') return + def test_ibmcloud_template_userdata_in_provisioning(self): + """Template provisioned with user-data during provisioning stage. + + Template provisioning with user-data has METADATA disk, + datasource should return not found.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-metadata']) + data['files'] = {IBM_PROVISIONING_CHECK_PATH: 'xxx'} + return self._check_via_dict(data, RC_NOT_FOUND) + + def test_ibmcloud_template_userdata(self): + """Template provisioned with user-data first boot. + + Template provisioning with user-data has METADATA disk. 
+ datasource should return found.""" + self._test_ds_found('IBMCloud-metadata') + + def test_ibmcloud_template_no_userdata_in_provisioning(self): + """Template provisioned with no user-data during provisioning. + + no disks attached. Datasource should return not found.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-nodisks']) + data['files'] = {IBM_PROVISIONING_CHECK_PATH: 'xxx'} + return self._check_via_dict(data, RC_NOT_FOUND) + + def test_ibmcloud_template_no_userdata(self): + """Template provisioned with no user-data first boot. + + no disks attached. Datasource should return found.""" + self._check_via_dict(VALID_CFG['IBMCloud-nodisks'], RC_NOT_FOUND) + + def test_ibmcloud_os_code(self): + """Launched by os code always has config-2 disk.""" + self._test_ds_found('IBMCloud-config-2') + + def test_ibmcloud_os_code_different_uuid(self): + """IBM cloud config-2 disks must be explicit match on UUID. + + If the UUID is not 9796-932E then we actually expect ConfigDrive.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-config-2']) + offset = None + for m, d in enumerate(data['mocks']): + if d.get('name') == "blkid": + offset = m + break + if not offset: + raise ValueError("Expected to find 'blkid' mock, but did not.") + data['mocks'][offset]['out'] = d['out'].replace(dsibm.IBM_CONFIG_UUID, + "DEAD-BEEF") + self._check_via_dict( + data, rc=RC_FOUND, dslist=['ConfigDrive', DS_NONE]) + def test_policy_disabled(self): """A Builtin policy of 'disabled' should return not found. @@ -452,7 +508,7 @@ VALID_CFG = { }, 'Ec2-xen': { 'ds': 'Ec2', - 'mocks': [{'name': 'detect_virt', 'RET': 'xen', 'ret': 0}], + 'mocks': [MOCK_VIRT_IS_XEN], 'files': { 'sys/hypervisor/uuid': 'ec2c6e2f-5fac-4fc7-9c82-74127ec14bbb\n' }, @@ -579,6 +635,48 @@ VALID_CFG = { 'ds': 'Hetzner', 'files': {P_SYS_VENDOR: 'Hetzner\n'}, }, + 'IBMCloud-metadata': { + 'ds': 'IBMCloud', + 'mocks': [ + MOCK_VIRT_IS_XEN, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'xvda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, + {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'METADATA'}]), + }, + ], + }, + 'IBMCloud-config-2': { + 'ds': 'IBMCloud', + 'mocks': [ + MOCK_VIRT_IS_XEN, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'xvda1', 'TYPE': 'ext3', 'PARTUUID': uuid4(), + 'UUID': uuid4(), 'LABEL': 'cloudimg-bootfs'}, + {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'config-2', + 'UUID': dsibm.IBM_CONFIG_UUID}, + {'DEVNAME': 'xvda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4(), + 'UUID': uuid4()}, + ]), + }, + ], + }, + 'IBMCloud-nodisks': { + 'ds': 'IBMCloud', + 'mocks': [ + MOCK_VIRT_IS_XEN, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'xvda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}]), + }, + ], + }, } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index e2552c8b..9a2db5c4 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -92,6 +92,7 @@ DI_DMI_SYS_VENDOR="" DI_DMI_PRODUCT_SERIAL="" DI_DMI_PRODUCT_UUID="" DI_FS_LABELS="" +DI_FS_UUIDS="" DI_ISO9660_DEVS="" DI_KERNEL_CMDLINE="" DI_VIRT="" @@ -114,7 +115,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. 
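+# IBMCloud is appended to the default list below so dscheck_IBMCloud runs
+# during the normal search; is_ibm_cloud (added later in this patch) only
+# claims the platform under xen virt when it sees the provisioning marker
+# file, a METADATA-labeled filesystem, or a config-2 filesystem with the
+# IBM-specific UUID 9796-932E.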
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner" +OVF SmartOS Scaleway Hetzner IBMCloud" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -123,6 +124,8 @@ DI_ON_NOTFOUND="" DI_EC2_STRICT_ID_DEFAULT="true" +_IS_IBM_CLOUD="" + error() { set -- "ERROR:" "$@"; debug 0 "$@" @@ -196,7 +199,7 @@ read_fs_info() { return fi local oifs="$IFS" line="" delim="," - local ret=0 out="" labels="" dev="" label="" ftype="" isodevs="" + local ret=0 out="" labels="" dev="" label="" ftype="" isodevs="" uuids="" out=$(blkid -c /dev/null -o export) || { ret=$? error "failed running [$ret]: blkid -c /dev/null -o export" @@ -219,12 +222,14 @@ read_fs_info() { LABEL=*) label="${line#LABEL=}"; labels="${labels}${line#LABEL=}${delim}";; TYPE=*) ftype=${line#TYPE=};; + UUID=*) uuids="${uuids}${line#UUID=}$delim";; esac done [ -n "$dev" -a "$ftype" = "iso9660" ] && isodevs="${isodevs} ${dev}=$label" DI_FS_LABELS="${labels%${delim}}" + DI_FS_UUIDS="${uuids%${delim}}" DI_ISO9660_DEVS="${isodevs# }" } @@ -437,14 +442,25 @@ dmi_sys_vendor_is() { [ "${DI_DMI_SYS_VENDOR}" = "$1" ] } -has_fs_with_label() { - local label="$1" - case ",${DI_FS_LABELS}," in - *,$label,*) return 0;; +has_fs_with_uuid() { + case ",${DI_FS_UUIDS}," in + *,$1,*) return 0;; esac return 1 } +has_fs_with_label() { + # has_fs_with_label(label1[ ,label2 ..]) + # return 0 if a there is a filesystem that matches any of the labels. + local label="" + for label in "$@"; do + case ",${DI_FS_LABELS}," in + *,$label,*) return 0;; + esac + done + return 1 +} + nocase_equal() { # nocase_equal(a, b) # return 0 if case insenstive comparision a.lower() == b.lower() @@ -583,6 +599,8 @@ dscheck_NoCloud() { case " ${DI_DMI_PRODUCT_SERIAL} " in *\ ds=nocloud*) return ${DS_FOUND};; esac + + is_ibm_cloud && return ${DS_NOT_FOUND} for d in nocloud nocloud-net; do check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} @@ -594,9 +612,8 @@ dscheck_NoCloud() { } check_configdrive_v2() { - if has_fs_with_label "config-2"; then - return ${DS_FOUND} - elif has_fs_with_label "CONFIG-2"; then + is_ibm_cloud && return ${DS_NOT_FOUND} + if has_fs_with_label CONFIG-2 config-2; then return ${DS_FOUND} fi # look in /config-drive /seed/config_drive for a directory @@ -988,6 +1005,36 @@ dscheck_Hetzner() { return ${DS_NOT_FOUND} } +is_ibm_provisioning() { + [ -f "${PATH_ROOT}/root/provisioningConfiguration.cfg" ] +} + +is_ibm_cloud() { + cached "${_IS_IBM_CLOUD}" && return ${_IS_IBM_CLOUD} + local ret=1 + if [ "$DI_VIRT" = "xen" ]; then + if is_ibm_provisioning; then + ret=0 + elif has_fs_with_label METADATA metadata; then + ret=0 + elif has_fs_with_uuid 9796-932E && + has_fs_with_label CONFIG-2 config-2; then + ret=0 + fi + fi + _IS_IBM_CLOUD=$ret + return $ret +} + +dscheck_IBMCloud() { + if is_ibm_provisioning; then + debug 1 "cloud-init disabled during provisioning on IBMCloud" + return ${DS_NOT_FOUND} + fi + is_ibm_cloud && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + collect_info() { read_virt read_pid1_product_name -- cgit v1.2.3 From 20e3ddab7f55c2bf5e700c69fd24a0ac2206dbcf Mon Sep 17 00:00:00 2001 From: Dominic Schlegel Date: Tue, 27 Mar 2018 10:10:42 -0400 Subject: FreeBSD: resizefs module now able to handle zfs/zpool. Previously there was no support at all for zfs file system. 
With this change it is now possible to use the resizefs module to grow a zpool to its maximum partition size on FreeBSD. LP: #1721243 --- cloudinit/config/cc_resizefs.py | 22 ++++++++ cloudinit/util.py | 44 ++++++++++++---- tests/data/mount_parse_ext.txt | 19 +++++++ tests/data/mount_parse_zfs.txt | 21 ++++++++ tests/data/zpool_status_simple.txt | 10 ++++ .../test_handler/test_handler_resizefs.py | 58 +++++++++++++++++++++- tests/unittests/test_util.py | 50 +++++++++++++++++++ 7 files changed, 214 insertions(+), 10 deletions(-) create mode 100644 tests/data/mount_parse_ext.txt create mode 100644 tests/data/mount_parse_zfs.txt create mode 100644 tests/data/zpool_status_simple.txt (limited to 'cloudinit/util.py') diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index cec22bb7..c8e1752f 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -84,6 +84,10 @@ def _resize_ufs(mount_point, devpth): return ('growfs', devpth) +def _resize_zfs(mount_point, devpth): + return ('zpool', 'online', '-e', mount_point, devpth) + + def _get_dumpfs_output(mount_point): dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point]) return dumpfs_res @@ -148,6 +152,7 @@ RESIZE_FS_PREFIXES_CMDS = [ ('ext', _resize_ext), ('xfs', _resize_xfs), ('ufs', _resize_ufs), + ('zfs', _resize_zfs), ] RESIZE_FS_PRECHECK_CMDS = { @@ -188,6 +193,13 @@ def maybe_get_writable_device_path(devpath, info, log): log.debug("Not attempting to resize devpath '%s': %s", devpath, info) return None + # FreeBSD zpool can also just use gpt/